Compare commits
1200 Commits
user/tmacr...development
Author | SHA1 | Date |
---|---|---|
Vitaliy Filippov | 19855115ae | |
Vitaliy Filippov | 329d8ef32c | |
Vitaliy Filippov | f0ded4ea4f | |
Vitaliy Filippov | 3eea263384 | |
Vitaliy Filippov | c26d4f7d70 | |
Vitaliy Filippov | 63137e7a7b | |
Vitaliy Filippov | fdb23b1cd2 | |
Vitaliy Filippov | 4120eac127 | |
Maha Benzekri | d9bbd6cf3e | |
Maha Benzekri | 65e89d286d | |
Maha Benzekri | dcbc5ca98f | |
Maha Benzekri | 817bb836ec | |
Maha Benzekri | e3e4b2aea7 | |
Francois Ferrand | 9cd72221e8 | |
Francois Ferrand | bdcd4685ad | |
Francois Ferrand | b2b6c47ba7 | |
Jonathan Gramain | da173d53b4 | |
Jonathan Gramain | 7eb2701f21 | |
Jonathan Gramain | 6ec3c8e10d | |
Jonathan Gramain | 7aaf277db2 | |
Francois Ferrand | 67421f8c76 | |
Anurag Mittal | f13ec2cf4c | |
williamlardier | 30eaaf15eb | |
williamlardier | 9d16fb0a34 | |
williamlardier | cdc612f379 | |
williamlardier | 61dd65b2c4 | |
bert-e | 2c0696322e | |
Maha Benzekri | c464a70b90 | |
Maha Benzekri | af07bb3df4 | |
Maha Benzekri | 1858654f34 | |
Maha Benzekri | 0475c8520a | |
Maha Benzekri | 31a4de5372 | |
Maha Benzekri | 0c53d13439 | |
Maha Benzekri | cad8b14df1 | |
Nicolas Humbert | fe29bacc79 | |
Maha Benzekri | ca8f570f15 | |
Maha Benzekri | a4bca10faf | |
Jonathan Gramain | c2ab4a2052 | |
Jonathan Gramain | fd0aa314eb | |
Mickael Bourgois | 5b8fcf0313 | |
Mickael Bourgois | bdfde26fe4 | |
Mickael Bourgois | e53613783a | |
Nicolas Humbert | a1dc2bd84d | |
bert-e | 77ed018b4f | |
bert-e | 05c628728d | |
bert-e | 0dd7fe9875 | |
Mickael Bourgois | e6d0eff1a8 | |
bert-e | 7e372b7bd5 | |
Nicolas Humbert | 06402c6c94 | |
bert-e | a1eed4fefb | |
bert-e | d8f7f18f5a | |
bert-e | e151b3fff1 | |
williamlardier | b6bc11881a | |
williamlardier | 648257612b | |
Jonathan Gramain | 1a0a981271 | |
bert-e | a45b2eb6a4 | |
bert-e | 15fd621c5c | |
bert-e | 285fe2f63b | |
bert-e | 00555597e0 | |
bert-e | bddc2ccd01 | |
Jonathan Gramain | 0d7cf8d40a | |
williamlardier | 851c72bd0f | |
bert-e | 722b6ae699 | |
bert-e | 3c2283b062 | |
Jonathan Gramain | 6a116734a9 | |
Jonathan Gramain | 9325ea4996 | |
Jonathan Gramain | 33ba89f0cf | |
Mickael Bourgois | be1557d972 | |
Mickael Bourgois | a03463061c | |
Frédéric Meinnel | 5a5ef7c572 | |
Frédéric Meinnel | f8ce90f9c3 | |
Jonathan Gramain | 6f58f9dd68 | |
bert-e | 042f541a45 | |
Mickael Bourgois | 02f126f040 | |
bert-e | 1477a70e47 | |
Frédéric Meinnel | 59d47a3e21 | |
Frédéric Meinnel | 6b61347c29 | |
bert-e | c2f6b45116 | |
bert-e | 993b9e6093 | |
bert-e | 7440794d93 | |
bert-e | 087369b37d | |
Will Toozs | da80e12dab | |
Jonathan Gramain | 2a82095d03 | |
Jonathan Gramain | 44b3d25459 | |
Jonathan Gramain | f1d6e30fb6 | |
bert-e | 37234efd14 | |
Jonathan Gramain | f4e83086d6 | |
Maha Benzekri | 74ff1691a0 | |
bert-e | 2a4ea38301 | |
Francois Ferrand | d800179f86 | |
Francois Ferrand | c1c45a4af9 | |
Francois Ferrand | da536ed037 | |
Nicolas Humbert | 06901104e8 | |
Benoit A. | 863f45d256 | |
KillianG | 4b642cf8b4 | |
KillianG | 2537f8aa9a | |
Maha Benzekri | 7866a1d06f | |
Maha Benzekri | 1509f1bdfe | |
Maha Benzekri | 13d349d211 | |
Maha Benzekri | 34a32c967d | |
bert-e | d79ed1b9c8 | |
williamlardier | 17b5bbc233 | |
williamlardier | 4aa8b5cc6e | |
williamlardier | 5deed6c2e1 | |
Nicolas Humbert | af34571771 | |
Nicolas Humbert | 5fd675a316 | |
Maha Benzekri | dcf0f902ff | |
bert-e | 5b66f8d089 | |
Florent Monjalet | e51b06cfea | |
Florent Monjalet | f2bc701f8c | |
Nicolas Humbert | 4d6b03ba47 | |
Nicolas Humbert | f03f049683 | |
Nicolas Humbert | d7b51de024 | |
Nicolas Humbert | cf51adf1c7 | |
Nicolas Humbert | 2b2667e29a | |
bert-e | 862317703e | |
bert-e | 547ce816e0 | |
bert-e | 15d5e93a2d | |
bert-e | d11bcb56e9 | |
bert-e | 0ed35c3d86 | |
Nicolas Humbert | c0218821ff | |
Nicolas Humbert | 7c4f461196 | |
Nicolas Humbert | 0a4d6f862f | |
bert-e | 8716fee67d | |
williamlardier | 05c93446ab | |
Rahul Padigela | bdb59a0e63 | |
Nicolas Humbert | 96cbaeb821 | |
bert-e | 15b68fa9fa | |
Nicolas Humbert | 51703a65f5 | |
bert-e | 09aaa2d5ee | |
Nicolas Humbert | ad39d90b6f | |
Jonathan Gramain | 20e9fe4adb | |
bert-e | 536d474f57 | |
KillianG | 25be9014c9 | |
KillianG | ed42f24580 | |
KillianG | ce076cb3df | |
KillianG | 4bc3de52ff | |
bert-e | beb5f69be3 | |
Alexander Chan | 4093bf2b04 | |
Alexander Chan | d0bb6d5b0c | |
bert-e | 3f7229eebe | |
bert-e | 7eb9d52da5 | |
Nicolas Humbert | e216c9dd20 | |
williamlardier | 0c1afe535b | |
williamlardier | 73335ae6ec | |
Alexander Chan | 99c514e8f2 | |
Alexander Chan | cfd9fdcfc4 | |
Alexander Chan | d809dac5e3 | |
williamlardier | 53dac8d233 | |
williamlardier | 6d5ef07eee | |
williamlardier | 272166e406 | |
williamlardier | 3af05e672b | |
williamlardier | 8b0c90cb2f | |
Alexander Chan | dfc9b761e2 | |
Alexander Chan | 04f1eb7f04 | |
bert-e | c204b90847 | |
bert-e | 78d6e7fd72 | |
Alexander Chan | 7768fa8d35 | |
KillianG | 4d9a9adc48 | |
KillianG | c4804e52ee | |
KillianG | 671cf3a679 | |
Jonathan Gramain | 9a5e27f97b | |
Jonathan Gramain | a9d003c6f8 | |
Jonathan Gramain | d3bdddeba3 | |
bert-e | 3252f7de03 | |
Nicolas Humbert | 7994bf7b96 | |
Nicolas Humbert | 4be0a06c4a | |
bert-e | da7dbdc51f | |
Will Toozs | 2103ef1237 | |
Will Toozs | dbc1c54246 | |
bert-e | 6c22f8404d | |
KillianG | 00e03f0592 | |
KillianG | d453758b7d | |
KillianG | a964dc99c3 | |
williamlardier | 5074e6c0a4 | |
williamlardier | bd05dd6918 | |
williamlardier | fbda12ce3c | |
Nicolas Humbert | b02934bb39 | |
Nicolas Humbert | c9a444969b | |
Nicolas Humbert | 5d018860ec | |
bert-e | 5838e02096 | |
Nicolas Humbert | ecd600ac4b | |
Naren | ab0324da05 | |
Naren | 2b353b33af | |
bert-e | fd57f47be1 | |
Jonathan Gramain | 58e47e5015 | |
Jonathan Gramain | 4d782ecec6 | |
bert-e | e0df67a115 | |
Naren | 7e18ae77e0 | |
Naren | 4750118f85 | |
Naren | c273c8b823 | |
Jonathan Gramain | d3b50fafa8 | |
Alexander Chan | bf4072151f | |
Jonathan Gramain | 22fa04b7e7 | |
bert-e | 4d71a834d5 | |
Alexander Chan | fa26a487f5 | |
Jonathan Gramain | 66740f5aba | |
williamlardier | 9c46703b89 | |
williamlardier | 47672d60ce | |
Jonathan Gramain | 6d41d103e8 | |
Jonathan Gramain | 890ac08dcd | |
Jonathan Gramain | 4949b7cc35 | |
Jonathan Gramain | 2b6fee4e84 | |
williamlardier | c460338163 | |
williamlardier | f17d52b602 | |
williamlardier | a6b234b7a8 | |
williamlardier | ff353bb4d6 | |
williamlardier | 0f9c9c2f18 | |
williamlardier | f6b2cf2c1a | |
Kerkesni | ecafbae36a | |
Kerkesni | d1cd7e8dba | |
Francois Ferrand | 3da6719200 | |
Francois Ferrand | c0dd54ef51 | |
Francois Ferrand | 7910792390 | |
Francois Ferrand | a4f4c51290 | |
Francois Ferrand | 66c4bc52b5 | |
Francois Ferrand | 81cd6652d6 | |
Francois Ferrand | 2a07f67244 | |
Francois Ferrand | 1a634015ee | |
williamlardier | 7a88a54918 | |
williamlardier | b25e620750 | |
williamlardier | 38ef89cc83 | |
williamlardier | 1a6c828bfc | |
williamlardier | 3d769c6960 | |
williamlardier | 8a27920a85 | |
williamlardier | 7642a22176 | |
bert-e | 8f63687ef3 | |
Kerkesni | 26f45fa81a | |
Kerkesni | 76b59057f7 | |
Kerkesni | ae0da3d605 | |
bert-e | 162d9ec46b | |
Kerkesni | ccd6462015 | |
Kerkesni | 665c77570c | |
Kerkesni | 27307b397c | |
Kerkesni | 414eada32b | |
Kerkesni | fdf0c6fe99 | |
Kerkesni | 8cc0be7da2 | |
bert-e | 65231633a7 | |
Alexander Chan | 92c567414a | |
Alexander Chan | ec55e39175 | |
Jonathan Gramain | c343820cae | |
williamlardier | 8307a1513e | |
williamlardier | 706c2425fe | |
williamlardier | 8618d77de9 | |
Artem Bakalov | 8abe746222 | |
bert-e | e74cca6795 | |
bert-e | 1427abecb7 | |
bert-e | 4771ce3067 | |
williamlardier | 4e8a907d99 | |
Killian Gardahaut | 6f42b3e64c | |
Jonathan Gramain | 237872a5a3 | |
bert-e | 390fd97edf | |
Nicolas Humbert | 1c9e4eb93d | |
bert-e | a4f163f466 | |
Nicolas Humbert | 4d0cc9bc12 | |
bert-e | 657f969d05 | |
bert-e | b43cf22b2c | |
Killian Gardahaut | 46c44ccaa6 | |
bert-e | 90c63168c1 | |
Jonathan Gramain | fe5f868f43 | |
Killian Gardahaut | c0ee81eb7a | |
bert-e | 604a0170f1 | |
Killian Gardahaut | 9d8f4793c9 | |
Killian Gardahaut | 69d33a3341 | |
Jonathan Gramain | 981c9c1a23 | |
KillianG | 806f988334 | |
KillianG | 976a05c3e5 | |
Killian Gardahaut | c5004cb521 | |
KillianG | bc9cfb0b6d | |
KillianG | 4b6e342ff8 | |
Kerkesni | 480f5a4427 | |
bert-e | 852ae9bd0f | |
Taylor McKinnon | 3d77540c47 | |
Taylor McKinnon | 4f0506cf31 | |
Nicolas Humbert | d92a91f076 | |
Nicolas Humbert | 28779db602 | |
Alexander Chan | 8db16c5532 | |
Jordi Bertran de Balanda | 33439ec215 | |
bert-e | 9873c0f112 | |
Nicolas Humbert | 725a492c2c | |
Nicolas Humbert | e446e3e132 | |
bert-e | 25c6b34a1e | |
Jordi Bertran de Balanda | 721d7ede93 | |
Nicolas Humbert | fbbba32d69 | |
Jordi Bertran de Balanda | 56c1ba5c21 | |
Will Toozs | 73431094a3 | |
bert-e | 5919d20fa4 | |
Nicolas Humbert | 56665069c1 | |
Nicolas Humbert | 61fe54bd73 | |
Francois Ferrand | e227d9d5ca | |
Francois Ferrand | cdcc44d272 | |
Xin LI | 5acef6895f | |
williamlardier | f7d360fe0b | |
williamlardier | 0a61b43252 | |
williamlardier | c014e630be | |
williamlardier | a747d5feda | |
KillianG | 765857071a | |
KillianG | 91b39da7e5 | |
williamlardier | 2cc6ebe9b4 | |
williamlardier | 7887d22d0d | |
williamlardier | 2f142aea7f | |
williamlardier | 26a046c9b2 | |
bert-e | ab23d59daf | |
bert-e | 6950df200a | |
williamlardier | 3265d162a7 | |
bert-e | 67200d80ad | |
bert-e | aa2992cd9f | |
williamlardier | 0e2071ed3b | |
williamlardier | ad579b2bd2 | |
Guillaume Hivert | 139da904a7 | |
Guillaume Hivert | e8851b40c0 | |
Naren | cd9456b510 | |
Alexander Chan | 15f07538d8 | |
Guillaume Hivert | e95d07af12 | |
Guillaume Hivert | b21f7f3440 | |
Guillaume Hivert | ca2d23710f | |
Guillaume Hivert | 310fd30266 | |
Guillaume Hivert | 8743e9c3ac | |
bert-e | b2af7c0aea | |
bert-e | 58c24376aa | |
Guillaume Hivert | 62c13c1eed | |
Guillaume Hivert | ee81fa5829 | |
bert-e | d7df1df2b6 | |
bert-e | f59b1b5e07 | |
Guillaume Hivert | a3418603d0 | |
Guillaume Hivert | 947ccd90d9 | |
Guillaume Hivert | f460ffdb21 | |
Guillaume Hivert | dfa49c79c5 | |
Guillaume Hivert | e582882883 | |
Guillaume Hivert | dd61c1abbe | |
Guillaume Hivert | a15f8a56e3 | |
Guillaume Hivert | 43e82f7f33 | |
bert-e | d7625ced17 | |
Guillaume Hivert | a2c1989a5d | |
bert-e | 24755c8472 | |
bert-e | fb39a4095e | |
bert-e | 32dfba2f89 | |
Guillaume Hivert | a2ca197bd8 | |
Xin LI | 3ed46f2d16 | |
williamlardier | 5c936c94ee | |
Xin LI | f87101eef6 | |
Xin LI | 14f86282b6 | |
Xin LI | f9dba52d38 | |
Yutaka Oishi | 6714aed351 | |
williamlardier | 99f96dd377 | |
williamlardier | ae08d89d7d | |
williamlardier | c48e2948f0 | |
williamlardier | fc942febca | |
williamlardier | a4fe998c34 | |
williamlardier | 1460e94488 | |
williamlardier | dcc7117d88 | |
williamlardier | 99cee367aa | |
williamlardier | ad5a4c152d | |
bert-e | b608c043f5 | |
bert-e | 079c09e1ec | |
bert-e | 75f07440ef | |
bert-e | 3a6bac1158 | |
bert-e | f2d119326a | |
Guillaume Hivert | 2a019f3788 | |
bert-e | 5e22900c0f | |
Guillaume Hivert | e62ed598e8 | |
bert-e | a217ad58e8 | |
bert-e | 10cf10daa4 | |
Guillaume Hivert | 6ec2f99a91 | |
bert-e | dfd8f20bf2 | |
Guillaume Hivert | fc17ab4299 | |
Guillaume Hivert | 44f398b01f | |
Guillaume Hivert | dc32d78b0f | |
Guillaume Hivert | 073d752ad8 | |
bert-e | 3454e934f5 | |
Jordi Bertran de Balanda | 399fdaaed0 | |
Jordi Bertran de Balanda | 5084c8f971 | |
williamlardier | 3388de6fb6 | |
Alexander Chan | 86e9d4a356 | |
williamlardier | a0010efbdd | |
Nicolas Humbert | 8eb7efd58a | |
williamlardier | 25ae7e443b | |
williamlardier | 4afa1ed78d | |
williamlardier | 706dfddf5f | |
williamlardier | 4cce306a12 | |
williamlardier | f3bf6f2615 | |
williamlardier | bbe51b2e5e | |
williamlardier | 3cd06256d6 | |
Yutaka Oishi | 6e42216549 | |
williamlardier | e37712e94f | |
williamlardier | ac30d29509 | |
Xin LI | 1f235d569d | |
williamlardier | 320713a764 | |
Artem Bakalov | fbf686feab | |
Guillaume Hivert | 4b795a245c | |
Guillaume Hivert | 983d59d565 | |
Guillaume Hivert | fd7f0a1a91 | |
bert-e | 459fd99316 | |
Guillaume Hivert | 235b2ac6d4 | |
bert-e | 8025ce08fe | |
bert-e | bffb00266f | |
bert-e | a6cd3a67e0 | |
dependabot[bot] | 18605a9546 | |
dependabot[bot] | 74d7fe5e68 | |
dependabot[bot] | e707cf4398 | |
bert-e | 47c34a4f5c | |
bert-e | 59f7e32037 | |
Jordi Bertran de Balanda | fb286c6403 | |
williamlardier | 7f93695300 | |
bert-e | cecb5fc1b1 | |
bert-e | 75ba3733aa | |
dependabot[bot] | 7c6f5d34b8 | |
bert-e | 7e3190a600 | |
Jordi Bertran de Balanda | e9c4a5ce99 | |
Guillaume Hivert | f378a85799 | |
bert-e | 23ea19bcb3 | |
KillianG | d2c1400cb6 | |
bert-e | 6da31dfd18 | |
Yutaka Oishi | ee1e65d778 | |
williamlardier | 3534927ccf | |
Jordi Bertran de Balanda | 0e3edb847e | |
bert-e | a9f9fe99a5 | |
Jordi Bertran de Balanda | a587f78242 | |
Guillaume Hivert | 40e5100cd8 | |
Guillaume Hivert | 0851aa1406 | |
bert-e | 3ce4effafb | |
bert-e | b1897708e5 | |
bert-e | 019907e2ab | |
bert-e | 73729c7bdb | |
Kerkesni | 3f5e553d8a | |
bert-e | efea69ff70 | |
Guillaume Hivert | 8a2b62815b | |
bert-e | 0dbbb80bea | |
Guillaume Hivert | 2eecda3079 | |
bert-e | 011606e146 | |
Guillaume Hivert | 8271b3ba21 | |
Guillaume Hivert | a1b980b95b | |
bert-e | 4c47264a78 | |
bert-e | f69087814e | |
Ronnie Smith | cd432fa920 | |
Ronnie Smith | af0ab673d7 | |
Ronnie Smith | 334edbc17b | |
bert-e | 271b28e59b | |
bert-e | 7f641d2755 | |
bert-e | df91750c5a | |
bert-e | 1f2caf6a01 | |
Ronnie Smith | 1333195dcd | |
bert-e | f822c7bad9 | |
bert-e | b3ce76d7d8 | |
Artem Bakalov | 18887d10b3 | |
Ronnie Smith | 223897bbff | |
bert-e | e4d888c07b | |
bert-e | dece118ba9 | |
Will Toozs | a077cc199f | |
bert-e | b0cb6d9c0f | |
Alexander Chan | e0da963226 | |
bert-e | 209f3bae44 | |
Guillaume Hivert | e311f0d83d | |
Guillaume Hivert | dab763884a | |
Guillaume Hivert | 4f22e526ee | |
Guillaume Hivert | 3951bb289c | |
Guillaume Hivert | b97de6505c | |
Guillaume Hivert | a5ad298c3b | |
bert-e | 6919af95f2 | |
Guillaume Hivert | b94c13a115 | |
Guillaume Hivert | 666da6b1aa | |
Guillaume Hivert | 7192d4bc93 | |
bert-e | 1523f6baa6 | |
bert-e | c517e4531a | |
Kerkesni | 7bcb81985a | |
bert-e | 68ac02ad54 | |
Guillaume Hivert | 0d479c82c5 | |
Guillaume Hivert | f958ed3204 | |
bert-e | 7d80db5d7f | |
bert-e | 34ef6d0434 | |
bert-e | 0ce6a79961 | |
Kerkesni | 7477b881ed | |
Guillaume Hivert | 3874d16f42 | |
Guillaume Hivert | fac5605a18 | |
bert-e | 72057b1efc | |
bert-e | 529840fa37 | |
Guillaume Hivert | 0a5f7c4ea9 | |
bert-e | 0e4ac99d9d | |
KillianG | 218d21b819 | |
bert-e | 9333323301 | |
bert-e | e5929b9f91 | |
bert-e | 8998544c06 | |
KillianG | df33583aea | |
KillianG | 050d649db5 | |
bert-e | de81f65306 | |
bert-e | 5eaf67ac93 | |
bert-e | 193a399ae2 | |
bert-e | 4de18e5b26 | |
Jordi Bertran de Balanda | c7e2743bf9 | |
Jordi Bertran de Balanda | a8029d8779 | |
bert-e | d639f4cffe | |
Guillaume Hivert | b2ec34c8f2 | |
KillianG | fb31f93829 | |
bert-e | 6c6ee31f34 | |
Kerkesni | 64351cf20d | |
Ronnie Smith | b58b4d0773 | |
Ronnie Smith | 9a0915d40e | |
Ronnie Smith | 36d3a67a68 | |
Xin LI | 3d156a58dd | |
Xin LI | 7737ec4904 | |
Kerkesni | d18f4d10bd | |
Kerkesni | e0bc4383cd | |
bert-e | de17f221bf | |
Kerkesni | d46301b498 | |
Kerkesni | 0bb2a44912 | |
Guillaume Hivert | 2c1fb773fd | |
Xin.LI | 3528c24276 | |
Xin LI | 6d8294d0c0 | |
Xin LI | 23bfc17a26 | |
bert-e | 0f6a1f2982 | |
Nicolas Humbert | bff13f1190 | |
bert-e | c857e743c8 | |
Kerkesni | 5f8edd35e9 | |
Kerkesni | 3c4359b696 | |
Kerkesni | 8ecf1d9808 | |
Kerkesni | 74e4934654 | |
Kerkesni | eac87fc9de | |
Kerkesni | e2be4d895d | |
bert-e | c0f7ebbaa9 | |
Kerkesni | 60fcedc251 | |
Kerkesni | 10ef395501 | |
Kerkesni | d1c8e67901 | |
Kerkesni | 266aabef37 | |
Kerkesni | b63c909808 | |
Kerkesni | 02ee339214 | |
Kerkesni | 5ca7f86350 | |
Kerkesni | 50a4fd8dc1 | |
bert-e | 5de0c2a7da | |
Kerkesni | b942516dca | |
Kerkesni | 54181af522 | |
Kerkesni | 21af204956 | |
Kerkesni | 68a27be345 | |
Kerkesni | 06350ffe15 | |
Taylor McKinnon | 5da4cd88ff | |
bert-e | 6bb68ee0e3 | |
Taylor McKinnon | 9a4bae40e6 | |
bert-e | 54e9635cab | |
Vianney Rancurel | b8f803338b | |
Guillaume Hivert | 4a1215adb5 | |
Guillaume Hivert | fc8d7532c6 | |
Guillaume Hivert | 1818bfe6c8 | |
Guillaume Hivert | 5cd929ea8a | |
Guillaume Hivert | 1138ce43af | |
Guillaume Hivert | 8b4e9cc0aa | |
Guillaume Hivert | ff6ea2a6d5 | |
Guillaume Hivert | 3b3600db92 | |
bert-e | 51c5247d01 | |
Vianney Rancurel | 7813a312b5 | |
Thomas Carmet | 35a4552c0f | |
Vianney Rancurel | 0dbdff3a00 | |
bert-e | 80b91d724d | |
bert-e | 40843d4bed | |
bert-e | b3fd77d08f | |
Taylor McKinnon | ed6bc63e75 | |
Rached Ben Mustapha | c95f84e887 | |
Nicolas Humbert | 3c9ab1bb99 | |
Nicolas Humbert | 3c30adaf85 | |
bert-e | 98edeae3f2 | |
bert-e | 4f15e4f267 | |
Xin LI | 68c5b42e6f | |
Xin LI | 6933bb8422 | |
Xin LI | 7e180fcad8 | |
Naren | 41d482cf7d | |
Nicolas Humbert | 1e334924f9 | |
Naren | 49239cc76e | |
williamlardier | 8d17fcac0f | |
williamlardier | 1c3fcc5a65 | |
Ronnie Smith | f5b0f1e082 | |
williamlardier | 708aab707d | |
williamlardier | 3a1cbdeedb | |
bert-e | faf5701248 | |
Ronnie Smith | 4cbb5a5dd6 | |
bert-e | 22eca9b61c | |
Naren | 59a679831b | |
bert-e | 26da124e27 | |
bert-e | 47b121c17b | |
Ronnie Smith | c605c1e1a2 | |
bert-e | 994bd0a6be | |
Ronnie Smith | 1e2a6c387e | |
Ronnie Smith | 1348fc820f | |
Ronnie Smith | 79a363786f | |
bert-e | 86e3c02126 | |
bert-e | 8f6731aa6a | |
Artem Bakalov | ea2f8ebd01 | |
Artem Bakalov | b640bbb45e | |
Taylor McKinnon | d9fcf275ce | |
Ronnie Smith | 66b03695c3 | |
Rahul Padigela | 3575e651e3 | |
Rahul Padigela | fa19a34306 | |
Xin LI | 3ab7ef4e8d | |
Xin LI | e531d3eae1 | |
Nicolas Humbert | 9ebcc9690e | |
Nicolas Humbert | 95759509cb | |
williamlardier | 6cdae52d57 | |
williamlardier | 995cb59db4 | |
Alexander Chan | 385e34b472 | |
Jonathan Gramain | f102c5ec8c | |
bert-e | e912617f02 | |
williamlardier | 3abde0bc74 | |
bert-e | cf49c7d8bf | |
Alexander Chan | e6e49a70c9 | |
Rached Ben Mustapha | 77f971957b | |
Ronnie Smith | ed1d6c12c2 | |
williamlardier | 27f17f9535 | |
williamlardier | 4658651593 | |
Jonathan Gramain | 7af6a73b3b | |
bert-e | 8728ff5c80 | |
Ronnie Smith | 7c16652e57 | |
bert-e | 5a9d667936 | |
Rahul Padigela | 29dd069a5f | |
Rahul Padigela | f1793bfe51 | |
Rahul Padigela | b42f1d3943 | |
Naren | c27b359fba | |
Alexandre Lavigne | bb8bdbc6ea | |
Nicolas Humbert | 413f0c9433 | |
Nicolas Humbert | ab3fa2f13d | |
Naren | bfbda5d38b | |
Naren | 2e6b1791bb | |
Naren | 1f8cfecf43 | |
Alexandre Lavigne | 6a250feea9 | |
Thomas Carmet | 0a33d4b74e | |
Thomas Carmet | 9a544b9890 | |
Ronnie Smith | a2b6846e2e | |
Ronnie Smith | 3fdfc7196b | |
Ronnie Smith | f602fb9601 | |
Thomas Carmet | c237a25448 | |
Thomas Carmet | 5aaec6a4e6 | |
Thomas Carmet | 11278e7334 | |
bert-e | c0fe2efbc2 | |
Jonathan Gramain | b0633d8a13 | |
bert-e | b27caf5814 | |
bert-e | f5f6cb5692 | |
bert-e | 87ba4a7b4a | |
bert-e | 9ff605f875 | |
Thomas Carmet | 4e160db87d | |
bert-e | dc698f4d5c | |
bert-e | 8c7907f753 | |
bert-e | 395a881d92 | |
bert-e | 3d6306d2a3 | |
bert-e | 681740fbe7 | |
Alexander Chan | d381ec14d8 | |
bert-e | 0bdcd866bc | |
Jonathan Gramain | 856a1634d4 | |
Jonathan Gramain | 2921864aac | |
bert-e | 4665f3da5c | |
Jonathan Gramain | 0df0d952d2 | |
bert-e | 54eb3ede5f | |
bert-e | be4dea481d | |
Rached Ben Mustapha | d15e2d5df6 | |
Taylor McKinnon | 93503cf505 | |
bert-e | 0f63de2f05 | |
bert-e | 16a5e6a550 | |
Rached Ben Mustapha | 864d2e8a28 | |
vrancurel | 15703aafca | |
bert-e | db000bc5e1 | |
vrancurel | 06c35c15a5 | |
bert-e | 68c8189f53 | |
bert-e | 041731e6eb | |
Nicolas Humbert | d51361ce06 | |
Nicolas Humbert | 453fd8b722 | |
bert-e | 2621aa7e53 | |
bert-e | b4aeab77b9 | |
bert-e | e1a3b05330 | |
bert-e | 0151504158 | |
bert-e | 048e8b02bc | |
bert-e | 1d899efec8 | |
Taylor McKinnon | 4cb8f715e9 | |
bert-e | 580dda4d48 | |
bert-e | a17054e3a4 | |
bert-e | a8df2b7b96 | |
Taylor McKinnon | d572fc953b | |
Alexander Chan | 2a78d4f413 | |
Alexander Chan | d2c7165214 | |
bert-e | 1999a586fd | |
bert-e | a1c0dd2472 | |
bert-e | a22032f9a5 | |
bert-e | dd38e32797 | |
bert-e | 274bf80720 | |
Ronnie Smith | 25bd1f6111 | |
Jonathan Gramain | 2d41b034aa | |
Rached Ben Mustapha | bb8ec629bf | |
Rached Ben Mustapha | 4bbaa83b87 | |
bert-e | 58697f7915 | |
Ronnie Smith | bf4a6fe01b | |
alexandre merle | c703ba66e7 | |
alexandre merle | 20c77f9f85 | |
alexandre merle | edb27cc9a8 | |
alexandre merle | 79e0dfa38f | |
alexandre merle | e1118803e6 | |
bert-e | 1230e72c49 | |
bert-e | 372df634c4 | |
bert-e | 2b96888eb7 | |
bert-e | a0909885f1 | |
alexandre merle | 5d100645aa | |
bert-e | 356edf8478 | |
bert-e | 1cfb869631 | |
bert-e | 0403ca65fc | |
Rahul Padigela | 269e005198 | |
bert-e | 10627f51d1 | |
bert-e | aa5f714081 | |
Jonathan Gramain | d27c0577ee | |
Jonathan Gramain | ff539645ea | |
Jonathan Gramain | e5c3bb188a | |
Jonathan Gramain | 2461b5c2f7 | |
Jonathan Gramain | 747307cac2 | |
Jonathan Gramain | 5942d9d70c | |
bert-e | 8ed84786fc | |
bert-e | 1e40e76bb2 | |
bert-e | f4058dd6ef | |
bert-e | 04f7692bad | |
bert-e | 32752ac504 | |
vrancurel | 549f187893 | |
bert-e | 93cd582e3a | |
vrancurel | 2582108f97 | |
bert-e | b25867f9c2 | |
bert-e | 7b60166d08 | |
bert-e | 8887a67261 | |
Ronnie Smith | 437ecc57f9 | |
bert-e | 759f0ef949 | |
bert-e | 0014aa3467 | |
Dora Korpar | 1727f4bd3f | |
Dora Korpar | d71c8eac86 | |
bert-e | 7eb6304956 | |
bert-e | ce98e9d104 | |
bert-e | 36d932bbce | |
bert-e | 7f2c40cf6d | |
bert-e | 6a78af0f39 | |
bert-e | f73dc3dd68 | |
Jonathan Gramain | 8ec0611d08 | |
Jonathan Gramain | 6baca6f1e2 | |
bert-e | 78d62636c3 | |
Dora Korpar | 9b8f813d02 | |
Dora Korpar | 0f70366774 | |
bert-e | fb8cf65091 | |
Jonathan Gramain | 7792f7c603 | |
bert-e | 668d90b7d0 | |
bert-e | c1cfc59a0e | |
bert-e | f956b02387 | |
Jonathan Gramain | 86bca2502e | |
bert-e | 3aa49eed1d | |
Jonathan Gramain | a9c3b2218f | |
Jonathan Gramain | f459498e18 | |
bert-e | 55323aa7a2 | |
bert-e | a20e875908 | |
bert-e | a3a83f5ec8 | |
bert-e | 51d3312de8 | |
Ilke | 6383d14d49 | |
Jonathan Gramain | 0e4035d45b | |
Jonathan Gramain | a18285ced8 | |
Rahul Padigela | dc4e1829fc | |
bert-e | 3b438e03cd | |
bert-e | f2787ec013 | |
bert-e | 560ccef3ec | |
Dora Korpar | 3f4ed31153 | |
Jonathan Gramain | fc23f68d0f | |
bert-e | 2a4da20c0a | |
bert-e | 14c4696482 | |
bert-e | 275226278f | |
bert-e | b4b5712df7 | |
bert-e | 750c021c37 | |
bert-e | ee4d94c0fb | |
bert-e | 98f1d219a9 | |
Dora Korpar | fb363030c0 | |
Dora Korpar | 7aeb32e223 | |
bert-e | 5bdee7eb8a | |
bert-e | b8fd646097 | |
bert-e | a9d6e05c6e | |
Ilke | dc412e8953 | |
bert-e | 36b68be051 | |
bert-e | 3f19a00b32 | |
bert-e | ea8166cf7a | |
bert-e | c06f735e82 | |
bert-e | b8c4ae4203 | |
Dora Korpar | 0cf9a9cdd5 | |
bert-e | d201e572fd | |
bert-e | 400dc24281 | |
bert-e | f59cea6b34 | |
bert-e | f19feb949d | |
Jonathan Gramain | bbef1964d7 | |
bert-e | 43cd5f59b0 | |
bert-e | dd7390ade6 | |
Dora Korpar | a3739cc836 | |
bert-e | 97682f56bf | |
bert-e | ce4ca533e2 | |
bert-e | 26bff09887 | |
Pepijn Van Eeckhoudt | f6165146ec | |
Ilke | 9f580444f3 | |
Ilke | 93fe6fa94d | |
Jonathan Gramain | d9ff2c2060 | |
bert-e | e553342616 | |
Ilke | 8a9dbc4de7 | |
Jonathan Gramain | 81d05b6ea8 | |
bert-e | 44b8de565f | |
vrancurel | 3ed66c50f6 | |
bert-e | 90e1cff9f9 | |
Jonathan Gramain | 9f323b32ea | |
bert-e | dee53c8ad8 | |
bert-e | 9680071e1a | |
bert-e | 6dd3aa92a4 | |
bert-e | a9618bc0bb | |
bert-e | b6042035c0 | |
bert-e | d2fafe8ef3 | |
bert-e | fb18cba367 | |
bert-e | bab9d5dc24 | |
Alexander Chan | e531e5e711 | |
bert-e | f54d356669 | |
Jonathan Gramain | c1bb2ac058 | |
Jonathan Gramain | d76eeeea89 | |
Alexander Chan | ad58f66981 | |
bert-e | 85b5599ce2 | |
Dora Korpar | 3121d29140 | |
Jonathan Gramain | a75db3122f | |
bert-e | d994e2ae60 | |
Rached Ben Mustapha | c443793968 | |
Rached Ben Mustapha | 517a034291 | |
Rached Ben Mustapha | cc6671f37c | |
Rached Ben Mustapha | 87bb3126a3 | |
bert-e | cedd08686a | |
bert-e | 635d2fe6d9 | |
Jianqin Wang | 9557e36438 | |
bert-e | 2bb0e171d8 | |
bert-e | 68f5d3c9f2 | |
vrancurel | 71caf08c19 | |
Guillaume Gimenez | 38403b84aa | |
Jianqin Wang | 21610dd88d | |
bbuchanan9 | 7566d1f0a9 | |
bbuchanan9 | 28415a5c9b | |
Taylor McKinnon | 506a9ad37d | |
bert-e | 1c6e56e8ef | |
bbuchanan9 | 9d02f86cf5 | |
bert-e | 5c4547a3a9 | |
bbuchanan9 | 5de85713ef | |
Rahul Padigela | 68defde532 | |
Dora Korpar | 9e5d4ae95b | |
Dora Korpar | 633ce2c069 | |
Dora Korpar | 08ddc07d1c | |
Katherine Laue | bc6c9c8c36 | |
bert-e | 3dc9b958f7 | |
vrancurel | 4b5c0ff923 | |
vrancurel | 62536f66df | |
bert-e | 9032b89e6f | |
vrancurel | 9014761c70 | |
bert-e | 8d9864264d | |
Rahul Padigela | 839182292c | |
Rahul Padigela | a197b2b6a4 | |
bert-e | adf6cfc8e4 | |
bert-e | 40aa7d836f | |
bert-e | 4fa15fce2a | |
bert-e | 279f08c870 | |
anurag4dsb | 05a8475f1c | |
anurag4dsb | 8c664d9076 | |
Jianqin Wang | 77172f33f8 | |
Guillaume Gimenez | 0a0fe7f1da | |
Salim | 6d7437a776 | |
bert-e | 1a6174dadf | |
vrancurel | c57cde88bb | |
Rahul Padigela | 6e97c01edd | |
Rahul Padigela | dd6fde61bb | |
Benoit A | 3e8c43e05b | |
Nicolas Humbert | 633efcbc50 | |
Alexander Chan | d99b430ac4 | |
philipyoo | 8f71d4ff03 | |
Rahul Padigela | d0f77cee75 | |
bert-e | 4419db7b23 | |
Rahul Padigela | 3672df0fc4 | |
Dora Korpar | 9b223bea87 | |
Guillaume Gimenez | b7dfc3a9c0 | |
Dora Korpar | 787f66458f | |
Dora Korpar | 618b179d5c | |
bert-e | e6ddad1193 | |
bert-e | 6575be0050 | |
Jianqin Wang | 1f7263c320 | |
Jianqin Wang | 9da1a8e1f7 | |
Jianqin Wang | 14f8690a9a | |
Jianqin Wang | 700cb4eb48 | |
philipyoo | 7dd4dca7e5 | |
bert-e | a5d248000e | |
Taylor McKinnon | dae12b245b | |
bert-e | c0129eb0d7 | |
philipyoo | bd0d6c1942 | |
Jonathan Gramain | ed2d393e98 | |
bert-e | 886110138a | |
Jonathan Gramain | 397eecb370 | |
bert-e | 3623b992da | |
Jonathan Gramain | 78b64bebed | |
Dora Korpar | e857bb5f5a | |
Benoit A | 9c1dab1055 | |
bert-e | e18850911e | |
Jonathan Gramain | 2ff9cf866d | |
bbuchanan9 | cc6ed165dd | |
Dora Korpar | a6b5c21e5d | |
bbuchanan9 | 64426b1450 | |
bert-e | 160fe96b18 | |
Taylor McKinnon | 59290513e3 | |
Rahul Padigela | 6b9be35d8e | |
bbuchanan9 | dffcbefe9b | |
bbuchanan9 | c470cfb5b1 | |
philipyoo | abcff1b04e | |
bbuchanan9 | 6791d1b561 | |
bert-e | a8e0a30918 | |
philipyoo | 487fe8bf35 | |
bert-e | b7c84ef7d3 | |
bert-e | b55295818f | |
philipyoo | 0213bcfd25 | |
bert-e | 32b0946679 | |
JianqinWang | bef886d8ad | |
philipyoo | d44c2f123e | |
bert-e | f199d52c54 | |
bert-e | b9c419dde7 | |
bert-e | 5cf3948ba2 | |
bert-e | 226088c8fb | |
Rahul Padigela | bca10414bc | |
bert-e | 8f0cab8d91 | |
Jonathan Gramain | 40c234bb5f | |
bert-e | 26e2b5e425 | |
bert-e | df5a61cb8d | |
bert-e | b01a390c46 | |
Guillaume Gimenez | 87103f83e1 | |
bert-e | 9ba5d64cd2 | |
bert-e | f4d4c9b76e | |
bert-e | 2c149ea9b1 | |
philipyoo | 735ad74bda | |
bert-e | 1636c87556 | |
bert-e | 8e2d6d42a8 | |
bert-e | f11d6e223d | |
philipyoo | ebe2d1f24d | |
bert-e | 6a1bc69336 | |
bert-e | 0144158a37 | |
bert-e | aea19c9cc2 | |
bert-e | daaeb5637a | |
Dora Korpar | c479933448 | |
JianqinWang | f804aa9657 | |
Jonathan Gramain | ad35b9ec78 | |
Jonathan Gramain | 9fe0ba5c8c | |
bert-e | 2fe1e4da3c | |
bert-e | 6a4784417f | |
bert-e | 0ed8c750c9 | |
bert-e | 0d33e5a69f | |
bert-e | ac470f4233 | |
bert-e | 23d406dc81 | |
JianqinWang | f11ccbfefa | |
bert-e | c8c0527f65 | |
JianqinWang | d81d309420 | |
Dora Korpar | c657b4b469 | |
Dora Korpar | 65c99ff86d | |
Jonathan Gramain | 645433ed0c | |
JianqinWang | f9bb82ce43 | |
bert-e | ab4500b842 | |
bert-e | 40a802b715 | |
Giacomo Guiulfo | 84bf7bd511 | |
Giacomo Guiulfo | b5fa54ec11 | |
Bennett Buchanan | 58e9f26ae0 | |
Giacomo Guiulfo | d6fdd153aa | |
Giacomo Guiulfo | 1e05f0f54e | |
Giacomo Guiulfo | 9c66b7ceba | |
bert-e | 0555d0b41a | |
Guillaume Gimenez | 39f2a53beb | |
Bennett Buchanan | 0a75792ca6 | |
bert-e | 5225fc231d | |
Guillaume Gimenez | 30c3ce1e2b | |
Taylor McKinnon | aa157c6d13 | |
Bennett Buchanan | 699890d2d7 | |
Jonathan Gramain | ea1a7d4d87 | |
bert-e | a9297e707a | |
Bennett Buchanan | 75dccc528d | |
bert-e | 5d7cf78eda | |
Giacomo Guiulfo | 0a364fe379 | |
Rahul Padigela | 345031f2bd | |
greenkeeper[bot] | 0bc1fe1a71 | |
greenkeeper[bot] | f23e457b83 | |
greenkeeper[bot] | 09aca2dcf4 | |
greenkeeper[bot] | d304334e92 | |
greenkeeper[bot] | 7955b97810 | |
Rahul Padigela | d14cef843b | |
Dora Korpar | f2b39fb3d7 | |
Dora Korpar | 9a009746be | |
Jeremy Desanlis | 3e08bad2da | |
philipyoo | 13b156b226 | |
JianqinWang | 07f655c2f8 | |
JianqinWang | f496cec8bf | |
bert-e | 7f5413699d | |
Jonathan Gramain | d620fef517 | |
Jonathan Gramain | 8ac3cf5548 | |
Giacomo Guiulfo | ebd9a74666 | |
bert-e | a1f9bef60e | |
philipyoo | 899107913c | |
Jonathan Gramain | 18dfc6b4fa | |
Rahul Padigela | 9fe16c64fa | |
vrancurel | 3dee6e2d0b | |
vrancurel | 3545eb4d62 | |
Dora Korpar | 0a85eeb8b7 | |
Dora Korpar | 83759870f2 | |
Alexander Chan | 0d4bf3c17f | |
Alexander Chan | 0117b39dcf | |
Bennett Buchanan | 549ca1f683 | |
bert-e | e4a66343fb | |
philipyoo | a89fdde6fd | |
philipyoo | 872a2d88e5 | |
philipyoo | 0c9c462634 | |
philipyoo | a3973ac7d3 | |
bert-e | d1a8693fe5 | |
Jeremy Desanlis | 5687a48599 | |
Nicolas Humbert | 9dca871e1b | |
philipyoo | 7088812c80 | |
philipyoo | 9f742d4921 | |
bert-e | 2c31728905 | |
Bennett Buchanan | 125ccbbfa9 | |
bert-e | 40c8b37b30 | |
bert-e | 879075e4ec | |
philipyoo | 79ed68ce9f | |
bert-e | cbfacb5ec0 | |
philipyoo | 06dfdd9612 | |
philipyoo | bf95506495 | |
Alexander Chan | db743f8269 | |
Alexander Chan | a2311bb69c | |
Alexander Chan | c8f323237f | |
Rahul Padigela | 5cf55fcb68 | |
Rahul Padigela | de94a0e62e | |
Rahul Padigela | 2b13994795 | |
Rahul Padigela | 769a461178 | |
Rahul Padigela | c11fc1d9d8 | |
bert-e | b8ad86a1f1 | |
Giacomo Guiulfo | 12c4df722b | |
bert-e | f566e32322 | |
philipyoo | 6413c92fbc | |
bert-e | 29182cce05 | |
Jonathan Gramain | 9fb5b8b10d | |
vrancurel | 5631a892c6 | |
Rahul Padigela | dfcdea46fc | |
Rahul Padigela | be02e59bfe | |
Rahul Padigela | fdbeed1c4e | |
bert-e | 91fbc3fd23 | |
philipyoo | 241338bcfa | |
Rached Ben Mustapha | 6db80e9411 | |
bert-e | d701352635 | |
Alexander Chan | b291ccc03f | |
Bennett Buchanan | 0426f44dee | |
Rahul Padigela | 1b9242788a | |
Bennett Buchanan | 1a2ea2f353 | |
Bennett Buchanan | c36280a6e8 | |
bert-e | c749725410 | |
Alexander Chan | 3d06ec6230 | |
Jonathan Gramain | 159ebb4283 | |
Alexander Chan | e17333b19e | |
philipyoo | b3b22292c4 | |
bert-e | 68d27ed5bf | |
bert-e | 1e79964253 | |
philipyoo | 5f76343c2e | |
Alexander Chan | d907c9942d | |
Alexander Chan | c63b0713c0 | |
Alexander Chan | 6a9a88800a | |
Dora Korpar | 5834f15397 | |
bert-e | b50f6c4678 | |
bert-e | edeab02107 | |
David Pineau | c64cccdf55 | |
vrancurel | af2b3a4bc3 | |
philipyoo | 1e9ad08830 | |
David Pineau | 9e66fda610 | |
Rahul Padigela | 888e154f0e | |
Nicolas Humbert | 8448f909e4 | |
bert-e | 2b16e84733 | |
philipyoo | a1a6f65364 | |
bert-e | 7cf0c97d8e | |
Taylor McKinnon | 10e7b976d5 | |
vrancurel | e80ea95ad8 | |
Jeremy Desanlis | 7075318dd2 | |
bert-e | 38f68fba1a | |
vrancurel | 16f9a6f5f6 | |
bert-e | c48e4b89bd | |
Bennett Buchanan | 2a8169e936 | |
Alexander Chan | 1af67fffc7 | |
Guillaume Gimenez | e9ac11b1fe | |
bert-e | 30dcd6ef86 | |
Alexander Chan | 2ce9db4e01 | |
philipyoo | 9e234e2b41 | |
philipyoo | 83a831f512 | |
Guillaume Gimenez | 32c2a6fe99 | |
Rahul Padigela | 063361377c | |
Rahul Padigela | ea7f28c82d | |
Rahul Padigela | a9e760b32e | |
Rahul Padigela | 3b16a307b8 | |
Rahul Padigela | f8dfa378a1 | |
Jonathan Gramain | e16eadb474 | |
Rahul Padigela | 5bf7fef53c | |
philipyoo | 659aee2fc2 | |
Rahul Padigela | bde52ab89b | |
Jonathan Gramain | 0ddb4da8a9 | |
Rached Ben Mustapha | 56e280236b | |
Rached Ben Mustapha | f904f04401 | |
Rahul Padigela | db45fee9e8 | |
JianqinWang | ecc431c715 | |
JianqinWang | 6f694ae7f4 | |
Rahul Padigela | e7862d3922 | |
Jonathan Gramain | de7ebf70d7 | |
Rahul Padigela | 1425f03c1e | |
Alexander Chan | ad527911a2 | |
Rahul Padigela | 6c528688ee | |
Nicolas Humbert | e53aa2efd2 | |
Rahul Padigela | 873bc9b647 | |
Nicolas Humbert | 160b960607 | |
Rahul Padigela | 843bd1fe13 | |
Alexander Chan | 93a2a79699 | |
Rahul Padigela | ef32d5e94d | |
Alexander Chan | 45d9c3d999 | |
Rahul Padigela | a2ce46d8d0 | |
anurag4DSB | 0c0bffa2c3 | |
ironman-machine | d966c0bda9 | |
Rahul Padigela | cb86a857cc | |
Alexander Chan | 55c9441bd7 | |
David Pineau | cae55a65c8 | |
philipyoo | 114cbf5571 | |
Alexander Chan | f2bab3b3d6 | |
philipyoo | 3276d235bb | |
philipyoo | ee2aed10f3 | |
Rahul Padigela | 19bee770ea | |
Rahul Padigela | e0c5d03436 | |
Rahul Padigela | c8a7148645 | |
Rahul Padigela | 8ca5dce4fe | |
Bennett Buchanan | 599fb5709b | |
Rahul Padigela | 1161d5f75d | |
Rahul Padigela | 26b6c5d1d9 | |
Bennett Buchanan | 8fd50cd20e | |
Rahul Padigela | 1f6b5bf2bd | |
Rached Ben Mustapha | a7813daea9 | |
Rahul Padigela | 5d4eb84425 | |
Alexander Chan | 9511fff479 | |
Rahul Padigela | d70f64a6d0 | |
Alexander Chan | ee66dc811c | |
Rahul Padigela | 2710471726 | |
Dora Korpar | 9aee9f6cf0 | |
Rahul Padigela | a168fab266 | |
Dora Korpar | 92da4c90e5 | |
Rahul Padigela | a95d5ea15d | |
Salim | aad05faa12 | |
Rahul Padigela | ab230ebfe7 | |
Salim | b3103e1307 | |
Salim | f3b0091210 | |
Rahul Padigela | f633b91072 | |
Alexander Chan | 87807462dc | |
Rahul Padigela | d7f114d504 | |
Rached Ben Mustapha | 5ef168e654 | |
Rahul Padigela | 82b4055c6c | |
Rached Ben Mustapha | 91ccccfe85 | |
Rached Ben Mustapha | 696999874b | |
Rached Ben Mustapha | d2bed3bf9a | |
Rahul Padigela | ad42baa5ff | |
Rached Ben Mustapha | 6ac92b2ad2 | |
Rahul Padigela | 13dbf48867 | |
Rached Ben Mustapha | e79ad68e96 | |
Rahul Padigela | a4a5fe0db0 | |
Bennett Buchanan | f838fcc31f | |
VR | eb9dd23b14 | |
JianqinWang | edbf7ab650 | |
Rahul Padigela | e068950903 | |
Rahul Padigela | 1ceb7b264c | |
vrancurel | 5a29aaa10c | |
Rahul Padigela | 7587f7ba25 | |
Rahul Padigela | 795b145594 | |
Jeremy Desanlis | 58f027a693 | |
Rahul Padigela | e09348d658 | |
Alexander Chan | bddb90c6a1 | |
Rahul Padigela | 94efaaccc2 | |
Rahul Padigela | 463a8ebe15 | |
philipyoo | f17ce17857 | |
Rahul Padigela | 3a5250e2e9 | |
ironman-machine | 48cb7b3b05 | |
Nicolas Humbert | 84c4c147a2 | |
Rahul Padigela | 958e818655 | |
philipyoo | 91dd219c47 | |
Alexander Chan | 5f3d478edb | |
Rahul Padigela | 04d56cfdff | |
Rahul Padigela | 73dd529c29 | |
philipyoo | a9aa40c168 | |
ironman-machine | 189194a4e7 | |
JianqinWang | a9a6b2433d | |
JianqinWang | fa19fc8859 | |
JianqinWang | a269619698 | |
Rahul Padigela | da1da43597 | |
Rahul Padigela | caac4e4e7e | |
Rahul Padigela | 67250133dc | |
JianqinWang | d3f3be03ae | |
ironman-machine | 1a9f1afd2c | |
JianqinWang | 9a5afdbc5c | |
JianqinWang | 83cf54512b | |
ironman-machine | 7e3ad64456 | |
Nicolas Humbert | eba0cb6116 | |
Lauren Spiegel | fd23e82ab9 | |
Lauren Spiegel | d7cf5e8ccf | |
flavien-scality | d0f4f95f0d | |
Alexandre Merle | 0e606b1061 | |
ironman-machine | 44ead88d83 | |
vrancurel | d8e1497940 | |
ThibaultRiviere | 4193394340 | |
Thibault Riviere | 0f1b0dad01 | |
ironman-machine | 393d6edc07 | |
vrancurel | 70638eaf7a | |
Lauren Spiegel | 9d0156dfdf | |
Lauren Spiegel | 8d8028b83f | |
Lauren Spiegel | b99fe2cd8d | |
Lauren Spiegel | cc26f288be |
ESLint configuration:

@@ -1 +1,6 @@
-{ "extends": "scality" }
+{
+    "extends": "scality",
+    "parserOptions": {
+        "ecmaVersion": 2020
+    }
+}
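The configuration above raises the ESLint parser target to ES2020 (`ecmaVersion: 2020`). As a quick illustration (snippet not taken from the repository), optional chaining and nullish coalescing are ES2020 features that only parse once this option is set:

```javascript
// Parses only with ecmaVersion >= 2020 (illustrative snippet):
const readLocation = config?.locationConstraints?.['us-east-1'] ?? 'default';
```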
CI test workflow (GitHub Actions):

@@ -25,24 +25,30 @@ jobs:
         - 6379:6379
     steps:
       - name: Checkout
-        uses: actions/checkout@v2
-      - uses: actions/setup-node@v2
+        uses: actions/checkout@v4
+      - uses: actions/setup-node@v4
         with:
           node-version: '16'
           cache: 'yarn'
       - name: install dependencies
-        run: yarn install --frozen-lockfile --prefer-offline
+        run: yarn install --frozen-lockfile --prefer-offline --network-concurrency 1
         continue-on-error: true # TODO ARSN-97 Remove it when no errors in TS
       - name: lint yaml
         run: yarn --silent lint_yml
       - name: lint javascript
-        run: yarn --silent lint -- --max-warnings 0
+        run: yarn --silent lint --max-warnings 0
       - name: lint markdown
         run: yarn --silent lint_md
-      - name: run unit tests
-        run: yarn test
+      - name: add hostname
+        run: |
+          sudo sh -c "echo '127.0.0.1 testrequestbucket.localhost' >> /etc/hosts"
+      - name: test and coverage
+        run: yarn --silent coverage
       - name: run functional tests
         run: yarn ft_test
+      - uses: codecov/codecov-action@v4
+        with:
+          token: ${{ secrets.CODECOV_TOKEN }}
       - name: run executables tests
         run: yarn install && yarn test
         working-directory: 'lib/executables/pensieveCreds/'
SWC compiler configuration (new file):

@@ -0,0 +1,12 @@
{
    "$schema": "https://swc.rs/schema.json",
    "jsc": {
        "parser": {
            "syntax": "typescript"
        },
        "target": "es2017"
    },
    "module": {
        "type": "commonjs"
    }
}
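The new configuration above tells SWC to parse TypeScript and emit CommonJS targeting ES2017. A minimal sketch of the effect (illustrative input and approximate output, not taken from the repository): with `"target": "es2017"`, `async`/`await` is preserved rather than lowered to generators, and `"module": { "type": "commonjs" }` turns ES module exports into `exports` assignments.

```typescript
// Input: SWC strips the types and keeps async/await as-is.
export async function fetchBucketInfo(name: string): Promise<{ name: string }> {
    return { name };
}
// Output (roughly):
//   exports.fetchBucketInfo = async function fetchBucketInfo(name) { return { name }; };
```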
README:

@@ -1,5 +1,7 @@
 # Arsenal
 
+[![codecov](https://codecov.io/gh/scality/Arsenal/branch/development/8.1/graph/badge.svg?token=X0esXhJSwb)](https://codecov.io/gh/scality/Arsenal)
+
 Common utilities for the S3 project components
 
 Within this repository, you will be able to find the shared libraries for the
Bucket metadata model version documentation:

@@ -85,6 +85,66 @@ Used to store the bucket lifecycle configuration info
 
+### Properties Added
+
+```javascript
+this._uid = uid || uuid();
+```
+
+### Usage
+
+Used to set a unique identifier on a bucket
+
+## Model version 8
+
+### Properties Added
+
+```javascript
+this._readLocationConstraint = readLocationConstraint || null;
+```
+
+### Usage
+
+Used to store default read location of the bucket
+
+## Model version 9
+
+### Properties Added
+
+```javascript
+this._isNFS = isNFS || null;
+```
+
+### Usage
+
+Used to determine whether the bucket may be accessed through NFS
+
+## Model version 10
+
+### Properties Added
+
+```javascript
+this._ingestion = ingestionConfig || null;
+```
+
+### Usage
+
+Used to store the ingestion status of a bucket
+
+## Model version 11
+
+### Properties Added
+
+```javascript
+this._azureInfo = azureInfo || null;
+```
+
+### Usage
+
+Used to store Azure storage account specific information
+
+## Model version 12
+
+### Properties Added
+
+```javascript
+this._objectLockEnabled = objectLockEnabled || false;
+this._objectLockConfiguration = objectLockConfiguration || null;
@@ -95,7 +155,7 @@ this._objectLockConfiguration = objectLockConfiguration || null;
 Used to determine whether object lock capabilities are enabled on a bucket and
 to store the object lock configuration of the bucket
 
-## Model version 8
+## Model version 13
 
 ### Properties Added
 
@@ -107,7 +167,7 @@ this._notificationConfiguration = notificationConfiguration || null;
 
 Used to store the bucket notification configuration info
 
-## Model version 9
+## Model version 14
 
 ### Properties Added
 
@@ -119,19 +179,7 @@ this._serverSideEncryption.configuredMasterKeyId = configuredMasterKeyId || unde
 
 Used to store the users configured KMS key id
 
-## Model version 10
-
-### Properties Added
-
-```javascript
-this._uid = uid || uuid();
-```
-
-### Usage
-
-Used to set a unique identifier on a bucket
-
-## Model version 11
+## Model version 15
 
 ### Properties Added
 
@@ -139,6 +187,74 @@ Used to set a unique identifier on a bucket
 this._tags = tags || null;
 ```
 
+The Tag Set of a bucket is an array of objects with Key and Value:
+
+```javascript
+[
+    {
+        Key: 'something',
+        Value: 'some_data'
+    }
+]
+```
+
+## Model version 16
+
+### Properties Added
+
+```javascript
+this._capabilities = capabilities || undefined;
+```
+
+For capacity-enabled buckets, contains the following data:
+
+```javascript
+{
+    _capabilities: {
+        VeeamSOSApi?: {
+            SystemInfo?: {
+                ProtocolVersion: String,
+                ModelName: String,
+                ProtocolCapabilities: {
+                    CapacityInfo: Boolean,
+                    UploadSessions: Boolean,
+                    IAMSTS: Boolean,
+                },
+                APIEndpoints: {
+                    IAMEndpoint: String,
+                    STSEndpoint: String,
+                },
+                SystemRecommendations?: {
+                    S3ConcurrentTaskLimit: Number,
+                    S3MultiObjectDelete: Number,
+                    StorageCurrentTasksLimit: Number,
+                    KbBlockSize: Number,
+                }
+                LastModified?: String,
+            },
+            CapacityInfo?: {
+                Capacity: Number,
+                Available: Number,
+                Used: Number,
+                LastModified?: String,
+            },
+        }
+    },
+}
+```
+
 ### Usage
 
 Used to store bucket tagging
+
+## Model version 17
+
+### Properties Added
+
+```javascript
+this._quotaMax = quotaMax || 0;
+```
+
+### Usage
+
+Used to store bucket quota
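Putting the last three versions together, a serialized bucket metadata object carrying the newer fields would look roughly like this (the surrounding shape and values are illustrative; only the field names come from the model versions documented above):

```javascript
const bucketMD = {
    _tags: [{ Key: 'team', Value: 'storage' }],                  // model version 15
    _capabilities: {                                             // model version 16
        VeeamSOSApi: { SystemInfo: { ProtocolVersion: '1.0' } },
    },
    _quotaMax: 10 * 1024 * 1024 * 1024,                          // model version 17 (defaults to 0)
};
```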
Greenkeeper configuration (new file):

@@ -0,0 +1,28 @@
{
  "groups": {
    "default": {
      "packages": [
        "lib/executables/pensieveCreds/package.json",
        "package.json"
      ]
    }
  },
  "branchPrefix": "improvement/greenkeeper.io/",
  "commitMessages": {
    "initialBadge": "docs(readme): add Greenkeeper badge",
    "initialDependencies": "chore(package): update dependencies",
    "initialBranches": "chore(bert-e): whitelist greenkeeper branches",
    "dependencyUpdate": "fix(package): update ${dependency} to version ${version}",
    "devDependencyUpdate": "chore(package): update ${dependency} to version ${version}",
    "dependencyPin": "fix: pin ${dependency} to ${oldVersionResolved}",
    "devDependencyPin": "chore: pin ${dependency} to ${oldVersionResolved}",
    "closes": "\n\nCloses #${number}"
  },
  "ignore": [
    "ajv",
    "eslint",
    "eslint-plugin-react",
    "eslint-config-airbnb",
    "eslint-config-scality"
  ]
}
index.ts (32 lines changed):
@@ -1,14 +1,19 @@
 import * as evaluators from './lib/policyEvaluator/evaluator';
 import evaluatePrincipal from './lib/policyEvaluator/principal';
-import RequestContext from './lib/policyEvaluator/RequestContext';
+import RequestContext, {
+    actionNeedQuotaCheck,
+    actionNeedQuotaCheckCopy,
+    actionWithDataDeletion } from './lib/policyEvaluator/RequestContext';
 import * as requestUtils from './lib/policyEvaluator/requestUtils';
 import * as actionMaps from './lib/policyEvaluator/utils/actionMaps';
 import { validateUserPolicy } from './lib/policy/policyValidator'
+import * as locationConstraints from './lib/patches/locationConstraints';
 import * as userMetadata from './lib/s3middleware/userMetadata';
 import convertToXml from './lib/s3middleware/convertToXml';
 import escapeForXml from './lib/s3middleware/escapeForXml';
 import * as objectLegalHold from './lib/s3middleware/objectLegalHold';
 import * as tagging from './lib/s3middleware/tagging';
+import { checkDateModifiedHeaders } from './lib/s3middleware/validateConditionalHeaders';
 import { validateConditionalHeaders } from './lib/s3middleware/validateConditionalHeaders';
 import MD5Sum from './lib/s3middleware/MD5Sum';
 import NullStream from './lib/s3middleware/nullStream';

@@ -16,8 +21,10 @@ import * as objectUtils from './lib/s3middleware/objectUtils';
 import * as mpuUtils from './lib/s3middleware/azureHelpers/mpuUtils';
 import ResultsCollector from './lib/s3middleware/azureHelpers/ResultsCollector';
 import SubStreamInterface from './lib/s3middleware/azureHelpers/SubStreamInterface';
+import { prepareStream } from './lib/s3middleware/prepareStream';
 import * as processMpuParts from './lib/s3middleware/processMpuParts';
 import * as retention from './lib/s3middleware/objectRetention';
+import * as objectRestore from './lib/s3middleware/objectRestore';
 import * as lifecycleHelpers from './lib/s3middleware/lifecycleHelpers';
 export { default as errors } from './lib/errors';
 export { default as Clustering } from './lib/Clustering';

@@ -34,22 +41,15 @@ export * as stream from './lib/stream';
 export * as jsutil from './lib/jsutil';
 export { default as stringHash } from './lib/stringHash';
 export * as db from './lib/db';
+export * as errorUtils from './lib/errorUtils';
 export { default as shuffle } from './lib/shuffle';
 export * as models from './lib/models';
 
 export const algorithms = {
-    list: {
-        Basic: require('./lib/algos/list/basic').List,
-        Delimiter: require('./lib/algos/list/delimiter').Delimiter,
-        DelimiterVersions: require('./lib/algos/list/delimiterVersions').DelimiterVersions,
-        DelimiterMaster: require('./lib/algos/list/delimiterMaster').DelimiterMaster,
-        MPU: require('./lib/algos/list/MPU').MultipartUploads,
-        DelimiterCurrent: require('./lib/algos/list/delimiterCurrent').DelimiterCurrent,
-        DelimiterNonCurrent: require('./lib/algos/list/delimiterNonCurrent').DelimiterNonCurrent,
-        DelimiterOrphanDeleteMarker: require('./lib/algos/list/delimiterOrphanDeleteMarker').DelimiterOrphanDeleteMarker,
-    },
+    list: require('./lib/algos/list/exportAlgos'),
     listTools: {
         DelimiterTools: require('./lib/algos/list/tools'),
+        Skip: require('./lib/algos/list/skip'),
     },
     cache: {
         GapSet: require('./lib/algos/cache/GapSet'),

@@ -70,6 +70,9 @@ export const policies = {
     RequestContext,
     requestUtils,
     actionMaps,
+    actionNeedQuotaCheck,
+    actionWithDataDeletion,
+    actionNeedQuotaCheckCopy,
 };
 
 export const testing = {

@@ -82,6 +85,7 @@ export const s3middleware = {
     escapeForXml,
     objectLegalHold,
     tagging,
+    checkDateModifiedHeaders,
     validateConditionalHeaders,
     MD5Sum,
     NullStream,

@@ -91,8 +95,10 @@ export const s3middleware = {
         ResultsCollector,
         SubStreamInterface,
     },
+    prepareStream,
     processMpuParts,
     retention,
+    objectRestore,
     lifecycleHelpers,
 };
 

@@ -163,3 +169,7 @@ export const storage = {
 export const pensieve = {
     credentialUtils: require('./lib/executables/pensieveCreds/utils'),
 };
+
+export const patches = {
+    locationConstraints,
+};
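A hypothetical consumer of the re-exported symbols above (the package import name and the action-map lookup key are assumptions, not taken from this diff):

```typescript
import { policies, patches, s3middleware } from 'arsenal';

// quota-related action maps re-exported from RequestContext
const needsQuotaCheck = policies.actionNeedQuotaCheck['objectPut'];
// newly exposed location-constraint patches and middleware helpers
const { locationConstraints } = patches;
const { checkDateModifiedHeaders, prepareStream } = s3middleware;
```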
Delimiter listing algorithm:

@@ -196,6 +196,9 @@ export class Delimiter extends Extension {
     }
 
     getCommonPrefix(key: string): string | undefined {
+        if (!this.delimiter) {
+            return undefined;
+        }
         const baseIndex = this.prefix ? this.prefix.length : 0;
         const delimiterIndex = key.indexOf(this.delimiter, baseIndex);
         if (delimiterIndex === -1) {
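For intuition, here is a self-contained sketch of the rule `getCommonPrefix` implements, restated outside the class for illustration (the final `return` is assumed from standard S3 listing semantics, since the hunk cuts off before it): group a key under everything up to and including the first delimiter found after the requested prefix.

```typescript
function commonPrefix(key: string, delimiter?: string, prefix?: string): string | undefined {
    if (!delimiter) {
        return undefined; // no delimiter configured: nothing to group
    }
    const baseIndex = prefix ? prefix.length : 0;
    const delimiterIndex = key.indexOf(delimiter, baseIndex);
    if (delimiterIndex === -1) {
        return undefined; // no delimiter past the prefix: key is listed as-is
    }
    return key.substring(0, delimiterIndex + delimiter.length);
}

// commonPrefix('photos/2021/jan/1.jpg', '/', 'photos/') === 'photos/2021/'
```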
DelimiterMaster listing algorithm:

@@ -183,6 +183,13 @@ export class DelimiterMaster extends Delimiter {
                 id: DelimiterFilterStateId.NotSkipping,
             };
         }
+        } else {
+            // save base implementation of the `NotSkipping` state in
+            // Delimiter before overriding it with ours, to be able to call it from there
+            this.keyHandler_NotSkipping_Delimiter = this.keyHandlers[DelimiterFilterStateId.NotSkipping];
+            this.setKeyHandler(
+                DelimiterFilterStateId.NotSkipping,
+                this.keyHandler_NotSkippingPrefixNorVersionsV1.bind(this));
+        }
         // in v1, we can directly use Delimiter's implementation,
         // which is already set to the proper state

@@ -416,6 +423,20 @@
         return this.filter_onNewMasterKeyV0(key, value);
     }
 
+    filter_onNewMasterKeyV1(key: string, value: string): FilterReturnValue {
+        // if this master key is a delete marker, accept it without
+        // adding the version to the contents
+        if (Version.isDeleteMarker(value)) {
+            return FILTER_ACCEPT;
+        }
+        // use base Delimiter's implementation
+        return this.keyHandler_NotSkipping_Delimiter(key, value);
+    }
+
+    keyHandler_NotSkippingPrefixNorVersionsV1(key: string, value: string): FilterReturnValue {
+        return this.filter_onNewMasterKeyV1(key, value);
+    }
+
     keyHandler_SkippingVersionsV0(key: string, value: string): FilterReturnValue {
         /* In the SkippingVersionsV0 state, skip all version keys
          * (<key><versionIdSeparator><version>) */
DelimiterVersions listing algorithm:

@@ -396,6 +396,11 @@ export class DelimiterVersions extends Extension {
     }
 
     keyHandler_NotSkippingV1(key: string, versionId: string | undefined, value: string): FilterReturnValue {
+        // NOTE: this check on PHD is only useful for Artesca, S3C
+        // does not use PHDs in V1 format
+        if (Version.isPHD(value)) {
+            return FILTER_ACCEPT;
+        }
         return this.filter_onNewKey(key, versionId, value);
     }
 
Vault client:

@@ -14,7 +14,7 @@ function vaultSignatureCb(
     err: Error | null,
     authInfo: { message: { body: any } },
     log: Logger,
-    callback: (err: Error | null, data?: any, results?: any, params?: any) => void,
+    callback: (err: Error | null, data?: any, results?: any, params?: any, infos?: any) => void,
     streamingV4Params?: any
 ) {
     // vaultclient API guarantees that it returns:

@@ -38,7 +38,9 @@ function vaultSignatureCb(
     }
     // @ts-ignore
     log.addDefaultFields(auditLog);
-    return callback(null, userInfo, authorizationResults, streamingV4Params);
+    return callback(null, userInfo, authorizationResults, streamingV4Params, {
+        accountQuota: info.accountQuota || {},
+    });
 }
 
 export type AuthV4RequestParams = {

@@ -384,4 +386,19 @@ export default class Vault {
             return callback(null, respBody);
         });
     }
+
+    report(log: Logger, callback: (err: Error | null, data?: any) => void) {
+        // call the report function of the client
+        if (!this.client.report) {
+            return callback(null, {});
+        }
+        // @ts-ignore
+        return this.client.report(log.getSerializedUids(), (err: Error | null, obj?: any) => {
+            if (err) {
+                log.debug(`error from ${this.implName}`, { error: err });
+                return callback(err);
+            }
+            return callback(null, obj);
+        });
+    }
 }
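A sketch of a caller consuming the widened callback; the method name `authenticateV4Request` and the surrounding helpers are assumptions, only the fifth `infos` argument and its `accountQuota` field come from the diff above:

```typescript
vault.authenticateV4Request(params, requestContexts,
    (err, userInfo, authResults, streamingV4Params, infos) => {
        if (err) {
            return handleError(err); // hypothetical error handler
        }
        // `infos` is the new fifth argument; accountQuota defaults to {}
        const accountQuota = infos?.accountQuota ?? {};
        return proceedWithQuota(userInfo, accountQuota); // hypothetical continuation
    });
```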
Auth module:

@@ -9,10 +9,12 @@ import * as constants from '../constants';
 import constructStringToSignV2 from './v2/constructStringToSign';
 import constructStringToSignV4 from './v4/constructStringToSign';
 import { convertUTCtoISO8601 } from './v4/timeUtils';
-import * as vaultUtilities from './in_memory/vaultUtilities';
-import * as backend from './in_memory/Backend';
-import validateAuthConfig from './in_memory/validateAuthConfig';
-import AuthLoader from './in_memory/AuthLoader';
+import * as vaultUtilities from './backends/in_memory/vaultUtilities';
+import * as inMemoryBackend from './backends/in_memory/Backend';
+import baseBackend from './backends/base';
+import chainBackend from './backends/ChainBackend';
+import validateAuthConfig from './backends/in_memory/validateAuthConfig';
+import AuthLoader from './backends/in_memory/AuthLoader';
 import Vault from './Vault';
 
 let vault: Vault | null = null;

@@ -233,7 +235,7 @@ function generateV4Headers(
         headerName.startsWith('x-amz-')
         || headerName.startsWith('x-scal-')
         || headerName === 'content-md5'
-        || headerName === 'host'
+        || headerName === 'host',
     ).sort().join(';');
     const params = { request, signedHeaders, payloadChecksum,
         credentialScope, timestamp, query: data,

@@ -254,7 +256,8 @@ function generateV4Headers(
 
 export const server = { extractParams, doAuth }
 export const client = { generateV4Headers, constructStringToSignV2 }
-export const inMemory = { backend, validateAuthConfig, AuthLoader }
+export const inMemory = { backend: inMemoryBackend, validateAuthConfig, AuthLoader }
+export const backends = { baseBackend, chainBackend }
 export {
     setAuthHandler as setHandler,
     AuthInfo,
@ -0,0 +1,233 @@
|
|||
import assert from 'assert';
|
||||
import async from 'async';
|
||||
import errors from '../../errors';
|
||||
import BaseBackend from './base';
|
||||
|
||||
/**
|
||||
* Class that provides an authentication backend that will verify signatures
|
||||
* and retrieve emails and canonical ids associated with an account using a
|
||||
* given list of authentication backends and vault clients.
|
||||
*
|
||||
* @class ChainBackend
|
 */
export default class ChainBackend extends BaseBackend {
    _clients: any[];

    /**
     * @constructor
     * @param {string} service - service id
     * @param {object[]} clients - list of authentication backends or vault clients
     */
    constructor(service: string, clients: any[]) {
        super(service);

        assert(Array.isArray(clients) && clients.length > 0, 'invalid client list');
        assert(clients.every(client =>
            typeof client.verifySignatureV4 === 'function' &&
            typeof client.verifySignatureV2 === 'function' &&
            typeof client.getCanonicalIds === 'function' &&
            typeof client.getEmailAddresses === 'function' &&
            typeof client.checkPolicies === 'function' &&
            typeof client.healthcheck === 'function',
        ), 'invalid client: missing required auth backend methods');
        this._clients = clients;
    }

    /*
     * try the task against each client until one succeeds
     */
    _tryEachClient(task: any, cb: any) {
        // @ts-ignore
        async.tryEach(this._clients.map(client => done => task(client, done)), cb);
    }

    /*
     * apply task to all clients
     */
    _forEachClient(task: any, cb: any) {
        async.map(this._clients, task, cb);
    }

    verifySignatureV2(
        stringToSign: string,
        signatureFromRequest: string,
        accessKey: string,
        options: any,
        callback: any,
    ) {
        this._tryEachClient((client, done) => client.verifySignatureV2(
            stringToSign,
            signatureFromRequest,
            accessKey,
            options,
            done,
        ), callback);
    }

    verifySignatureV4(
        stringToSign: string,
        signatureFromRequest: string,
        accessKey: string,
        region: string,
        scopeDate: string,
        options: any,
        callback: any,
    ) {
        this._tryEachClient((client, done) => client.verifySignatureV4(
            stringToSign,
            signatureFromRequest,
            accessKey,
            region,
            scopeDate,
            options,
            done,
        ), callback);
    }

    static _mergeObjects(objectResponses: any) {
        return objectResponses.reduce(
            (retObj, resObj) => Object.assign(retObj, resObj.message.body),
            {});
    }

    getCanonicalIds(emailAddresses: string[], options: any, callback: any) {
        this._forEachClient(
            (client, done) => client.getCanonicalIds(emailAddresses, options, done),
            (err, res) => {
                if (err) {
                    return callback(err);
                }
                // TODO: atm naive merge, better handling of conflicting email results
                return callback(null, {
                    message: {
                        body: ChainBackend._mergeObjects(res),
                    },
                });
            });
    }

    getEmailAddresses(canonicalIDs: string[], options: any, callback: any) {
        this._forEachClient(
            (client, done) => client.getEmailAddresses(canonicalIDs, options, done),
            (err, res) => {
                if (err) {
                    return callback(err);
                }
                return callback(null, {
                    message: {
                        body: ChainBackend._mergeObjects(res),
                    },
                });
            });
    }

    /*
     * merge policy responses into a single message
     */
    static _mergePolicies(policyResponses: any) {
        const policyMap: any = {};

        policyResponses.forEach(resp => {
            if (!resp.message || !Array.isArray(resp.message.body)) {
                return;
            }

            const check = (policy) => {
                const key = (policy.arn || '') + (policy.versionId || '') + (policy.action || '');
                if (!policyMap[key] || !policyMap[key].isAllowed) {
                    policyMap[key] = policy;
                }
                // else it is a duplicate policy
            };

            resp.message.body.forEach(policy => {
                if (Array.isArray(policy)) {
                    policy.forEach(authResult => check(authResult));
                } else {
                    check(policy);
                }
            });
        });

        return Object.keys(policyMap).map(key => {
            const policyRes: any = { isAllowed: policyMap[key].isAllowed };
            if (policyMap[key].arn !== '') {
                policyRes.arn = policyMap[key].arn;
            }
            if (policyMap[key].versionId) {
                policyRes.versionId = policyMap[key].versionId;
            }
            if (policyMap[key].isImplicit !== undefined) {
                policyRes.isImplicit = policyMap[key].isImplicit;
            }
            if (policyMap[key].action) {
                policyRes.action = policyMap[key].action;
            }
            return policyRes;
        });
    }

    /*
        response format:
            { message: {
                body: [{}],
                code: number,
                message: string,
            } }
    */
    checkPolicies(requestContextParams: any, userArn: string, options: any, callback: any) {
        this._forEachClient((client, done) => client.checkPolicies(
            requestContextParams,
            userArn,
            options,
            done,
        ), (err, res) => {
            if (err) {
                return callback(err);
            }
            return callback(null, {
                message: {
                    body: ChainBackend._mergePolicies(res),
                },
            });
        });
    }

    healthcheck(reqUid: string, callback: any) {
        this._forEachClient((client, done) =>
            client.healthcheck(reqUid, (err, res) => done(null, {
                error: !!err ? err : null,
                status: res,
            }),
        ), (err, res) => {
            if (err) {
                return callback(err);
            }

            const isError = res.some(results => !!results.error);
            if (isError) {
                return callback(errors.InternalError, res);
            }
            return callback(null, res);
        });
    }

    report(reqUid: string, callback: any) {
        this._forEachClient((client, done) =>
            client.report(reqUid, done),
        (err, res) => {
            if (err) {
                return callback(err);
            }
            const mergedRes = res.reduce((acc, val) => {
                Object.keys(val).forEach(k => {
                    acc[k] = val[k];
                });
                return acc;
            }, {});

            return callback(null, mergedRes);
        });
    }
}
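For orientation, a minimal usage sketch of the chain pattern above. The import paths and the auth-data variables are illustrative, not taken from this changeset:

// Hypothetical wiring: both clients satisfy the method checks asserted
// in the constructor, since S3AuthBackend ultimately extends BaseBackend.
import ChainBackend from './ChainBackend';
import { s3 as S3AuthBackend } from './in_memory/Backend';

const primary = new S3AuthBackend(mainAuthData);     // assumed account data
const fallback = new S3AuthBackend(backupAuthData);  // assumed account data
const chain = new ChainBackend('s3', [primary, fallback]);

// verifySignatureV2/V4 are tried against each client until one succeeds;
// getCanonicalIds/getEmailAddresses/checkPolicies fan out to all clients
// and merge the responses.
chain.healthcheck('req-uid', (err, statuses) => { /* ... */ });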
@@ -0,0 +1,96 @@
import errors from '../../errors';

/**
 * Base backend class
 *
 * @class BaseBackend
 */
export default class BaseBackend {
    service: string;

    /**
     * @constructor
     * @param {string} service - service identifier for constructing the ARN
     */
    constructor(service: string) {
        this.service = service;
    }

    /** verifySignatureV2
     * @param stringToSign - string to sign built per AWS rules
     * @param signatureFromRequest - signature sent with request
     * @param accessKey - account accessKey
     * @param options - contains algorithm (SHA1 or SHA256)
     * @param callback - callback with either error or user info
     * @return calls callback
     */
    verifySignatureV2(
        stringToSign: string,
        signatureFromRequest: string,
        accessKey: string,
        options: any,
        callback: any
    ) {
        return callback(errors.AuthMethodNotImplemented);
    }

    /** verifySignatureV4
     * @param stringToSign - string to sign built per AWS rules
     * @param signatureFromRequest - signature sent with request
     * @param accessKey - account accessKey
     * @param region - region specified in request credential
     * @param scopeDate - date specified in request credential
     * @param options - options to send to Vault
     * (just contains reqUid for logging in Vault)
     * @param callback - callback with either error or user info
     * @return calls callback
     */
    verifySignatureV4(
        stringToSign: string,
        signatureFromRequest: string,
        accessKey: string,
        region: string,
        scopeDate: string,
        options: any,
        callback: any
    ) {
        return callback(errors.AuthMethodNotImplemented);
    }

    /**
     * Gets canonical IDs for a list of accounts
     * based on the email associated with each account
     * @param emails - list of email addresses
     * @param options - to send log id to vault
     * @param callback - callback to calling function
     * @returns callback with either error or
     * an object with email addresses as keys and canonical IDs
     * as values
     */
    getCanonicalIds(emails: string[], options: any, callback: any) {
        return callback(errors.AuthMethodNotImplemented);
    }

    /**
     * Gets email addresses (referred to as display names for getACL)
     * for a list of accounts based on canonical IDs associated with each account
     * @param canonicalIDs - list of canonicalIDs
     * @param options - to send log id to vault
     * @param callback - callback to calling function
     * @returns callback with either error or
     * an object from Vault containing account canonicalID
     * as each object key and an email address as the value (or "NotFound")
     */
    getEmailAddresses(canonicalIDs: string[], options: any, callback: any) {
        return callback(errors.AuthMethodNotImplemented);
    }

    checkPolicies(requestContextParams: any, userArn: string, options: any, callback: any) {
        return callback(null, { message: { body: [] } });
    }

    healthcheck(reqUid: string, callback: any) {
        return callback(null, { code: 200, message: 'OK' });
    }
}
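As a rough sketch of the intended extension point (class and key names are hypothetical): a subclass overrides only what it supports and inherits the 501 stubs for everything else.

import BaseBackend from './base';

// Hypothetical backend that only supports V2 signatures.
class StaticKeyBackend extends BaseBackend {
    verifySignatureV2(stringToSign: string, signatureFromRequest: string,
        accessKey: string, options: any, callback: any) {
        // Toy lookup, not a real HMAC check.
        if (accessKey === 'ACCESSKEY') {
            return callback(null, { message: { body: { accountDisplayName: 'test' } } });
        }
        return callback(new Error('signature mismatch'));
    }
    // verifySignatureV4, getCanonicalIds, getEmailAddresses still return
    // errors.AuthMethodNotImplemented from the base class.
}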
@@ -4,7 +4,7 @@ import joi from 'joi';
import werelogs from 'werelogs';
import * as types from './types';
import { Account, Accounts } from './types';
import ARN from '../../models/ARN';
import ARN from '../../../models/ARN';

/** Load authentication information from files or pre-loaded account objects */
export default class AuthLoader {
@@ -1,7 +1,9 @@
import * as crypto from 'crypto';
import errors from '../../errors';
import crypto from 'crypto';
import { Logger } from 'werelogs';
import errors from '../../../errors';
import { calculateSigningKey, hashSignature } from './vaultUtilities';
import Indexer from './Indexer';
import BaseBackend from '../base';
import { Accounts } from './types';

function _formatResponse(userInfoToSend: any) {

@@ -15,26 +17,32 @@ function _formatResponse(userInfoToSend: any) {
/**
 * Class that provides a memory backend for verifying signatures and getting
 * emails and canonical ids associated with an account.
 *
 * @class InMemoryBackend
 */
class Backend {
class InMemoryBackend extends BaseBackend {
    indexer: Indexer;
    service: string;
    formatResponse: any;

    constructor(service: string, indexer: Indexer) {
        this.service = service;
    /**
     * @constructor
     * @param service - service identifier for constructing the ARN
     * @param indexer - indexer instance for retrieving account info
     * @param formatter - function which accepts user info to send
     * back and returns it in an object
     */
    constructor(service: string, indexer: Indexer, formatter: typeof _formatResponse) {
        super(service);
        this.indexer = indexer;
        this.formatResponse = formatter;
    }

    // CODEQUALITY-TODO-SYNC Should be synchronous
    verifySignatureV2(
        stringToSign: string,
        signatureFromRequest: string,
        accessKey: string,
        options: { algo: 'SHA256' | 'SHA1' },
        callback: (
            error: Error | null,
            data?: ReturnType<typeof _formatResponse>
        ) => void
        options: any,
        callback: any,
    ) {
        const entity = this.indexer.getEntityByKey(accessKey);
        if (!entity) {

@@ -50,27 +58,21 @@ class Backend {
            accountDisplayName: this.indexer.getAcctDisplayName(entity),
            canonicalID: entity.canonicalID,
            arn: entity.arn,
            // TODO Why?
            // @ts-ignore
            IAMdisplayName: entity.IAMdisplayName,
        };
        const vaultReturnObject = _formatResponse(userInfoToSend);
        const vaultReturnObject = this.formatResponse(userInfoToSend);
        return callback(null, vaultReturnObject);
    }

    // TODO Options not used. Why?
    // CODEQUALITY-TODO-SYNC Should be synchronous
    verifySignatureV4(
        stringToSign: string,
        signatureFromRequest: string,
        accessKey: string,
        region: string,
        scopeDate: string,
        _options: { algo: 'SHA256' | 'SHA1' },
        callback: (
            err: Error | null,
            data?: ReturnType<typeof _formatResponse>
        ) => void
        options: any,
        callback: any,
    ) {
        const entity = this.indexer.getEntityByKey(accessKey);
        if (!entity) {

@@ -87,21 +89,14 @@ class Backend {
            accountDisplayName: this.indexer.getAcctDisplayName(entity),
            canonicalID: entity.canonicalID,
            arn: entity.arn,
            // TODO Why?
            // @ts-ignore
            IAMdisplayName: entity.IAMdisplayName,
        };
        const vaultReturnObject = _formatResponse(userInfoToSend);
        const vaultReturnObject = this.formatResponse(userInfoToSend);
        return callback(null, vaultReturnObject);
    }

    // TODO log not used. Why?
    // CODEQUALITY-TODO-SYNC Should be synchronous
    getCanonicalIds(
        emails: string[],
        _log: any,
        cb: (err: null, data: { message: { body: any } }) => void
    ) {
    getCanonicalIds(emails: string[], log: Logger, cb: any) {
        const results = {};
        emails.forEach(email => {
            const lowercasedEmail = email.toLowerCase();

@@ -121,13 +116,7 @@ class Backend {
        return cb(null, vaultReturnObject);
    }

    // TODO options not used. Why?
    // CODEQUALITY-TODO-SYNC Should be synchronous
    getEmailAddresses(
        canonicalIDs: string[],
        _options: any,
        cb: (err: null, data: { message: { body: any } }) => void
    ) {
    getEmailAddresses(canonicalIDs: string[], options: any, cb: any) {
        const results = {};
        canonicalIDs.forEach(canonicalId => {
            const foundEntity = this.indexer.getEntityByCanId(canonicalId);

@@ -145,24 +134,17 @@ class Backend {
        return cb(null, vaultReturnObject);
    }

    // TODO options not used. Why?
    // CODEQUALITY-TODO-SYNC Should be synchronous
    /**
     * Gets accountIds for a list of accounts based on
     * the canonical IDs associated with the account
     * @param canonicalIDs - list of canonicalIDs
     * @param _options - to send log id to vault
     * @param options - to send log id to vault
     * @param cb - callback to calling function
     * @returns The next is wrong. Here to keep archives.
     * callback with either error or
     * @returns callback with either error or
     * an object from Vault containing account canonicalID
     * as each object key and an accountId as the value (or "NotFound")
     */
    getAccountIds(
        canonicalIDs: string[],
        _options: any,
        cb: (err: null, data: { message: { body: any } }) => void
    ) {
    getAccountIds(canonicalIDs: string[], options: any, cb: any) {
        const results = {};
        canonicalIDs.forEach(canonicalID => {
            const foundEntity = this.indexer.getEntityByCanId(canonicalID);

@@ -179,16 +161,34 @@ class Backend {
        };
        return cb(null, vaultReturnObject);
    }

    report(log: Logger, callback: any) {
        return callback(null, {});
    }
}

class S3AuthBackend extends Backend {
    constructor(authdata: Accounts) {
        super('s3', new Indexer(authdata));

class S3AuthBackend extends InMemoryBackend {
    /**
     * @constructor
     * @param authdata - the authentication config file's data
     * @param authdata.accounts - array of account objects
     * @param authdata.accounts[].name - account name
     * @param authdata.accounts[].email - account email
     * @param authdata.accounts[].arn - IAM resource name
     * @param authdata.accounts[].canonicalID - account canonical ID
     * @param authdata.accounts[].shortid - short account ID
     * @param authdata.accounts[].keys - array of key objects
     * @param authdata.accounts[].keys[].access - access key
     * @param authdata.accounts[].keys[].secret - secret key
     */
    constructor(authdata?: Accounts) {
        super('s3', new Indexer(authdata), _formatResponse);
    }

    refreshAuthData(authData: Accounts) {
    refreshAuthData(authData?: Accounts) {
        this.indexer = new Indexer(authData);
    }
}

export { S3AuthBackend as s3 };
export { S3AuthBackend as s3 }
@@ -42,37 +42,40 @@ export default function awsURIencode(
    if (typeof input !== 'string') {
        return '';
    }

    // precalc slash and star based on configs
    let encoded = "";
    const slash = encodeSlash === undefined || encodeSlash ? '%2F' : '/';
    const star = noEncodeStar !== undefined && noEncodeStar ? '*' : '%2A';
    const encoded: string[] = [];

    const charArray = Array.from(input);
    for (const ch of charArray) {
        switch (true) {
        case ch >= 'A' && ch <= 'Z':
        case ch >= 'a' && ch <= 'z':
        case ch >= '0' && ch <= '9':
        case ch === '-':
        case ch === '_':
        case ch === '~':
        case ch === '.':
            encoded.push(ch);
            break;
        case ch === '/':
            encoded.push(slash);
            break;
        case ch === '*':
            encoded.push(star);
            break;
        case ch === ' ':
            encoded.push('%20');
            break;
        default:
            encoded.push(_toHexUTF8(ch));
            break;
    for (let i = 0; i < input.length; i++) {
        let ch = input.charAt(i);
        if ((ch >= 'A' && ch <= 'Z') ||
            (ch >= 'a' && ch <= 'z') ||
            (ch >= '0' && ch <= '9') ||
            ch === '_' || ch === '-' ||
            ch === '~' || ch === '.') {
            encoded = encoded.concat(ch);
        } else if (ch === ' ') {
            encoded = encoded.concat('%20');
        } else if (ch === '/') {
            encoded = encoded.concat(slash);
        } else if (ch === '*') {
            encoded = encoded.concat(star);
        } else {
            if (ch >= '\uD800' && ch <= '\uDBFF') {
                // If this character is a high surrogate peek the next character
                // and join it with this one if the next character is a low
                // surrogate.
                // Otherwise the encoded URI will contain the two surrogates as
                // two distinct UTF-8 sequences which is not valid UTF-8.
                if (i + 1 < input.length) {
                    const ch2 = input.charAt(i + 1);
                    if (ch2 >= '\uDC00' && ch2 <= '\uDFFF') {
                        i++;
                        ch += ch2;
                    }
                }
    return encoded.join('');
            }
            encoded = encoded.concat(_toHexUTF8(ch));
        }
    }
    return encoded;
}
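Expected behavior of the retained implementation, sketched from the code above (outputs inferred, not taken from a test fixture):

awsURIencode('photos/2017/January');        // 'photos%2F2017%2FJanuary'
awsURIencode('photos/2017/January', false); // 'photos/2017/January' (slash kept)
awsURIencode('a b*c');                      // 'a%20b%2Ac'
// A surrogate pair such as '\uD83D\uDE00' (U+1F600) is joined into one
// code point before hex encoding, yielding a single 4-byte UTF-8
// sequence ('%F0%9F%98%80') instead of two invalid 3-byte sequences.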
@@ -132,6 +132,17 @@ export function check(
        return { err: errors.RequestTimeTooSkewed };
    }

    let proxyPath: string | undefined;
    if (request.headers.proxy_path) {
        try {
            proxyPath = decodeURIComponent(request.headers.proxy_path);
        } catch (err) {
            log.debug('invalid proxy_path header', { proxyPath, err });
            return { err: errors.InvalidArgument.customizeDescription(
                'invalid proxy_path header') };
        }
    }

    const stringToSign = constructStringToSign({
        log,
        request,

@@ -141,6 +152,7 @@ export function check(
        timestamp,
        payloadChecksum,
        awsService: service,
        proxyPath,
    });
    log.trace('constructed stringToSign', { stringToSign });
    if (stringToSign instanceof Error) {
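The header is expected to arrive percent-encoded; a hedged sketch of the sending side (the path value is illustrative):

// The proxy percent-encodes the original path so that the
// decodeURIComponent() call above can round-trip it:
request.headers.proxy_path = encodeURIComponent('/real/bucket path/key');
// A malformed value such as 'bad%zz' makes decodeURIComponent throw,
// which this check maps to errors.InvalidArgument.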
@@ -56,6 +56,17 @@ export function check(request: any, log: Logger, data: { [key: string]: string }
        return { err: errors.RequestTimeTooSkewed };
    }

    let proxyPath: string | undefined;
    if (request.headers.proxy_path) {
        try {
            proxyPath = decodeURIComponent(request.headers.proxy_path);
        } catch (err) {
            log.debug('invalid proxy_path header', { proxyPath });
            return { err: errors.InvalidArgument.customizeDescription(
                'invalid proxy_path header') };
        }
    }

    // In query v4 auth, the canonical request needs
    // to include the query params OTHER THAN
    // the signature so create a

@@ -81,6 +92,7 @@ export function check(request: any, log: Logger, data: { [key: string]: string }
        credentialScope:
            `${scopeDate}/${region}/${service}/${requestType}`,
        awsService: service,
        proxyPath,
    });
    if (stringToSign instanceof Error) {
        return { err: stringToSign };
@@ -3,7 +3,7 @@ import async from 'async';
import errors from '../../../errors';
import { Logger } from 'werelogs';
import Vault, { AuthV4RequestParams } from '../../Vault';
import { Callback } from '../../in_memory/types';
import { Callback } from '../../backends/in_memory/types';

import constructChunkStringToSign from './constructChunkStringToSign';
|
@ -83,7 +83,7 @@ export type ResultObject = {
|
|||
export type CommandPromise = {
|
||||
resolve: (results?: ResultObject[]) => void;
|
||||
reject: (error: Error) => void;
|
||||
timeout: NodeJS.Timer | null;
|
||||
timeout: NodeJS.Timeout | null;
|
||||
};
|
||||
export type HandlerCallback = (error: (Error & { code?: number }) | null | undefined, result?: any) => void;
|
||||
export type HandlerFunction = (payload: object, uids: string, callback: HandlerCallback) => void;
|
||||
|
@ -254,7 +254,7 @@ export async function sendWorkerCommand(
|
|||
}
|
||||
rpcLogger.info('sending command', { toWorkers, toHandler, uids, payload });
|
||||
return new Promise((resolve, reject) => {
|
||||
let timeout: NodeJS.Timer | null = null;
|
||||
let timeout: NodeJS.Timeout | null = null;
|
||||
if (timeoutMs) {
|
||||
timeout = setTimeout(() => {
|
||||
delete uidsToCommandPromise[uids];
|
||||
|
|
|
@@ -2,18 +2,18 @@ import * as crypto from 'crypto';

// The min value here is to manage further backward compat if we
// need it
const iamSecurityTokenSizeMin = 128;
const iamSecurityTokenSizeMax = 128;
// Security token is a hex string (no real format from amazon)
const iamSecurityTokenPattern = new RegExp(
    `^[a-f0-9]{${iamSecurityTokenSizeMin},${iamSecurityTokenSizeMax}}$`,
);
// Default value
export const vaultGeneratedIamSecurityTokenSizeMin = 128;
// Safe to assume that a typical token size is less than 8192 bytes
export const vaultGeneratedIamSecurityTokenSizeMax = 8192;
// Base-64
export const vaultGeneratedIamSecurityTokenPattern = /^[A-Za-z0-9/+=]*$/;

// info about the iam security token
export const iamSecurityToken = {
    min: iamSecurityTokenSizeMin,
    max: iamSecurityTokenSizeMax,
    pattern: iamSecurityTokenPattern,
    min: vaultGeneratedIamSecurityTokenSizeMin,
    max: vaultGeneratedIamSecurityTokenSizeMax,
    pattern: vaultGeneratedIamSecurityTokenPattern,
};
// PublicId is used as the canonicalID for a request that contains
// no authentication information. Requestor can access
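A small sketch of how the exported bundle can be used to validate a Vault-generated token against the relaxed base-64 format (the helper name is ours, not part of the changeset):

import { iamSecurityToken } from './constants';

function isPlausibleSecurityToken(token: string): boolean {
    return token.length >= iamSecurityToken.min &&
        token.length <= iamSecurityToken.max &&
        iamSecurityToken.pattern.test(token);
}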
@@ -22,6 +22,7 @@ export const publicId = 'http://acs.amazonaws.com/groups/global/AllUsers';
export const zenkoServiceAccount = 'http://acs.zenko.io/accounts/service';
export const metadataFileNamespace = '/MDFile';
export const dataFileURL = '/DataFile';
export const passthroughFileURL = '/PassthroughFile';
// AWS states max size for user-defined metadata
// (x-amz-meta- headers) is 2 KB:
// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html

@@ -31,7 +32,16 @@ export const maximumMetaHeadersSize = 2136;
export const emptyFileMd5 = 'd41d8cd98f00b204e9800998ecf8427e';
// Version 2 changes the format of the data location property
// Version 3 adds the dataStoreName attribute
export const mdModelVersion = 3;
// Version 4 adds the Creation-Time and Content-Language attributes,
// and adds support for x-ms-meta-* headers in UserMetadata
// Version 5 adds the azureInfo structure
// Version 6 adds a "deleted" flag that is updated to true before
// the object gets deleted. This is done to keep object metadata in the
// oplog when deleting the object, as oplog deletion events don't contain
// any metadata of the object.
// Version 6 also adds the "isPHD" flag that is used to indicate that the master
// object is a placeholder and is not up to date.
export const mdModelVersion = 6;
/*
 * Splitter is used to build the object name for the overview of a
 * multipart upload and to build the object names for each part of a

@@ -71,19 +81,45 @@ export const mpuBucketPrefix = 'mpuShadowBucket';
export const permittedCapitalizedBuckets = {
    METADATA: true,
};
// Setting a lower object key limit to account for:
// - Mongo key limit of 1012 bytes
// - Version ID in Mongo Key if versioned of 33
// - Max bucket name length if bucket match false of 63
// - Extra prefix slash for bucket prefix if bucket match of 1
export const objectKeyByteLimit = 915;
/* delimiter for location-constraint. The location constraint will be able
 * to include the ingestion flag
 */
export const zenkoSeparator = ':';
/* eslint-disable camelcase */
export const externalBackends = { aws_s3: true, azure: true, gcp: true, pfs: true }
export const hasCopyPartBackends = { aws_s3: true, gcp: true }
export const versioningNotImplBackends = { azure: true, gcp: true }
export const mpuMDStoredExternallyBackend = { aws_s3: true, gcp: true }
export const externalBackends = { aws_s3: true, azure: true, gcp: true, pfs: true };
export const replicationBackends = { aws_s3: true, azure: true, gcp: true };
// hex digest of sha256 hash of empty string:
export const emptyStringHash = crypto.createHash('sha256')
    .update('', 'binary').digest('hex');
export const mpuMDStoredExternallyBackend = { aws_s3: true, gcp: true };
// AWS sets a minimum size limit for parts except for the last part.
// http://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadComplete.html
export const minimumAllowedPartSize = 5242880;
// hex digest of sha256 hash of empty string:
export const emptyStringHash = crypto.createHash('sha256').update('', 'binary').digest('hex');
export const gcpMaximumAllowedPartCount = 1024;
// GCP Object Tagging Prefix
export const gcpTaggingPrefix = 'aws-tag-';
export const productName = 'APN/1.0 Scality/1.0 Scality CloudServer for Zenko';
export const legacyLocations = ['sproxyd', 'legacy'];
// healthcheck default call from nginx is every 2 seconds
// for external backends, don't call unless at least 1 minute
// (60,000 milliseconds) since last call
export const externalBackendHealthCheckInterval = 60000;
// some of the available data backends (if called directly rather
// than through the multiple backend gateway) need a key provided
// as a string as first parameter of the get/delete methods.
export const clientsRequireStringKey = { sproxyd: true, cdmi: true };
export const hasCopyPartBackends = { aws_s3: true, gcp: true };
export const versioningNotImplBackends = { azure: true, gcp: true };
// user metadata applied on zenko-created objects
export const zenkoIDHeader = 'x-amz-meta-zenko-instance-id';
// Default expiration value of the S3 pre-signed URL duration
// 604800 seconds (seven days).
export const legacyLocations = ['sproxyd', 'legacy'];
export const defaultPreSignedURLExpiry = 7 * 24 * 60 * 60;
// Regex for ISO-8601 formatted date
export const shortIso8601Regex = /\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z/;

@@ -96,16 +132,21 @@ export const supportedNotificationEvents = new Set([
    's3:ObjectRemoved:*',
    's3:ObjectRemoved:Delete',
    's3:ObjectRemoved:DeleteMarkerCreated',
    's3:Replication:OperationFailedReplication',
    's3:ObjectTagging:*',
    's3:ObjectTagging:Put',
    's3:ObjectTagging:Delete',
    's3:ObjectAcl:Put',
    's3:ObjectRestore:*',
    's3:ObjectRestore:Post',
    's3:ObjectRestore:Completed',
    's3:ObjectRestore:Delete',
    's3:LifecycleTransition',
    's3:LifecycleExpiration:*',
    's3:LifecycleExpiration:DeleteMarkerCreated',
    's3:LifecycleExpiration:Delete',
]);
export const notificationArnPrefix = 'arn:scality:bucketnotif';
// some of the available data backends (if called directly rather
// than through the multiple backend gateway) need a key provided
// as a string as first parameter of the get/delete methods.
export const clientsRequireStringKey = { sproxyd: true, cdmi: true };
// HTTP server keep-alive timeout is set to a higher value than
// client's free sockets timeout to avoid the risk of triggering
// ECONNRESET errors if the server closes the connection at the

@@ -122,10 +163,14 @@ export const supportedLifecycleRules = [
    'expiration',
    'noncurrentVersionExpiration',
    'abortIncompleteMultipartUpload',
    'transitions',
    'noncurrentVersionTransition',
];
// Maximum number of buckets to cache (bucket metadata)
export const maxCachedBuckets = process.env.METADATA_MAX_CACHED_BUCKETS ?
    Number(process.env.METADATA_MAX_CACHED_BUCKETS) : 1000;

export const validRestoreObjectTiers = new Set(['Expedited', 'Standard', 'Bulk']);
export const maxBatchingConcurrentOperations = 5;

/** For policy resource arn check we allow empty account ID to not break compatibility */
@@ -1042,3 +1042,15 @@
    description: 'AuthMethodNotImplemented',
    code: 501,
};

// --------------------- quotaErrors ---------------------

export const NoSuchQuota: ErrorFormat = {
    code: 404,
    description: 'The specified resource does not have a quota.',
};

export const QuotaExceeded: ErrorFormat = {
    code: 429,
    description: 'The quota set for the resource is exceeded.',
};
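How these surface to callers is not shown in this hunk; a hedged sketch of a plausible call site (all names around the comparison are assumptions):

// Hypothetical quota enforcement: reject a write once the bucket quota is hit.
if (bytesInUse + contentLength > quotaMax) {
    return callback(errors.QuotaExceeded);
}
// Reading a quota that was never set would map to errors.NoSuchQuota.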
@@ -2,7 +2,7 @@ import type { ServerResponse } from 'http';
import * as rawErrors from './arsenalErrors';

/** All possible errors names. */
export type Name = keyof typeof rawErrors
export type Name = keyof typeof rawErrors;
/** Object containing all errors names. It has the format { [Name]: "Name" } */
export type Names = { [Name_ in Name]: Name_ };
/** Mapping used to determine an error type. It has the format { [Name]: boolean } */

@@ -13,7 +13,7 @@ export type Errors = { [_ in Name]: ArsenalError };
// This object is reused constantly through createIs, we store it there
// to avoid recomputation.
const isBase = Object.fromEntries(
    Object.keys(rawErrors).map(key => [key, false])
    Object.keys(rawErrors).map((key) => [key, false])
) as Is;

// This allows to conditionally add the old behavior of errors to properly

@@ -32,7 +32,7 @@ export const allowUnsafeErrComp = (
// the Proxy will return false.
const createIs = (type: Name): Is => {
    const get = (is: Is, value: string | symbol) => is[value] ?? false;
    const final = Object.freeze({ ...isBase, [type]: true })
    const final = Object.freeze({ ...isBase, [type]: true });
    return new Proxy(final, { get });
};

@@ -46,13 +46,18 @@ export class ArsenalError extends Error {
    /** Object used to determine the error type.
     * Example: error.is.InternalError */
    #is: Is;
    /** A map of error metadata (can be extra fields
     * that only show in debug mode) */
    #metadata: Map<string, Object[]>;

    private constructor(type: Name, code: number, description: string) {
    private constructor(type: Name, code: number, description: string,
        metadata?: Map<string, Object[]>) {
        super(type);
        this.#code = code;
        this.#description = description;
        this.#type = type;
        this.#is = createIs(type);
        this.#metadata = metadata ?? new Map<string, Object[]>();

        // This restores the old behavior of errors, to make sure they're now
        // backward-compatible. Fortunately it's handled by TS, but it cannot

@@ -106,7 +111,22 @@ export class ArsenalError extends Error {
    customizeDescription(description: string): ArsenalError {
        const type = this.#type;
        const code = this.#code;
        return new ArsenalError(type, code, description);
        const metadata = new Map(this.#metadata);
        const err = new ArsenalError(type, code, description, metadata);
        err.stack = this.stack;
        return err;
    }

    /** Clone the error with a new metadata field */
    addMetadataEntry(key: string, value: Object[]): ArsenalError {
        const type = this.#type;
        const code = this.#code;
        const description = this.#description;
        const metadata = new Map(this.#metadata);
        metadata.set(key, value);
        const err = new ArsenalError(type, code, description, metadata);
        err.stack = this.stack;
        return err;
    }

    /** Used to determine the error type. Example: error.is.InternalError */

@@ -131,9 +151,14 @@ export class ArsenalError extends Error {
        return this.#type;
    }

    /** A map of error metadata */
    get metadata() {
        return this.#metadata;
    }

    /** Generate all possible errors. An instance is created by default. */
    static errors() {
        const errors = {}
        const errors = {};
        Object.entries(rawErrors).forEach((value) => {
            const name = value[0] as Name;
            const error = value[1];

@@ -141,7 +166,7 @@ export class ArsenalError extends Error {
            const get = () => new ArsenalError(name, code, description);
            Object.defineProperty(errors, name, { get });
        });
        return errors as Errors
        return errors as Errors;
    }
}
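Sketch of the new metadata flow (the import path is assumed): entries survive customizeDescription because the clone copies both the map and the stack.

import errors from './errors'; // path assumed

const tagged = errors.InvalidArgument
    .addMetadataEntry('requestInfo', [{ header: 'proxy_path' }]);
const err = tagged.customizeDescription('invalid proxy_path header');
// err.metadata.get('requestInfo') => [{ header: 'proxy_path' }]
// err.is.InvalidArgument          => true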
@@ -7,8 +7,8 @@
        "test": "mocha --recursive --timeout 5500 tests/unit"
    },
    "dependencies": {
        "mocha": "2.5.3",
        "async": "^2.6.0",
        "mocha": "5.2.0",
        "async": "~2.6.1",
        "node-forge": "^0.7.1"
    }
}
@@ -20,7 +20,32 @@ export default class RedisClient {
        return this;
    }

    /** increment value of a key by 1 and set a ttl */
    /**
     * scan a pattern and return matching keys
     * @param pattern - string pattern to match with all existing keys
     * @param [count=10] - scan count
     * @param cb - callback (error, result)
     */
    scan(pattern: string, count = 10, cb: Callback) {
        const params = { match: pattern, count };
        const keys: any[] = [];

        const stream = this._client.scanStream(params);
        stream.on('data', resultKeys => {
            for (let i = 0; i < resultKeys.length; i++) {
                keys.push(resultKeys[i]);
            }
        });
        stream.on('end', () => {
            cb(null, keys);
        });
    }

    /** increment value of a key by 1 and set a ttl
     * @param key - key holding the value
     * @param expiry - expiry in seconds
     * @param cb - callback
     */
    incrEx(key: string, expiry: number, cb: Callback) {
        const exp = expiry.toString();
        return this._client

@@ -28,7 +53,22 @@ export default class RedisClient {
            .exec(cb);
    }

    /** increment value of a key by a given amount and set a ttl */
    /**
     * increment value of a key by a given amount
     * @param key - key holding the value
     * @param amount - amount to increase by
     * @param cb - callback
     */
    incrby(key: string, amount: number, cb: Callback) {
        return this._client.incrby(key, amount, cb);
    }

    /** increment value of a key by a given amount and set a ttl
     * @param key - key holding the value
     * @param amount - amount to increase by
     * @param expiry - expiry in seconds
     * @param cb - callback
     */
    incrbyEx(key: string, amount: number, expiry: number, cb: Callback) {
        const am = amount.toString();
        const exp = expiry.toString();

@@ -37,13 +77,29 @@ export default class RedisClient {
            .exec(cb);
    }

    /** execute a batch of commands */
    /**
     * decrement value of a key by a given amount
     * @param key - key holding the value
     * @param amount - amount to decrease by
     * @param cb - callback
     */
    decrby(key: string, amount: number, cb: Callback) {
        return this._client.decrby(key, amount, cb);
    }

    /**
     * execute a batch of commands
     * @param cmds - list of commands
     * @param cb - callback
     * @return
     */
    batch(cmds: string[][], cb: Callback) {
        return this._client.pipeline(cmds).exec(cb);
    }

    /**
     * Checks if a key exists
     * @param key - name of key
     * @param cb - callback
     * If cb response returns 0, key does not exist.
     * If cb response returns 1, key exists.

@@ -52,10 +108,22 @@ export default class RedisClient {
        return this._client.exists(key, cb);
    }

    /**
     * get value stored at key
     * @param key - key holding the value
     * @param cb - callback
     */
    get(key: string, cb: Callback) {
        return this._client.get(key, cb);
    }

    /**
     * Add a value and its score to a sorted set. If no sorted set exists, this
     * will create a new one for the given key.
     * @param key - name of key
     * @param score - score used to order set
     * @param value - value to store
     * @param cb - callback
     */
    zadd(key: string, score: number, value: string, cb: Callback) {
        return this._client.zadd(key, score, value, cb);

@@ -66,6 +134,8 @@ export default class RedisClient {
     * Note: using this on a key that does not exist will return 0.
     * Note: using this on an existing key that isn't a sorted set will
     * return an error WRONGTYPE.
     * @param key - name of key
     * @param cb - callback
     */
    zcard(key: string, cb: Callback) {
        return this._client.zcard(key, cb);

@@ -76,6 +146,9 @@ export default class RedisClient {
     * Note: using this on a key that does not exist will return nil.
     * Note: using this on a value that does not exist in a valid sorted set key
     * will return nil.
     * @param key - name of key
     * @param value - value within sorted set
     * @param cb - callback
     */
    zscore(key: string, value: string, cb: Callback) {
        return this._client.zscore(key, value, cb);

@@ -83,8 +156,10 @@ export default class RedisClient {

    /**
     * Remove a value from a sorted set
     * @param value - value within sorted set. Can specify multiple values within an array
     * @param {function} cb - callback
     * @param key - name of key
     * @param value - value within sorted set. Can specify
     * multiple values within an array
     * @param cb - callback
     * The cb response returns number of values removed
     */
    zrem(key: string, value: string | string[], cb: Callback) {

@@ -93,8 +168,10 @@ export default class RedisClient {

    /**
     * Get specified range of elements in a sorted set
     * @param key - name of key
     * @param start - start index (inclusive)
     * @param end - end index (inclusive) (can use -1)
     * @param cb - callback
     */
    zrange(key: string, start: number, end: number, cb: Callback) {
        return this._client.zrange(key, start, end, cb);

@@ -102,10 +179,12 @@ export default class RedisClient {

    /**
     * Get range of elements in a sorted set based off score
     * @param key - name of key
     * @param min - min score value (inclusive)
     * (can use "-inf")
     * @param max - max score value (inclusive)
     * (can use "+inf")
     * @param cb - callback
     */
    zrangebyscore(
        key: string,

@@ -116,6 +195,15 @@ export default class RedisClient {
        return this._client.zrangebyscore(key, min, max, cb);
    }

    /**
     * get TTL or expiration in seconds
     * @param key - name of key
     * @param cb - callback
     */
    ttl(key: string, cb: Callback) {
        return this._client.ttl(key, cb);
    }

    clear(cb: Callback) {
        return this._client.flushdb(cb);
    }

@@ -123,4 +211,8 @@ export default class RedisClient {
    disconnect() {
        this._client.disconnect();
    }

    listClients(cb: Callback) {
        return this._client.client('list', cb);
    }
}
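Usage sketch for the new helpers (key names illustrative; the batch result shape follows the ioredis pipeline convention of [error, value] pairs):

redis.incrEx('svc:requests:1700000000', 60, (err, res) => { /* ... */ });
redis.scan('svc:requests:*', 10, (err, keys) => {
    // keys: every key in the current sampling windows
});
redis.batch([['get', 'k1'], ['ttl', 'k1']], (err, results) => {
    // results: e.g. [[null, '3'], [null, 42]]
});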
@@ -2,6 +2,8 @@ import async from 'async';
import RedisClient from './RedisClient';
import { Logger } from 'werelogs';

export type Callback = (error: Error | null, value?: any) => void;

export default class StatsClient {
    _redis: RedisClient;
    _interval: number;

@@ -48,7 +50,7 @@ export default class StatsClient {
     * @param d - Date instance
     * @return key - key for redis
     */
    _buildKey(name: string, d: Date): string {
    buildKey(name: string, d: Date): string {
        return `${name}:${this._normalizeTimestamp(d)}`;
    }

@@ -91,11 +93,33 @@ export default class StatsClient {
            amount = (typeof incr === 'number') ? incr : 1;
        }

        const key = this._buildKey(`${id}:requests`, new Date());
        const key = this.buildKey(`${id}:requests`, new Date());

        return this._redis.incrbyEx(key, amount, this._expiry, callback);
    }

    /**
     * Increment the given key by the given value.
     * @param key - The Redis key to increment
     * @param incr - The value to increment by
     * @param [cb] - callback
     */
    incrementKey(key: string, incr: number, cb: Callback) {
        const callback = cb || this._noop;
        return this._redis.incrby(key, incr, callback);
    }

    /**
     * Decrement the given key by the given value.
     * @param key - The Redis key to decrement
     * @param decr - The value to decrement by
     * @param [cb] - callback
     */
    decrementKey(key: string, decr: number, cb: Callback) {
        const callback = cb || this._noop;
        return this._redis.decrby(key, decr, callback);
    }

    /**
     * report/record a request that ended up being a 500 on the server
     * @param id - service identifier

@@ -105,10 +129,53 @@ export default class StatsClient {
            return undefined;
        }
        const callback = cb || this._noop;
        const key = this._buildKey(`${id}:500s`, new Date());
        const key = this.buildKey(`${id}:500s`, new Date());
        return this._redis.incrEx(key, this._expiry, callback);
    }

    /**
     * wrapper on `getStats` that handles a list of keys
     * @param log - Werelogs request logger
     * @param ids - service identifiers
     * @param cb - callback to call with the err/result
     */
    getAllStats(log: Logger, ids: string[], cb: Callback) {
        if (!this._redis) {
            return cb(null, {});
        }

        const statsRes = {
            'requests': 0,
            '500s': 0,
            'sampleDuration': this._expiry,
        };
        let requests = 0;
        let errors = 0;

        // for now set concurrency to default of 10
        return async.eachLimit(ids, 10, (id: string, done) => {
            this.getStats(log, id, (err, res) => {
                if (err) {
                    return done(err);
                }
                requests += res.requests;
                errors += res['500s'];
                return done();
            });
        }, error => {
            if (error) {
                log.error('error getting stats', {
                    error,
                    method: 'StatsClient.getAllStats',
                });
                return cb(null, statsRes);
            }
            statsRes.requests = requests;
            statsRes['500s'] = errors;
            return cb(null, statsRes);
        });
    }

    /**
     * get stats for the last x seconds, x being the sampling duration
     * @param log - Werelogs request logger

@@ -123,8 +190,8 @@ export default class StatsClient {
        const reqsKeys: ['get', string][] = [];
        const req500sKeys: ['get', string][] = [];
        for (let i = 0; i < totalKeys; i++) {
            reqsKeys.push(['get', this._buildKey(`${id}:requests`, d)]);
            req500sKeys.push(['get', this._buildKey(`${id}:500s`, d)]);
            reqsKeys.push(['get', this.buildKey(`${id}:requests`, d)]);
            req500sKeys.push(['get', this.buildKey(`${id}:500s`, d)]);
            this._setPrevInterval(d);
        }
        return async.parallel([
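A short sketch tying the new methods together (the ids and the log instance are assumed to exist):

stats.incrementKey('custom:counter', 5, err => { /* raw redis INCRBY */ });
stats.getAllStats(log, ['s3', 'vault'], (err, res) => {
    // res => { requests: <sum>, '500s': <sum>, sampleDuration: <expiry> }
});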
@@ -1,4 +1,8 @@
import StatsClient from './StatsClient';
import { Logger } from 'werelogs';
import async from 'async';

export type Callback = (error: Error | null, value?: any) => void;

/**
 * @class StatsModel

@@ -7,12 +11,145 @@ import StatsClient from './StatsClient';
 * rather than by seconds
 */
export default class StatsModel extends StatsClient {
    /**
     * Utility method to convert 2d array rows to columns, and vice versa
     * See also: https://docs.ruby-lang.org/en/2.0.0/Array.html#method-i-zip
     * @param arrays - 2d array of integers
     * @return converted array
     */
    _zip(arrays: number[][]) {
        if (arrays.length > 0 && arrays.every(a => Array.isArray(a))) {
            return arrays[0].map((_, i) => arrays.map(a => a[i]));
        }
        return [];
    }

    /**
     * normalize to the nearest interval
     * @param d - Date instance
     * @return timestamp - normalized to the nearest interval
     */
    _normalizeTimestamp(d: Date) {
        const m = d.getMinutes();
        return d.setMinutes(m - m % (Math.floor(this._interval / 60)), 0, 0);
    }

    /**
     * override the method to get the count as an array of integers separated
     * by each interval
     * typical input looks like [[null, '1'], [null, '2'], [null, null]...]
     * @param arr - each index contains the result of each batch command
     * where index 0 signifies the error and index 1 contains the result
     * @return array of integers, ordered from most recent interval to
     * oldest interval with length of (expiry / interval)
     */
    // @ts-expect-errors
    _getCount(arr: [any, string | null][]): number[] {
        const size = Math.floor(this._expiry / this._interval);
        const array = arr.reduce((store, i) => {
            let num = parseInt(i[1] ?? '', 10);
            num = Number.isNaN(num) ? 0 : num;
            store.push(num);
            return store;
        }, [] as number[]);

        if (array.length < size) {
            array.push(...Array(size - array.length).fill(0));
        }
        return array;
    }

    /**
     * wrapper on `getStats` that handles a list of keys
     * override the method to reduce the returned 2d array from `_getCount`
     * @param log - Werelogs request logger
     * @param ids - service identifiers
     * @param cb - callback to call with the err/result
     */
    getAllStats(log: Logger, ids: string[], cb: Callback) {
        if (!this._redis) {
            return cb(null, {});
        }

        const size = Math.floor(this._expiry / this._interval);
        const statsRes = {
            'requests': Array(size).fill(0),
            '500s': Array(size).fill(0),
            'sampleDuration': this._expiry,
        };
        const requests: any[] = [];
        const errors: any[] = [];

        if (ids.length === 0) {
            return cb(null, statsRes);
        }

        // for now set concurrency to default of 10
        return async.eachLimit(ids, 10, (id, done) => {
            this.getStats(log, id, (err, res) => {
                if (err) {
                    return done(err);
                }
                requests.push(res.requests);
                errors.push(res['500s']);
                return done();
            });
        }, error => {
            if (error) {
                log.error('error getting stats', {
                    error,
                    method: 'StatsModel.getAllStats',
                });
                return cb(null, statsRes);
            }

            statsRes.requests = this._zip(requests).map(arr =>
                arr.reduce((acc, i) => acc + i, 0));
            statsRes['500s'] = this._zip(errors).map(arr =>
                arr.reduce((acc, i) => acc + i, 0));

            return cb(null, statsRes);
        });
    }

    /**
     * Handles getting a list of global keys.
     * @param ids - Service identifiers
     * @param log - Werelogs request logger
     * @param cb - Callback
     */
    getAllGlobalStats(ids: string[], log: Logger, cb: Callback) {
        const reqsKeys = ids.map(key => (['get', key]));
        return this._redis.batch(reqsKeys, (err, res) => {
            const statsRes = { requests: 0 };
            if (err) {
                log.error('error getting metrics', {
                    error: err,
                    method: 'StatsClient.getAllGlobalStats',
                });
                return cb(null, statsRes);
            }
            statsRes.requests = res.reduce((sum, curr) => {
                const [cmdErr, val] = curr;
                if (cmdErr) {
                    // Log any individual request errors from the batch request.
                    log.error('error getting metrics', {
                        error: cmdErr,
                        method: 'StatsClient.getAllGlobalStats',
                    });
                }
                return sum + (Number.parseInt(val, 10) || 0);
            }, 0);
            return cb(null, statsRes);
        });
    }

    /**
     * normalize date timestamp to the nearest hour
     * @param d - Date instance
     * @return timestamp - normalized to the nearest hour
     */
    normalizeTimestampByHour(d: Date): number {
    normalizeTimestampByHour(d: Date) {
        return d.setMinutes(0, 0, 0);
    }

@@ -21,40 +158,10 @@ export default class StatsModel extends StatsClient {
     * @param d - Date instance
     * @return timestamp - one hour prior to date passed
     */
    _getDatePreviousHour(d: Date): number {
    _getDatePreviousHour(d: Date) {
        return d.setHours(d.getHours() - 1);
    }

    /**
     * normalize to the nearest interval
     * @param d - Date instance
     * @return timestamp - normalized to the nearest interval
     */
    _normalizeTimestamp(d: Date): number {
        const m = d.getMinutes();
        return d.setMinutes(m - m % (Math.floor(this._interval / 60)), 0, 0);
    }

    /**
     * override the method to get the result as an array of integers separated
     * by each interval
     * typical input looks like [[null, '1'], [null, '2'], [null, null]...]
     * @param arr - each index contains the result of each batch command
     * where index 0 signifies the error and index 1 contains the result
     * @return array of integers, ordered from most recent interval to
     * oldest interval
     */
    // @ts-ignore
    // TODO change name or conform to parent class method
    _getCount(arr: [any, string | null][]) {
        return arr.reduce<number[]>((store, i) => {
            let num = parseInt(i[1] ?? '', 10);
            num = Number.isNaN(num) ? 0 : num;
            store.push(num);
            return store;
        }, []);
    }

    /**
     * get list of sorted set key timestamps
     * @param epoch - epoch time
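For intuition, _zip transposes the per-service series so that getAllStats can sum column-wise (values illustrative):

// _zip([[1, 2, 3],    // service A, newest -> oldest interval
//       [4, 5, 6]])   // service B
//   => [[1, 4], [2, 5], [3, 6]]
// getAllStats then reduces each column: requests => [5, 7, 9]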
@ -0,0 +1,281 @@
|
|||
export type DeleteRetentionPolicy = {
|
||||
enabled: boolean;
|
||||
days: number;
|
||||
};
|
||||
|
||||
/**
|
||||
* Helper class to ease access to the Azure specific information for
|
||||
* storage accounts mapped to buckets.
|
||||
*/
|
||||
export default class BucketAzureInfo {
|
||||
_data: {
|
||||
sku: string;
|
||||
accessTier: string;
|
||||
kind: string;
|
||||
systemKeys: string[];
|
||||
tenantKeys: string[];
|
||||
subscriptionId: string;
|
||||
resourceGroup: string;
|
||||
deleteRetentionPolicy: DeleteRetentionPolicy;
|
||||
managementPolicies: any[];
|
||||
httpsOnly: boolean;
|
||||
tags: any;
|
||||
networkACL: any[];
|
||||
cname: string;
|
||||
azureFilesAADIntegration: boolean;
|
||||
hnsEnabled: boolean;
|
||||
logging: any;
|
||||
hourMetrics: any;
|
||||
minuteMetrics: any;
|
||||
serviceVersion: string;
|
||||
}
|
||||
/**
|
||||
* @constructor
|
||||
* @param obj - Raw structure for the Azure info on storage account
|
||||
* @param obj.sku - SKU name of this storage account
|
||||
* @param obj.accessTier - Access Tier name of this storage account
|
||||
* @param obj.kind - Kind name of this storage account
|
||||
* @param obj.systemKeys - pair of shared keys for the system
|
||||
* @param obj.tenantKeys - pair of shared keys for the tenant
|
||||
* @param obj.subscriptionId - subscription ID the storage account
|
||||
* belongs to
|
||||
* @param obj.resourceGroup - Resource group name the storage
|
||||
* account belongs to
|
||||
* @param obj.deleteRetentionPolicy - Delete retention policy
|
||||
* @param obj.deleteRetentionPolicy.enabled -
|
||||
* @param obj.deleteRetentionPolicy.days -
|
||||
* @param obj.managementPolicies - Management policies for this
|
||||
* storage account
|
||||
* @param obj.httpsOnly - Server the content of this storage
|
||||
* account through HTTPS only
|
||||
* @param obj.tags - Set of tags applied on this storage account
|
||||
* @param obj.networkACL - Network ACL of this storage account
|
||||
* @param obj.cname - CNAME of this storage account
|
||||
* @param obj.azureFilesAADIntegration - whether or not Azure
|
||||
* Files AAD Integration is enabled for this storage account
|
||||
* @param obj.hnsEnabled - whether or not a hierarchical namespace
|
||||
* is enabled for this storage account
|
||||
* @param obj.logging - service properties: logging
|
||||
* @param obj.hourMetrics - service properties: hourMetrics
|
||||
* @param obj.minuteMetrics - service properties: minuteMetrics
|
||||
* @param obj.serviceVersion - service properties: serviceVersion
|
||||
*/
|
||||
constructor(obj: {
|
||||
sku: string;
|
||||
accessTier: string;
|
||||
kind: string;
|
||||
systemKeys: string[];
|
||||
tenantKeys: string[];
|
||||
subscriptionId: string;
|
||||
resourceGroup: string;
|
||||
deleteRetentionPolicy: DeleteRetentionPolicy;
|
||||
managementPolicies: any[];
|
||||
httpsOnly: boolean;
|
||||
tags: any;
|
||||
networkACL: any[];
|
||||
cname: string;
|
||||
azureFilesAADIntegration: boolean;
|
||||
hnsEnabled: boolean;
|
||||
logging: any;
|
||||
hourMetrics: any;
|
||||
minuteMetrics: any;
|
||||
serviceVersion: string;
|
||||
}) {
|
||||
this._data = {
|
||||
sku: obj.sku,
|
||||
accessTier: obj.accessTier,
|
||||
kind: obj.kind,
|
||||
systemKeys: obj.systemKeys,
|
||||
tenantKeys: obj.tenantKeys,
|
||||
subscriptionId: obj.subscriptionId,
|
||||
resourceGroup: obj.resourceGroup,
|
||||
deleteRetentionPolicy: obj.deleteRetentionPolicy,
|
||||
managementPolicies: obj.managementPolicies,
|
||||
httpsOnly: obj.httpsOnly,
|
||||
tags: obj.tags,
|
||||
networkACL: obj.networkACL,
|
||||
cname: obj.cname,
|
||||
azureFilesAADIntegration: obj.azureFilesAADIntegration,
|
||||
hnsEnabled: obj.hnsEnabled,
|
||||
logging: obj.logging,
|
||||
hourMetrics: obj.hourMetrics,
|
||||
minuteMetrics: obj.minuteMetrics,
|
||||
serviceVersion: obj.serviceVersion,
|
||||
};
|
||||
}
|
||||
|
||||
getSku() {
|
||||
return this._data.sku;
|
||||
}
|
||||
    setSku(sku: string) {
        this._data.sku = sku;
        return this;
    }

    getAccessTier() {
        return this._data.accessTier;
    }

    setAccessTier(accessTier: string) {
        this._data.accessTier = accessTier;
        return this;
    }

    getKind() {
        return this._data.kind;
    }

    setKind(kind: string) {
        this._data.kind = kind;
        return this;
    }

    getSystemKeys() {
        return this._data.systemKeys;
    }

    setSystemKeys(systemKeys: string[]) {
        this._data.systemKeys = systemKeys;
        return this;
    }

    getTenantKeys() {
        return this._data.tenantKeys;
    }

    setTenantKeys(tenantKeys: string[]) {
        this._data.tenantKeys = tenantKeys;
        return this;
    }

    getSubscriptionId() {
        return this._data.subscriptionId;
    }

    setSubscriptionId(subscriptionId: string) {
        this._data.subscriptionId = subscriptionId;
        return this;
    }

    getResourceGroup() {
        return this._data.resourceGroup;
    }

    setResourceGroup(resourceGroup: string) {
        this._data.resourceGroup = resourceGroup;
        return this;
    }

    getDeleteRetentionPolicy() {
        return this._data.deleteRetentionPolicy;
    }

    setDeleteRetentionPolicy(deleteRetentionPolicy: DeleteRetentionPolicy) {
        this._data.deleteRetentionPolicy = deleteRetentionPolicy;
        return this;
    }

    getManagementPolicies() {
        return this._data.managementPolicies;
    }

    setManagementPolicies(managementPolicies: any[]) {
        this._data.managementPolicies = managementPolicies;
        return this;
    }

    getHttpsOnly() {
        return this._data.httpsOnly;
    }

    setHttpsOnly(httpsOnly: boolean) {
        this._data.httpsOnly = httpsOnly;
        return this;
    }

    getTags() {
        return this._data.tags;
    }

    setTags(tags: any) {
        this._data.tags = tags;
        return this;
    }

    getNetworkACL() {
        return this._data.networkACL;
    }

    setNetworkACL(networkACL: any[]) {
        this._data.networkACL = networkACL;
        return this;
    }

    getCname() {
        return this._data.cname;
    }

    setCname(cname: string) {
        this._data.cname = cname;
        return this;
    }

    getAzureFilesAADIntegration() {
        return this._data.azureFilesAADIntegration;
    }

    setAzureFilesAADIntegration(azureFilesAADIntegration: boolean) {
        this._data.azureFilesAADIntegration = azureFilesAADIntegration;
        return this;
    }

    getHnsEnabled() {
        return this._data.hnsEnabled;
    }

    setHnsEnabled(hnsEnabled: boolean) {
        this._data.hnsEnabled = hnsEnabled;
        return this;
    }

    getLogging() {
        return this._data.logging;
    }

    setLogging(logging: any) {
        this._data.logging = logging;
        return this;
    }

    getHourMetrics() {
        return this._data.hourMetrics;
    }

    setHourMetrics(hourMetrics: any) {
        this._data.hourMetrics = hourMetrics;
        return this;
    }

    getMinuteMetrics() {
        return this._data.minuteMetrics;
    }

    setMinuteMetrics(minuteMetrics: any) {
        this._data.minuteMetrics = minuteMetrics;
        return this;
    }

    getServiceVersion() {
        return this._data.serviceVersion;
    }

    setServiceVersion(serviceVersion: any) {
        this._data.serviceVersion = serviceVersion;
        return this;
    }

    getValue() {
        return this._data;
    }
}
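Every setter above returns `this`, so calls chain; a usage sketch (the enclosing class name BucketAzureInfo and its constructor argument are assumed from context, not shown in this diff):

const azureInfo = new BucketAzureInfo({ sku: 'Standard_LRS' }) // hypothetical constructor argument
    .setAccessTier('Hot')
    .setKind('StorageV2')
    .setHttpsOnly(true);
azureInfo.getValue(); // => { sku: 'Standard_LRS', accessTier: 'Hot', kind: 'StorageV2', httpsOnly: true }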
@@ -8,10 +8,12 @@ import ObjectLockConfiguration from './ObjectLockConfiguration';
import BucketPolicy from './BucketPolicy';
import NotificationConfiguration from './NotificationConfiguration';
import { ACL as OACL } from './ObjectMD';
import { areTagsValid, BucketTag } from '../s3middleware/tagging';

// WHEN UPDATING THIS NUMBER, UPDATE BucketInfoModelVersion.md CHANGELOG
// BucketInfoModelVersion.md can be found in the root of this repository
const modelVersion = 10;
// BucketInfoModelVersion.md can be found in documentation/ at the root
// of this repository
const modelVersion = 16;

export type CORS = {
    id: string;
@@ -35,6 +37,41 @@ export type VersioningConfiguration = {
    MfaDelete: any;
};

export type VeeamSOSApi = {
    SystemInfo?: {
        ProtocolVersion: string,
        ModelName: string,
        ProtocolCapabilities: {
            CapacityInfo: boolean,
            UploadSessions: boolean,
            IAMSTS?: boolean,
        },
        APIEndpoints?: {
            IAMEndpoint: string,
            STSEndpoint: string,
        },
        SystemRecommendations?: {
            S3ConcurrentTaskLimit: number,
            S3MultiObjectDelete: number,
            StorageCurrentTasksLimit: number,
            KbBlockSize: number,
        },
        LastModified?: string,
    },
    CapacityInfo?: {
        Capacity: number,
        Available: number,
        Used: number,
        LastModified?: string,
    },
};

// Capabilities contains all specifics from external products supported by
// our S3 implementation, at bucket level
export type Capabilities = {
    VeeamSOSApi?: VeeamSOSApi,
};
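A literal conforming to the new Capabilities/VeeamSOSApi types above, for reference (all values illustrative):

const exampleCapabilities: Capabilities = {
    VeeamSOSApi: {
        SystemInfo: {
            ProtocolVersion: '1.0',
            ModelName: 'Example-S3',
            ProtocolCapabilities: {
                CapacityInfo: true,
                UploadSessions: false,
            },
        },
        CapacityInfo: {
            Capacity: 1099511627776, // 1 TiB total
            Available: 549755813888, // 512 GiB free
            Used: 549755813888,      // 512 GiB used
        },
    },
};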
export type ACL = OACL & { WRITE: string[] }

export default class BucketInfo {
@@ -58,56 +95,70 @@ export default class BucketInfo {
    _objectLockEnabled?: boolean;
    _objectLockConfiguration?: any;
    _notificationConfiguration?: any;
    _tags?: { key: string; value: string }[] | null;
    _tags?: Array<BucketTag>;
    _readLocationConstraint: string | null;
    _isNFS: boolean | null;
    _azureInfo: any | null;
    _ingestion: { status: 'enabled' | 'disabled' } | null;
    _capabilities?: Capabilities;
    _quotaMax: number | 0;

    /**
     * Represents all bucket information.
     * @constructor
     * @param {string} name - bucket name
     * @param {string} owner - bucket owner's name
     * @param {string} ownerDisplayName - owner's display name
     * @param {object} creationDate - creation date of bucket
     * @param {number} mdBucketModelVersion - bucket model version
     * @param {object} [acl] - bucket ACLs (no need to copy
     * @param name - bucket name
     * @param owner - bucket owner's name
     * @param ownerDisplayName - owner's display name
     * @param creationDate - creation date of bucket
     * @param mdBucketModelVersion - bucket model version
     * @param [acl] - bucket ACLs (no need to copy
     * ACL object since referenced object will not be used outside of
     * BucketInfo instance)
     * @param {boolean} transient - flag indicating whether bucket is transient
     * @param {boolean} deleted - flag indicating whether attempt to delete
     * @param {object} serverSideEncryption - sse information for this bucket
     * @param {number} serverSideEncryption.cryptoScheme -
     * @param transient - flag indicating whether bucket is transient
     * @param deleted - flag indicating whether attempt to delete
     * @param serverSideEncryption - sse information for this bucket
     * @param serverSideEncryption.cryptoScheme -
     * cryptoScheme used
     * @param {string} serverSideEncryption.algorithm -
     * @param serverSideEncryption.algorithm -
     * algorithm to use
     * @param {string} serverSideEncryption.masterKeyId -
     * @param serverSideEncryption.masterKeyId -
     * key to get master key
     * @param {string} serverSideEncryption.configuredMasterKeyId -
     * @param serverSideEncryption.configuredMasterKeyId -
     * custom KMS key id specified by user
     * @param {boolean} serverSideEncryption.mandatory -
     * @param serverSideEncryption.mandatory -
     * true for mandatory encryption
     * bucket has been made
     * @param {object} versioningConfiguration - versioning configuration
     * @param {string} versioningConfiguration.Status - versioning status
     * @param {object} versioningConfiguration.MfaDelete - versioning mfa delete
     * @param {string} locationConstraint - locationConstraint for bucket
     * @param {WebsiteConfiguration} [websiteConfiguration] - website
     * @param versioningConfiguration - versioning configuration
     * @param versioningConfiguration.Status - versioning status
     * @param versioningConfiguration.MfaDelete - versioning mfa delete
     * @param locationConstraint - locationConstraint for bucket that
     * also includes the ingestion flag
     * @param [websiteConfiguration] - website
     * configuration
     * @param {object[]} [cors] - collection of CORS rules to apply
     * @param {string} [cors[].id] - optional ID to identify rule
     * @param {string[]} cors[].allowedMethods - methods allowed for CORS request
     * @param {string[]} cors[].allowedOrigins - origins allowed for CORS request
     * @param {string[]} [cors[].allowedHeaders] - headers allowed in an OPTIONS
     * @param [cors] - collection of CORS rules to apply
     * @param [cors[].id] - optional ID to identify rule
     * @param cors[].allowedMethods - methods allowed for CORS request
     * @param cors[].allowedOrigins - origins allowed for CORS request
     * @param [cors[].allowedHeaders] - headers allowed in an OPTIONS
     * request via the Access-Control-Request-Headers header
     * @param {number} [cors[].maxAgeSeconds] - seconds browsers should cache
     * @param [cors[].maxAgeSeconds] - seconds browsers should cache
     * OPTIONS response
     * @param {string[]} [cors[].exposeHeaders] - headers expose to applications
     * @param {object} [replicationConfiguration] - replication configuration
     * @param {object} [lifecycleConfiguration] - lifecycle configuration
     * @param {object} [bucketPolicy] - bucket policy
     * @param {string} [uid] - unique identifier for the bucket, necessary
     * @param {boolean} [objectLockEnabled] - true when object lock enabled
     * @param {object} [objectLockConfiguration] - object lock configuration
     * @param {object} [notificationConfiguration] - bucket notification configuration
     * @param {object[]} [tags] - bucket tags
     * @param [cors[].exposeHeaders] - headers expose to applications
     * @param [replicationConfiguration] - replication configuration
     * @param [lifecycleConfiguration] - lifecycle configuration
     * @param [bucketPolicy] - bucket policy
     * @param [uid] - unique identifier for the bucket, necessary
     * @param readLocationConstraint - readLocationConstraint for bucket
     * addition for use with lifecycle operations
     * @param [isNFS] - whether the bucket is on NFS
     * @param [ingestionConfig] - object for ingestion status: en/dis
     * @param [azureInfo] - Azure storage account specific info
     * @param [objectLockEnabled] - true when object lock enabled
     * @param [objectLockConfiguration] - object lock configuration
     * @param [notificationConfiguration] - bucket notification configuration
     * @param [tags] - bucket tag set
     * @param [capabilities] - capabilities for the bucket
     * @param quotaMax - bucket quota
     */
    constructor(
        name: string,
@@ -127,10 +178,16 @@ export default class BucketInfo {
        lifecycleConfiguration?: any,
        bucketPolicy?: any,
        uid?: string,
        readLocationConstraint?: string,
        isNFS?: boolean,
        ingestionConfig?: { status: 'enabled' | 'disabled' },
        azureInfo?: any,
        objectLockEnabled?: boolean,
        objectLockConfiguration?: any,
        notificationConfiguration?: any,
        tags?: { key: string; value: string }[],
        tags?: Array<BucketTag> | [],
        capabilities?: Capabilities,
        quotaMax?: number | 0,
    ) {
        assert.strictEqual(typeof name, 'string');
        assert.strictEqual(typeof owner, 'string');
@@ -172,6 +229,15 @@ export default class BucketInfo {
        if (locationConstraint) {
            assert.strictEqual(typeof locationConstraint, 'string');
        }
        if (ingestionConfig) {
            assert.strictEqual(typeof ingestionConfig, 'object');
        }
        if (azureInfo) {
            assert.strictEqual(typeof azureInfo, 'object');
        }
        if (readLocationConstraint) {
            assert.strictEqual(typeof readLocationConstraint, 'string');
        }
        if (websiteConfiguration) {
            assert(websiteConfiguration instanceof WebsiteConfiguration);
            const indexDocument = websiteConfiguration.getIndexDocument();
@@ -217,8 +283,14 @@ export default class BucketInfo {
            READ: [],
            READ_ACP: [],
        };
        if (tags) {
            assert(Array.isArray(tags));

        if (tags === undefined) {
            tags = [] as BucketTag[];
        }
        assert.strictEqual(areTagsValid(tags), true);
        if (quotaMax) {
            assert.strictEqual(typeof quotaMax, 'number');
            assert(quotaMax >= 0, 'Quota cannot be negative');
        }

        // IF UPDATING PROPERTIES, INCREMENT MODELVERSION NUMBER ABOVE
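The new checks above normalize an omitted tag set to an empty array before validating it; a small sketch of what passes (BucketTag is assumed here to be the S3-style { Key, Value } pair exported by ../s3middleware/tagging):

// Hedged sketch: BucketTag shape is assumed, not shown in this diff.
const exampleTags: BucketTag[] = [
    { Key: 'project', Value: 'demo' },
];
// Both an explicit tag set and the normalized default pass validation:
assert.strictEqual(areTagsValid(exampleTags), true);
assert.strictEqual(areTagsValid([]), true);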
@@ -233,16 +305,22 @@
        this._serverSideEncryption = serverSideEncryption || null;
        this._versioningConfiguration = versioningConfiguration || null;
        this._locationConstraint = locationConstraint || null;
        this._readLocationConstraint = readLocationConstraint || null;
        this._websiteConfiguration = websiteConfiguration || null;
        this._replicationConfiguration = replicationConfiguration || null;
        this._cors = cors || null;
        this._lifecycleConfiguration = lifecycleConfiguration || null;
        this._bucketPolicy = bucketPolicy || null;
        this._uid = uid || uuid();
        this._isNFS = isNFS || null;
        this._ingestion = ingestionConfig || null;
        this._azureInfo = azureInfo || null;
        this._objectLockEnabled = objectLockEnabled || false;
        this._objectLockConfiguration = objectLockConfiguration || null;
        this._notificationConfiguration = notificationConfiguration || null;
        this._tags = tags || null;
        this._tags = tags;
        this._capabilities = capabilities || undefined;
        this._quotaMax = quotaMax || 0;
        return this;
    }
@@ -263,16 +341,22 @@
            serverSideEncryption: this._serverSideEncryption,
            versioningConfiguration: this._versioningConfiguration,
            locationConstraint: this._locationConstraint,
            readLocationConstraint: this._readLocationConstraint,
            websiteConfiguration: undefined,
            cors: this._cors,
            replicationConfiguration: this._replicationConfiguration,
            lifecycleConfiguration: this._lifecycleConfiguration,
            bucketPolicy: this._bucketPolicy,
            uid: this._uid,
            isNFS: this._isNFS,
            ingestion: this._ingestion,
            azureInfo: this._azureInfo,
            objectLockEnabled: this._objectLockEnabled,
            objectLockConfiguration: this._objectLockConfiguration,
            notificationConfiguration: this._notificationConfiguration,
            tags: this._tags,
            capabilities: this._capabilities,
            quotaMax: this._quotaMax,
        };
        const final = this._websiteConfiguration
            ? {
@@ -296,8 +380,10 @@
            obj.transient, obj.deleted, obj.serverSideEncryption,
            obj.versioningConfiguration, obj.locationConstraint, websiteConfig,
            obj.cors, obj.replicationConfiguration, obj.lifecycleConfiguration,
            obj.bucketPolicy, obj.uid, obj.objectLockEnabled,
            obj.objectLockConfiguration, obj.notificationConfiguration, obj.tags);
            obj.bucketPolicy, obj.uid, obj.readLocationConstraint, obj.isNFS,
            obj.ingestion, obj.azureInfo, obj.objectLockEnabled,
            obj.objectLockConfiguration, obj.notificationConfiguration, obj.tags,
            obj.capabilities, obj.quotaMax);
    }

    /**
@@ -321,8 +407,11 @@
            data._versioningConfiguration, data._locationConstraint,
            data._websiteConfiguration, data._cors,
            data._replicationConfiguration, data._lifecycleConfiguration,
            data._bucketPolicy, data._uid, data._objectLockEnabled,
            data._objectLockConfiguration, data._notificationConfiguration, data._tags);
            data._bucketPolicy, data._uid, data._readLocationConstraint,
            data._isNFS, data._ingestion, data._azureInfo,
            data._objectLockEnabled, data._objectLockConfiguration,
            data._notificationConfiguration, data._tags, data._capabilities,
            data._quotaMax);
    }

    /**
@@ -619,6 +708,17 @@
        return this._locationConstraint;
    }

    /**
     * Get read location constraint.
     * @return - bucket read location constraint
     */
    getReadLocationConstraint() {
        if (this._readLocationConstraint) {
            return this._readLocationConstraint;
        }
        return this._locationConstraint;
    }
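Fallback behavior of the getter above, illustrated (assuming a bucket constructed with locationConstraint 'us-east-1' and no readLocationConstraint):

// No read location was set, so the write location is returned instead:
bucket.getReadLocationConstraint(); // => 'us-east-1'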
    /**
     * Set Bucket model version
     *
@@ -707,6 +807,85 @@
        this._uid = uid;
        return this;
    }
    /**
     * Check if the bucket is an NFS bucket.
     * @return - Whether the bucket is NFS or not
     */
    isNFS() {
        return this._isNFS;
    }
    /**
     * Set whether the bucket is an NFS bucket.
     * @param isNFS - Whether the bucket is NFS or not
     * @return - bucket info instance
     */
    setIsNFS(isNFS: boolean) {
        this._isNFS = isNFS;
        return this;
    }
    /**
     * enable ingestion, set 'this._ingestion' to { status: 'enabled' }
     * @return - bucket info instance
     */
    enableIngestion() {
        this._ingestion = { status: 'enabled' };
        return this;
    }
    /**
     * disable ingestion, set 'this._ingestion' to { status: 'disabled' }
     * @return - bucket info instance
     */
    disableIngestion() {
        this._ingestion = { status: 'disabled' };
        return this;
    }
    /**
     * Get ingestion configuration
     * @return - bucket ingestion configuration: Enabled or Disabled
     */
    getIngestion() {
        return this._ingestion;
    }

    /**
     * Check if bucket is an ingestion bucket
     * @return - 'true' if bucket is ingestion bucket, 'false' if
     * otherwise
     */
    isIngestionBucket() {
        const ingestionConfig = this.getIngestion();
        if (ingestionConfig) {
            return true;
        }
        return false;
    }
    /**
     * Check if ingestion is enabled
     * @return - 'true' if ingestion is enabled, otherwise 'false'
     */
    isIngestionEnabled() {
        const ingestionConfig = this.getIngestion();
        return ingestionConfig ? ingestionConfig.status === 'enabled' : false;
    }
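The two predicates above answer different questions; a short illustration:

// A bucket whose ingestion was explicitly disabled still *is* an
// ingestion bucket, but ingestion is not *enabled*:
bucket.disableIngestion();
bucket.isIngestionBucket();  // => true  (an ingestion config object exists)
bucket.isIngestionEnabled(); // => false (its status is 'disabled')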
    /**
     * Return the Azure specific storage account information for this bucket
     * @return - a structure suitable for {@link BucketAzureInfo}
     * constructor
     */
    getAzureInfo() {
        return this._azureInfo;
    }
    /**
     * Set the Azure specific storage account information for this bucket
     * @param azureInfo - a structure suitable for
     * {@link BucketAzureInfo} construction
     * @return - bucket info instance
     */
    setAzureInfo(azureInfo: any) {
        this._azureInfo = azureInfo;
        return this;
    }
    /**
     * Check if object lock is enabled.
     * @return - depending on whether object lock is enabled
@@ -726,7 +905,7 @@

    /**
     * Get the value of bucket tags
     * @return - Array of bucket tags as {"key" : "key", "value": "value"}
     * @return - Array of bucket tags
     */
    getTags() {
        return this._tags;
@@ -734,13 +913,58 @@

    /**
     * Set bucket tags
     * @param tags - collection of tags
     * @param tags[].key - key of the tag
     * @param tags[].value - value of the tag
     * @return - bucket info instance
     */
    setTags(tags: { key: string; value: string }[]) {
    setTags(tags: Array<BucketTag>) {
        this._tags = tags;
        return this;
    }

    /**
     * Get the value of bucket capabilities
     * @return - capabilities of the bucket
     */
    getCapabilities() {
        return this._capabilities;
    }

    /**
     * Get a specific bucket capability
     *
     * @param capability? - if provided, will return a specific capability
     * @return - capability of the bucket
     */
    getCapability(capability: string): VeeamSOSApi | undefined {
        if (capability && this._capabilities && this._capabilities[capability]) {
            return this._capabilities[capability];
        }
        return undefined;
    }
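A lookup sketch for getCapability(); the only key currently defined by the Capabilities type is 'VeeamSOSApi':

const sos = bucket.getCapability('VeeamSOSApi');
if (sos?.CapacityInfo) {
    // capacity reporting is configured for this bucket
}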
    /**
     * Set bucket capabilities
     * @return - bucket info instance
     */
    setCapabilities(capabilities: Capabilities) {
        this._capabilities = capabilities;
        return this;
    }

    /**
     * Get the bucket quota information
     * @return quotaMax
     */
    getQuota() {
        return this._quotaMax;
    }

    /**
     * Set bucket quota
     * @param quota - quota to be set
     * @return - bucket quota info
     */
    setQuota(quota: number) {
        this._quotaMax = quota || 0;
        return this;
    }
}
@@ -7,6 +7,8 @@ import escapeForXml from '../s3middleware/escapeForXml';
import type { XMLRule } from './ReplicationConfiguration';
import { Status } from './LifecycleRule';

const MAX_DAYS = 2147483647; // Max 32-bit signed binary integer.

/**
 * Format of xml request:
@@ -87,6 +89,7 @@ export default class LifecycleConfiguration {
    _parsedXML: any;
    _ruleIDs: string[];
    _tagKeys: string[];
    _storageClasses: string[];
    _config: {
        error?: ArsenalError;
        rules?: any[];
@@ -95,10 +98,13 @@
    /**
     * Create a Lifecycle Configuration instance
     * @param xml - the parsed xml
     * @param config - the CloudServer config
     * @return - LifecycleConfiguration instance
     */
    constructor(xml: any) {
    constructor(xml: any, config: { replicationEndpoints: { site: string }[] }) {
        this._parsedXML = xml;
        this._storageClasses =
            config.replicationEndpoints.map(endpoint => endpoint.site);
        this._ruleIDs = [];
        this._tagKeys = [];
        this._config = {};
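The constructor now requires the CloudServer config so the allowed transition targets can be derived from the replication endpoints; an instantiation sketch (parsedXml stands for the caller-provided parsed XML, as before; site names are illustrative):

const lifecycleConfig = new LifecycleConfiguration(parsedXml, {
    replicationEndpoints: [{ site: 'us-east-2' }, { site: 'azure-archive' }],
});
// lifecycleConfig._storageClasses is now ['us-east-2', 'azure-archive']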
@@ -219,11 +225,6 @@
     * }
     */
    _parseRule(rule: XMLRule) {
        if (rule.Transition || rule.NoncurrentVersionTransition) {
            const msg = 'Transition lifecycle action not yet implemented';
            const error = errors.NotImplemented.customizeDescription(msg);
            return { error };
        }
        // Either Prefix or Filter must be included, but can be empty string
        if ((!rule.Filter && rule.Filter !== '') &&
            (!rule.Prefix && rule.Prefix !== '')) {
@@ -492,6 +493,172 @@
        return { ...base, ruleStatus: status }
    }

    /**
     * Finds the prefix and/or tags of the given rule and gets the error message
     * @param rule - The rule to find the prefix in
     * @return - The prefix or filter information
     */
    _getRuleFilterDesc(rule: { Prefix?: string[]; Filter?: any[] }) {
        if (rule.Prefix) {
            return `prefix '${rule.Prefix[0]}'`;
        }
        // There must be a filter if no top-level prefix is provided. First
        // check if there are multiple filters (i.e. `Filter.And`).
        if (rule.Filter?.[0] === undefined || rule.Filter[0].And === undefined) {
            const { Prefix, Tag } = rule.Filter?.[0] || {};
            if (Prefix) {
                return `filter '(prefix=${Prefix[0]})'`;
            }
            if (Tag) {
                const { Key, Value } = Tag[0];
                return `filter '(tag: key=${Key[0]}, value=${Value[0]})'`;
            }
            return 'filter (all)';
        }
        const filters: string[] = [];
        const { Prefix, Tag } = rule.Filter[0].And[0];
        if (Prefix) {
            filters.push(`prefix=${Prefix[0]}`);
        }
        Tag.forEach((tag: { Key: string[]; Value: string[] }) => {
            const { Key, Value } = tag;
            filters.push(`tag: key=${Key[0]}, value=${Value[0]}`);
        });
        const joinedFilters = filters.join(' and ');
        return `filter '(${joinedFilters})'`;
    }
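Sample outputs of _getRuleFilterDesc() for the three input shapes it handles (inputs use the parsed-XML array form seen throughout this class):

// { Prefix: ['logs/'] }
//     => "prefix 'logs/'"
// { Filter: [{ Tag: [{ Key: ['k'], Value: ['v'] }] }] }
//     => "filter '(tag: key=k, value=v)'"
// { Filter: [{ And: [{ Prefix: ['logs/'], Tag: [{ Key: ['k'], Value: ['v'] }] }] }] }
//     => "filter '(prefix=logs/ and tag: key=k, value=v)'"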

    /**
     * Checks the validity of the given field
     * @param params - Given function parameters
     * @param params.days - The value of the field to check
     * @param params.field - The field name with the value
     * @param params.ancestor - The immediate ancestor field
     * @return Returns an error object or `null`
     */
    _checkDays(params: { days: number; field: string; ancestor: string }) {
        const { days, field, ancestor } = params;
        if (days < 0) {
            const msg = `'${field}' in ${ancestor} action must be nonnegative`;
            return errors.InvalidArgument.customizeDescription(msg);
        }
        if (days > MAX_DAYS) {
            return errors.MalformedXML.customizeDescription(
                `'${field}' in ${ancestor} action must not exceed ${MAX_DAYS}`);
        }
        return null;
    }

    /**
     * Checks the validity of the given storage class
     * @param params - Given function parameters
     * @param params.usedStorageClasses - Storage classes used in other
     * rules
     * @param params.storageClass - The storage class of the current
     * rule
     * @param params.ancestor - The immediate ancestor field
     * @param params.rule - The current rule
     * @return Returns an error object or `null`
     */
    _checkStorageClasses(params: {
        usedStorageClasses: string[];
        storageClass: string;
        ancestor: string;
        rule: { Prefix?: string[]; Filter?: any };
    }) {
        const { usedStorageClasses, storageClass, ancestor, rule } = params;
        if (!this._storageClasses.includes(storageClass)) {
            // This differs from the AWS message. This will help the user since
            // the StorageClass does not conform to AWS specs.
            const list = `'${this._storageClasses.join("', '")}'`;
            const msg = `'StorageClass' must be one of ${list}`;
            return errors.MalformedXML.customizeDescription(msg);
        }
        if (usedStorageClasses.includes(storageClass)) {
            const msg = `'StorageClass' must be different for '${ancestor}' ` +
                `actions in same 'Rule' with ${this._getRuleFilterDesc(rule)}`;
            return errors.InvalidRequest.customizeDescription(msg);
        }
        return null;
    }

    /**
     * Ensure that transition rules are at least a day apart from each other.
     * @param params - Given function parameters
     * @param [params.days] - The days of the current transition
     * @param [params.date] - The date of the current transition
     * @param params.storageClass - The storage class of the current
     * rule
     * @param params.rule - The current rule
     * @return Returns an error object or `null`
     */
    _checkTimeGap(params: {
        days?: number;
        date?: string;
        storageClass: string;
        rule: { Transition: any[]; Prefix?: string[]; Filter?: any };
    }) {
        const { days, date, storageClass, rule } = params;
        const invalidTransition = rule.Transition.find(transition => {
            if (storageClass === transition.StorageClass[0]) {
                return false;
            }
            if (days !== undefined) {
                return Number.parseInt(transition.Days[0], 10) === days;
            }
            if (date !== undefined) {
                const timestamp = new Date(date).getTime();
                const compareTimestamp = new Date(transition.Date[0]).getTime();
                const oneDay = 24 * 60 * 60 * 1000; // Milliseconds in a day.
                return Math.abs(timestamp - compareTimestamp) < oneDay;
            }
            return false;
        });
        if (invalidTransition) {
            const timeType = days !== undefined ? 'Days' : 'Date';
            const filterMsg = this._getRuleFilterDesc(rule);
            const compareStorageClass = invalidTransition.StorageClass[0];
            const msg = `'${timeType}' in the 'Transition' action for ` +
                `StorageClass '${storageClass}' for ${filterMsg} must be at ` +
                `least one day apart from ${filterMsg} in the 'Transition' ` +
                `action for StorageClass '${compareStorageClass}'`;
            return errors.InvalidArgument.customizeDescription(msg);
        }
        return null;
    }
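A worked example of the one-day spacing rule enforced above: two Date-based transitions to different storage classes must be at least 24h apart (class names illustrative).

// Existing transition: { Date: ['2024-01-01T00:00:00.000Z'], StorageClass: ['class-a'] }
// Transition being checked: date = '2024-01-01T12:00:00.000Z', storageClass = 'class-b'
// |t(B) - t(A)| = 12h < oneDay, so _checkTimeGap returns an InvalidArgument error.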

    /**
     * Checks transition time type (i.e. 'Date' or 'Days') only occurs once
     * across transitions and across transitions and expiration policies
     * @param params - Given function parameters
     * @param params.usedTimeType - The time type that has been used by
     * another rule
     * @param params.currentTimeType - the time type used by the
     * current rule
     * @param params.rule - The current rule
     * @return Returns an error object or `null`
     */
    _checkTimeType(params: {
        usedTimeType: string | null;
        currentTimeType: string;
        rule: { Prefix?: string[]; Filter?: any; Expiration?: any[] };
    }) {
        const { usedTimeType, currentTimeType, rule } = params;
        if (usedTimeType && usedTimeType !== currentTimeType) {
            const msg = "Found mixed 'Date' and 'Days' based Transition " +
                'actions in lifecycle rule for ' +
                `${this._getRuleFilterDesc(rule)}`;
            return errors.InvalidRequest.customizeDescription(msg);
        }
        // Transition time type cannot differ from the expiration, if provided.
        if (rule.Expiration &&
            rule.Expiration[0][currentTimeType] === undefined) {
            const msg = "Found mixed 'Date' and 'Days' based Expiration and " +
                'Transition actions in lifecycle rule for ' +
                `${this._getRuleFilterDesc(rule)}`;
            return errors.InvalidRequest.customizeDescription(msg);
        }
        return null;
    }

    /**
     * Checks the validity of the given date
@@ -533,6 +700,159 @@
        }
        return null;
    }

    /**
     * Parses the NonCurrentVersionTransition value
     * @param rule - Rule object from Rule array from this._parsedXml
     * @return - Contains error if parsing failed, otherwise contains
     * the parsed nonCurrentVersionTransition array
     *
     * Format of result:
     * result = {
     *     error: <error>,
     *     nonCurrentVersionTransition: [
     *         {
     *             noncurrentDays: <non-current-days>,
     *             storageClass: <storage-class>,
     *         },
     *         ...
     *     ]
     * }
     */
    _parseNoncurrentVersionTransition(rule: {
        NoncurrentVersionTransition: any[];
        Prefix?: string[];
        Filter?: any;
    }) {
        const nonCurrentVersionTransition: {
            noncurrentDays: number;
            storageClass: string;
        }[] = [];
        const usedStorageClasses: string[] = [];
        for (let i = 0; i < rule.NoncurrentVersionTransition.length; i++) {
            const t = rule.NoncurrentVersionTransition[i]; // Transition object
            const noncurrentDays: number | undefined =
                t.NoncurrentDays && Number.parseInt(t.NoncurrentDays[0], 10);
            const storageClass: string | undefined = t.StorageClass && t.StorageClass[0];
            if (noncurrentDays === undefined || storageClass === undefined) {
                return { error: errors.MalformedXML };
            }
            let error = this._checkDays({
                days: noncurrentDays,
                field: 'NoncurrentDays',
                ancestor: 'NoncurrentVersionTransition',
            });
            if (error) {
                return { error };
            }
            error = this._checkStorageClasses({
                storageClass,
                usedStorageClasses,
                ancestor: 'NoncurrentVersionTransition',
                rule,
            });
            if (error) {
                return { error };
            }
            nonCurrentVersionTransition.push({ noncurrentDays, storageClass });
            usedStorageClasses.push(storageClass);
        }
        return { nonCurrentVersionTransition };
    }
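For reference, the parsed-XML input and the result shape of the parser above (values illustrative):

// rule.NoncurrentVersionTransition = [
//     { NoncurrentDays: ['30'], StorageClass: ['us-east-2'] },
// ]
// => { nonCurrentVersionTransition: [{ noncurrentDays: 30, storageClass: 'us-east-2' }] }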

    /**
     * Parses the Transition value
     * @param rule - Rule object from Rule array from this._parsedXml
     * @return - Contains error if parsing failed, otherwise contains
     * the parsed transition array
     *
     * Format of result:
     * result = {
     *     error: <error>,
     *     transition: [
     *         {
     *             days: <days>,
     *             date: <date>,
     *             storageClass: <storage-class>,
     *         },
     *         ...
     *     ]
     * }
     */
    _parseTransition(rule: {
        Transition: any[];
        Prefix?: string[];
        Filter?: any;
    }) {
        const transition:
            ({ days: number; storageClass: string }
            | { date: string; storageClass: string })[] = [];
        const usedStorageClasses: string[] = [];
        let usedTimeType: string | null = null;
        for (let i = 0; i < rule.Transition.length; i++) {
            const t = rule.Transition[i]; // Transition object
            const days = t.Days && Number.parseInt(t.Days[0], 10);
            const date = t.Date && t.Date[0];
            const storageClass = t.StorageClass && t.StorageClass[0];
            if ((days === undefined && date === undefined) ||
                (days !== undefined && date !== undefined) ||
                (storageClass === undefined)) {
                return { error: errors.MalformedXML };
            }
            let error = this._checkStorageClasses({
                storageClass,
                usedStorageClasses,
                ancestor: 'Transition',
                rule,
            });
            if (error) {
                return { error };
            }
            usedStorageClasses.push(storageClass);
            if (days !== undefined) {
                error = this._checkTimeType({
                    usedTimeType,
                    currentTimeType: 'Days',
                    rule,
                });
                if (error) {
                    return { error };
                }
                usedTimeType = 'Days';
                error = this._checkDays({
                    days,
                    field: 'Days',
                    ancestor: 'Transition',
                });
                if (error) {
                    return { error };
                }
                transition.push({ days, storageClass });
            }
            if (date !== undefined) {
                error = this._checkTimeType({
                    usedTimeType,
                    currentTimeType: 'Date',
                    rule,
                });
                if (error) {
                    return { error };
                }
                usedTimeType = 'Date';
                error = this._checkDate(date);
                if (error) {
                    return { error };
                }
                transition.push({ date, storageClass });
            }
            error = this._checkTimeGap({ days, date, storageClass, rule });
            if (error) {
                return { error };
            }
        }
        return { transition };
    }

    /**
     * Check that action component of rule is valid
     * @param rule - a rule object from Rule array from this._parsedXml
@@ -569,8 +889,13 @@
            propName: 'actions',
            actions: [],
        };
        const validActions = ['AbortIncompleteMultipartUpload',
            'Expiration', 'NoncurrentVersionExpiration'];
        const validActions = [
            'AbortIncompleteMultipartUpload',
            'Expiration',
            'NoncurrentVersionExpiration',
            'NoncurrentVersionTransition',
            'Transition',
        ];
        validActions.forEach(a => {
            if (rule[a]) {
                actionsObj.actions.push({ actionName: `${a}` });
@@ -587,7 +912,14 @@
            if (action.error) {
                actionsObj.error = action.error;
            } else {
                const actionTimes = ['days', 'date', 'deleteMarker', 'newerNoncurrentVersions'];
                const actionTimes = [
                    'days',
                    'date',
                    'deleteMarker',
                    'transition',
                    'nonCurrentVersionTransition',
                    'newerNoncurrentVersions'
                ];
                actionTimes.forEach(t => {
                    if (action[t]) {
                        // eslint-disable-next-line no-param-reassign
@@ -821,6 +1153,26 @@
                if (a.deleteMarker) {
                    assert.strictEqual(typeof a.deleteMarker, 'string');
                }
                if (a.nonCurrentVersionTransition) {
                    assert.strictEqual(
                        typeof a.nonCurrentVersionTransition, 'object');
                    a.nonCurrentVersionTransition.forEach(t => {
                        assert.strictEqual(typeof t.noncurrentDays, 'number');
                        assert.strictEqual(typeof t.storageClass, 'string');
                    });
                }
                if (a.transition) {
                    assert.strictEqual(typeof a.transition, 'object');
                    a.transition.forEach(t => {
                        if (t.days || t.days === 0) {
                            assert.strictEqual(typeof t.days, 'number');
                        }
                        if (t.date !== undefined) {
                            assert.strictEqual(typeof t.date, 'string');
                        }
                        assert.strictEqual(typeof t.storageClass, 'string');
                    });
                }

                if (a.newerNoncurrentVersions) {
                    assert.strictEqual(typeof a.newerNoncurrentVersions, 'number');
@@ -874,7 +1226,15 @@
        }

        const Actions = actions.map(action => {
            const { actionName, days, date, deleteMarker, newerNoncurrentVersions } = action;
            const {
                actionName,
                days,
                date,
                deleteMarker,
                nonCurrentVersionTransition,
                transition,
                newerNoncurrentVersions,
            } = action;
            let Action: any;
            if (actionName === 'AbortIncompleteMultipartUpload') {
                Action = `<${actionName}><DaysAfterInitiation>${days}` +
@@ -893,6 +1253,40 @@
                Action = `<${actionName}>${Days}${Date}${DelMarker}` +
                    `</${actionName}>`;
            }
            if (actionName === 'NoncurrentVersionTransition') {
                const xml: string[] = [];
                nonCurrentVersionTransition!.forEach(transition => {
                    const { noncurrentDays, storageClass } = transition;
                    xml.push(
                        `<${actionName}>`,
                        `<NoncurrentDays>${noncurrentDays}` +
                            '</NoncurrentDays>',
                        `<StorageClass>${storageClass}</StorageClass>`,
                        `</${actionName}>`,
                    );
                });
                Action = xml.join('');
            }
            if (actionName === 'Transition') {
                const xml: string[] = [];
                transition!.forEach(transition => {
                    const { days, date, storageClass } = transition;
                    let element: string = '';
                    if (days !== undefined) {
                        element = `<Days>${days}</Days>`;
                    }
                    if (date !== undefined) {
                        element = `<Date>${date}</Date>`;
                    }
                    xml.push(
                        `<${actionName}>`,
                        element,
                        `<StorageClass>${storageClass}</StorageClass>`,
                        `</${actionName}>`,
                    );
                });
                Action = xml.join('');
            }
            return Action;
        }).join('');
        return `<Rule>${ID}${Status}${Filter}${Actions}</Rule>`;
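For a single Days-based transition, the builder above emits XML of this shape (storage class illustrative):

// transition = [{ days: 30, storageClass: 'us-east-2' }]
// => '<Transition><Days>30</Days><StorageClass>us-east-2</StorageClass></Transition>'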
@@ -975,6 +1369,15 @@ export type Rule = {
        date?: number;
        deleteMarker?: boolean;
        newerNoncurrentVersions?: number;
        nonCurrentVersionTransition?: {
            noncurrentDays: number;
            storageClass: string;
        }[];
        transition?: {
            days?: number;
            date?: string;
            storageClass: string;
        }[];
    }[];
    filter?: {
        rulePrefix?: string;
@@ -28,6 +28,7 @@ export default class LifecycleRule {
    ncvExpiration?: NoncurrentExpiration;
    abortMPU?: { DaysAfterInitiation: number };
    transitions?: any[];
    ncvTransitions?: any[];
    prefix?: string;

    constructor(id: string, status: Status) {
@@ -45,6 +46,7 @@
        NoncurrentVersionExpiration?: NoncurrentExpiration;
        AbortIncompleteMultipartUpload?: { DaysAfterInitiation: number };
        Transitions?: any[];
        NoncurrentVersionTransitions?: any[];
        Filter?: Filter;
        Prefix?: '';
    } = { ID: this.id, Status: this.status };
@@ -61,6 +63,9 @@
        if (this.transitions) {
            rule.Transitions = this.transitions;
        }
        if (this.ncvTransitions) {
            rule.NoncurrentVersionTransitions = this.ncvTransitions;
        }

        const filter = this.buildFilter();

@@ -173,4 +178,13 @@
        this.transitions = transitions;
        return this;
    }

    /**
     * NonCurrentVersionTransitions
     * @param nvcTransitions - NonCurrentVersionTransitions
     */
    addNCVTransitions(nvcTransitions) {
        this.ncvTransitions = nvcTransitions;
        return this;
    }
}
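A usage sketch for the new fluent method, assuming the existing LifecycleRule builder flow and a Status value of 'Enabled' (the transition entry shape is passed through verbatim, so the keys below are illustrative):

const rule = new LifecycleRule('ncv-transition-rule', 'Enabled')
    .addNCVTransitions([{ NoncurrentDays: 30, StorageClass: 'us-east-2' }]);
// The rule object assembled above will now carry a NoncurrentVersionTransitions entry.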
@@ -1,3 +1,4 @@
import * as crypto from 'crypto';
import * as constants from '../constants';
import * as VersionIDUtils from '../versioning/VersionID';
import { VersioningConstants } from '../versioning/constants';
@@ -5,6 +6,8 @@ import ObjectMDLocation, {
    ObjectMDLocationData,
    Location,
} from './ObjectMDLocation';
import ObjectMDAmzRestore from './ObjectMDAmzRestore';
import ObjectMDArchive from './ObjectMDArchive';

export type ACL = {
    Canned: string;
@@ -29,6 +32,7 @@ export type ReplicationInfo = {
    role: string;
    storageType: string;
    dataStoreVersionId: string;
    isNFS: boolean | null;
};

export type ObjectMDData = {
@@ -36,24 +40,26 @@
    'owner-id': string;
    'cache-control': string;
    'content-disposition': string;
    'content-language': string;
    'content-encoding': string;
    'creation-time'?: string;
    'last-modified'?: string;
    expires: string;
    'content-length': number;
    'content-type': string;
    'content-md5': string;
    // simple/no version. will expand once object versioning is
    // introduced
    'x-amz-version-id': 'null' | string;
    'x-amz-server-version-id': string;
    // TODO: Handle this as a utility function for all object puts
    // similar to normalizing request but after checkAuth so
    // string to sign is not impacted. This is GH Issue#89.
    'x-amz-restore'?: ObjectMDAmzRestore;
    archive?: ObjectMDArchive;
    'x-amz-storage-class': string;
    'x-amz-server-side-encryption': string;
    'x-amz-server-side-encryption-aws-kms-key-id': string;
    'x-amz-server-side-encryption-customer-algorithm': string;
    'x-amz-website-redirect-location': string;
    'x-amz-scal-transition-in-progress'?: boolean;
    'x-amz-scal-transition-time'?: string;
    azureInfo?: any;
    acl: ACL;
    key: string;
    location: null | Location[];
@@ -73,6 +79,17 @@
    replicationInfo: ReplicationInfo;
    dataStoreName: string;
    originOp: string;
    microVersionId?: string;
    // Deletion flag
    // Used for keeping object metadata in the oplog event
    // In case of a deletion the flag is first updated before
    // deleting the object
    deleted: boolean;
    // PHD flag indicates whether the object is a temporary placeholder.
    // This is the case when the latest version of an object gets deleted:
    // the master is set as a placeholder and gets updated with the new latest
    // version data after a certain amount of time.
    isPHD: boolean;
};

/**
@@ -101,9 +118,17 @@ export default class ObjectMD {
            } else {
                this._updateFromParsedJSON(objMd);
            }
            if (!this._data['creation-time']) {
                const lastModified = this.getLastModified();
                if (lastModified) {
                    this.setCreationTime(lastModified);
                }
            }
        } else {
            // set newly-created object md modified time to current time
            this._data['last-modified'] = new Date().toJSON();
            const dt = new Date().toJSON();
            this.setLastModified(dt);
            this.setCreationTime(dt);
        }
        // set latest md model version now that we ensured
        // backward-compat conversion
@@ -158,6 +183,8 @@
            'content-length': 0,
            'content-type': '',
            'content-md5': '',
            'content-language': '',
            'creation-time': undefined,
            // simple/no version. will expand once object versioning is
            // introduced
            'x-amz-version-id': 'null',
@@ -170,6 +197,7 @@
            'x-amz-server-side-encryption-aws-kms-key-id': '',
            'x-amz-server-side-encryption-customer-algorithm': '',
            'x-amz-website-redirect-location': '',
            'x-amz-scal-transition-in-progress': false,
            acl: {
                Canned: 'private',
                FULL_CONTROL: [],
@@ -179,6 +207,7 @@
            },
            key: '',
            location: null,
            azureInfo: undefined,
            // versionId, isNull, nullVersionId and isDeleteMarker
            // should be undefined when not set explicitly
            isNull: undefined,
@@ -198,9 +227,12 @@
                role: '',
                storageType: '',
                dataStoreVersionId: '',
                isNFS: null,
            },
            dataStoreName: '',
            originOp: '',
            deleted: false,
            isPHD: false,
        };
    }
@@ -430,6 +462,50 @@
        return this._data['content-md5'];
    }

    /**
     * Set content-language
     *
     * @param contentLanguage - content-language
     * @return itself
     */
    setContentLanguage(contentLanguage: string) {
        this._data['content-language'] = contentLanguage;
        return this;
    }

    /**
     * Returns content-language
     *
     * @return content-language
     */
    getContentLanguage() {
        return this._data['content-language'];
    }

    /**
     * Set Creation Date
     *
     * @param creationTime - Creation Date
     * @return itself
     */
    setCreationTime(creationTime: string) {
        this._data['creation-time'] = creationTime;
        return this;
    }

    /**
     * Returns Creation Date
     *
     * @return Creation Date
     */
    getCreationTime() {
        // If creation-time is not set fallback to LastModified
        if (!this._data['creation-time']) {
            return this.getLastModified();
        }
        return this._data['creation-time'];
    }

    /**
     * Set version id
     *
@@ -570,6 +646,48 @@
        return this._data['x-amz-website-redirect-location'];
    }

    /**
     * Set metadata transition in progress value
     *
     * @param inProgress - True if transition is in progress, false otherwise
     * @param transitionTime - Date when the transition started
     * @return itself
     */
    setTransitionInProgress(inProgress: false): this
    setTransitionInProgress(inProgress: true, transitionTime: Date|string|number): this
    setTransitionInProgress(inProgress: boolean, transitionTime?: Date|string|number) {
        this._data['x-amz-scal-transition-in-progress'] = inProgress;
        if (!inProgress || !transitionTime) {
            delete this._data['x-amz-scal-transition-time'];
        } else {
            if (typeof transitionTime === 'number') {
                transitionTime = new Date(transitionTime);
            }
            if (transitionTime instanceof Date) {
                transitionTime = transitionTime.toISOString();
            }
            this._data['x-amz-scal-transition-time'] = transitionTime;
        }
        return this;
    }

    /**
     * Get metadata transition in progress value
     *
     * @return True if transition is in progress, false otherwise
     */
    getTransitionInProgress() {
        return this._data['x-amz-scal-transition-in-progress'];
    }

    /**
     * Gets the transition time of the object.
     * @returns The transition time of the object.
     */
    getTransitionTime() {
        return this._data['x-amz-scal-transition-time'];
    }
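The overloads above only accept a transition time together with inProgress === true; numbers and Date objects are normalized to ISO strings. A usage sketch on an ObjectMD instance `md`:

md.setTransitionInProgress(true, 1704067200000);
md.getTransitionTime();           // => '2024-01-01T00:00:00.000Z'
md.setTransitionInProgress(false);
md.getTransitionTime();           // => undefined (the time field was deleted)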

    /**
     * Set access control list
     *
@@ -675,6 +793,29 @@
        return reducedLocations;
    }

    /**
     * Set the Azure specific information
     * @param azureInfo - a plain JS structure representing the
     * Azure specific information for a Blob or a Container (see constructor
     * of {@link ObjectMDAzureInfo} for a description of the fields of this
     * structure)
     * @return itself
     */
    setAzureInfo(azureInfo: any) {
        this._data.azureInfo = azureInfo;
        return this;
    }

    /**
     * Get the Azure specific information
     * @return a plain JS structure representing the Azure specific
     * information for a Blob or a Container and suitable for the constructor
     * of {@link ObjectMDAzureInfo}.
     */
    getAzureInfo() {
        return this._data.azureInfo;
    }

    /**
     * Set metadata isNull value
     *
@@ -781,6 +922,19 @@
        return this._data.isDeleteMarker || false;
    }

    /**
     * Get if the object is a multipart upload (MPU)
     *
     * The function checks the "content-md5" field: if it contains a
     * dash ('-') it is a MPU, as the content-md5 string ends with
     * "-[nbparts]" for MPUs.
     *
     * @return Whether object is a multipart upload
     */
    isMultipartUpload() {
        return this.getContentMd5().includes('-');
    }
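Why the dash test above works, by example:

// '9b2cf535f27731c974343645a3985328-42'.includes('-') => true  (MPU with 42 parts)
// 'd41d8cd98f00b204e9800998ecf8427e'.includes('-')    => false (single-part upload)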

    /**
     * Set metadata versionId value
     *
@@ -860,6 +1014,20 @@
        return this._data.tags;
    }

    getUserMetadata() {
        const metaHeaders = {};
        const data = this.getValue();
        Object.keys(data).forEach(key => {
            if (key.startsWith('x-amz-meta-')) {
                metaHeaders[key] = data[key];
            }
        });
        if (Object.keys(metaHeaders).length > 0) {
            return JSON.stringify(metaHeaders);
        }
        return undefined;
    }

    /**
     * Set replication information
     *
@@ -875,6 +1043,7 @@
        role: string;
        storageType?: string;
        dataStoreVersionId?: string;
        isNFS?: boolean;
    }) {
        const {
            status,
@@ -885,6 +1054,7 @@
            role,
            storageType,
            dataStoreVersionId,
            isNFS,
        } = replicationInfo;
        this._data.replicationInfo = {
            status,
@@ -895,6 +1065,7 @@
            role,
            storageType: storageType || '',
            dataStoreVersionId: dataStoreVersionId || '',
            isNFS: isNFS || null,
        };
        return this;
    }
@@ -913,6 +1084,24 @@
        return this;
    }

    /**
     * Set whether the replication is occurring from an NFS bucket.
     * @param isNFS - Whether replication from an NFS bucket
     * @return itself
     */
    setReplicationIsNFS(isNFS: boolean) {
        this._data.replicationInfo.isNFS = isNFS;
        return this;
    }

    /**
     * Get whether the replication is occurring from an NFS bucket.
     * @return Whether replication from an NFS bucket
     */
    getReplicationIsNFS() {
        return this._data.replicationInfo.isNFS;
    }

    setReplicationSiteStatus(site: string, status: string) {
        const backend = this._data.replicationInfo.backends.find(
            (o) => o.site === site
@@ -963,6 +1152,11 @@
        return this;
    }

    setReplicationStorageType(storageType: string) {
        this._data.replicationInfo.storageType = storageType;
        return this;
    }

    setReplicationStorageClass(storageClass: string) {
        this._data.replicationInfo.storageClass = storageClass;
        return this;
@@ -1044,6 +1238,9 @@
        Object.keys(metaHeaders).forEach((key) => {
            if (key.startsWith('x-amz-meta-')) {
                this._data[key] = metaHeaders[key];
            } else if (key.startsWith('x-ms-meta-')) {
                const _key = key.replace('x-ms-meta-', 'x-amz-meta-');
                this._data[_key] = metaHeaders[key];
            }
        });
        // If a multipart object and the acl is already parsed, we update it
@@ -1053,6 +1250,20 @@
        return this;
    }

    /**
     * Clear all existing meta headers (used for Azure)
     *
     * @return itself
     */
    clearMetadataValues() {
        Object.keys(this._data).forEach(key => {
            if (key.startsWith('x-amz-meta')) {
                delete this._data[key];
            }
        });
        return this;
    }

    /**
     * overrideMetadataValues (used for complete MPU and object copy)
     *
@@ -1064,6 +1275,38 @@
        return this;
    }

    /**
     * Create or update the microVersionId field
     *
     * This field can be used to force an update in MongoDB. This can
     * be needed in the following cases:
     *
     * - in case no other metadata field changes
     *
     * - to detect a change when fields change but object version does
     *   not change e.g. when ingesting a putObjectTagging coming from
     *   S3C to Zenko
     *
     * - to manage conflicts during concurrent updates, using
     *   conditions on the microVersionId field.
     *
     * It's a field of 16 hexadecimal characters randomly generated
     *
     * @return itself
     */
    updateMicroVersionId() {
        this._data.microVersionId = crypto.randomBytes(8).toString('hex');
    }

    /**
     * Get the microVersionId field, or null if not set
     *
     * @return the microVersionId field if exists, or {null} if it does not exist
     */
    getMicroVersionId() {
        return this._data.microVersionId || null;
    }
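crypto.randomBytes(8) produces 8 random bytes, hex-encoded into the 16-character field described above; a sketch on an ObjectMD instance `md`:

md.getMicroVersionId();  // => null before the first update
md.updateMicroVersionId();
md.getMicroVersionId();  // => e.g. '9f86d081884c7d65' (16 hex characters)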

    /**
     * Set object legal hold status
     * @param legalHold - true if legal hold is 'ON' false if 'OFF'
@@ -1144,4 +1387,98 @@
    getValue() {
        return this._data;
    }

    /**
     * Get x-amz-restore
     *
     * @returns x-amz-restore
     */
    getAmzRestore() {
        return this._data['x-amz-restore'];
    }

    /**
     * Set x-amz-restore
     *
     * @param value x-amz-restore object
     * @returns itself
     * @throws in case of invalid parameter
     */
    setAmzRestore(value?: ObjectMDAmzRestore) {
        if (value) {
            // Accept object instance of ObjectMDAmzRestore and Object
            if (!(value instanceof ObjectMDAmzRestore) && !ObjectMDAmzRestore.isValid(value)) {
                throw new Error('x-amz-restore must be type of ObjectMDAmzRestore.');
            }
            this._data['x-amz-restore'] = value;
        } else {
            delete this._data['x-amz-restore'];
        }
        return this;
    }

    /**
     * Get archive
     *
     * @returns archive
     */
    getArchive() {
        return this._data.archive;
    }

    /**
     * Set archive
     *
     * @param value archive object
     * @returns itself
     * @throws in case of invalid parameter
     */
    setArchive(value: ObjectMDArchive) {
        if (value) {
            // Accept object instance of ObjectMDArchive and Object
            if (!(value instanceof ObjectMDArchive) && !ObjectMDArchive.isValid(value)) {
                throw new Error('archive must be type of ObjectMDArchive.');
            }
            this._data.archive = value;
        } else {
            delete this._data.archive;
        }
        return this;
    }

    /**
     * Set deleted flag
     * @param {Boolean} value deleted object
     * @return {ObjectMD}
     */
    setDeleted(value) {
        this._data.deleted = value;
        return this;
    }

    /**
     * Get deleted flag
     * @return {Boolean}
     */
    getDeleted() {
        return this._data.deleted;
    }

    /**
     * Set isPHD flag
     * @param {Boolean} value isPHD value
     * @return {ObjectMD}
     */
    setIsPHD(value) {
        this._data.isPHD = value;
        return this;
    }

    /**
     * Get isPHD flag
     * @return {Boolean}
     */
    getIsPHD() {
        return this._data.isPHD;
    }
}
@@ -0,0 +1,94 @@
/*
 * Code based on Yutaka Oishi (Fujifilm) contributions
 * Date: 11 Sep 2020
 */

/**
 * class representing the x-amz-restore of object metadata.
 *
 * @class
 */
export default class ObjectMDAmzRestore {
    'expiry-date': Date | string;
    'ongoing-request': boolean;

    /**
     *
     * @constructor
     * @param ongoingRequest ongoing-request
     * @param [expiryDate] expiry-date
     * @throws in case of invalid parameter
     */
    constructor(ongoingRequest: boolean, expiryDate?: Date | string) {
        this.setOngoingRequest(ongoingRequest);
        this.setExpiryDate(expiryDate);
    }

    /**
     *
     * @param data archiveInfo
     * @returns true if the provided object is valid
     */
    static isValid(data: { 'ongoing-request': boolean; 'expiry-date': Date | string }) {
        try {
            // eslint-disable-next-line no-new
            new ObjectMDAmzRestore(data['ongoing-request'], data['expiry-date']);
            return true;
        } catch (err) {
            return false;
        }
    }

    /**
     *
     * @returns ongoing-request
     */
    getOngoingRequest() {
        return this['ongoing-request'];
    }

    /**
     *
     * @param value ongoing-request
     * @throws in case of invalid parameter
     */
    setOngoingRequest(value?: boolean) {
        if (value === undefined) {
            throw new Error('ongoing-request is required.');
        } else if (typeof value !== 'boolean') {
            throw new Error('ongoing-request must be type of boolean.');
        }
        this['ongoing-request'] = value;
    }

    /**
     *
     * @returns expiry-date
     */
    getExpiryDate() {
        return this['expiry-date'];
    }

    /**
     *
     * @param value expiry-date
     * @throws in case of invalid parameter
     */
    setExpiryDate(value?: Date | string) {
        if (value) {
            const checkWith = (new Date(value)).getTime();
            if (Number.isNaN(Number(checkWith))) {
                throw new Error('expiry-date must be a valid Date.');
            }
            this['expiry-date'] = value;
        }
    }

    /**
     *
     * @returns itself
     */
    getValue() {
        return this;
    }
}
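A usage sketch for the class above:

const restore = new ObjectMDAmzRestore(true, new Date('2024-01-02'));
restore.getOngoingRequest();                    // => true
ObjectMDAmzRestore.isValid(restore.getValue()); // => true
// Invalid input is rejected by the setters, e.g. a missing flag throws
// 'ongoing-request is required.'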
@@ -0,0 +1,184 @@
/**
 * class representing the archive of object metadata.
 *
 * @class
 */
export default class ObjectMDArchive {
    archiveInfo: any;
    // @ts-ignore
    restoreRequestedAt: Date | string;
    // @ts-ignore
    restoreRequestedDays: number;
    // @ts-ignore
    restoreCompletedAt: Date | string;
    // @ts-ignore
    restoreWillExpireAt: Date | string;

    /**
     *
     * @constructor
     * @param archiveInfo contains the archive info set by the TLP and returned by the TLP jobs
     * @param [restoreRequestedAt] set at the time restore request is made by the client
     * @param [restoreRequestedDays] set at the time restore request is made by the client
     * @param [restoreCompletedAt] set at the time of successful restore
     * @param [restoreWillExpireAt] computed and stored at the time of restore
     * @throws in case of invalid parameter
     */
    constructor(
        archiveInfo: any,
        restoreRequestedAt?: Date | string,
        restoreRequestedDays?: number,
        restoreCompletedAt?: Date | string,
        restoreWillExpireAt?: Date | string,
    ) {
        this.setArchiveInfo(archiveInfo);
        this.setRestoreRequestedAt(restoreRequestedAt!);
        this.setRestoreRequestedDays(restoreRequestedDays!);
        this.setRestoreCompletedAt(restoreCompletedAt!);
        this.setRestoreWillExpireAt(restoreWillExpireAt!);
    }

    /**
     *
     * @param data archiveInfo
     * @returns true if the provided object is valid
     */
    static isValid(data: {
        archiveInfo: any;
        restoreRequestedAt?: Date;
        restoreRequestedDays?: number;
        restoreCompletedAt?: Date;
        restoreWillExpireAt?: Date;
    }) {
        try {
            // eslint-disable-next-line no-new
            new ObjectMDArchive(
                data.archiveInfo,
                data.restoreRequestedAt,
                data.restoreRequestedDays,
                data.restoreCompletedAt,
                data.restoreWillExpireAt,
            );
            return true;
        } catch (err) {
            return false;
        }
    }
|
||||
|
||||
/**
|
||||
*
|
||||
* @returns archiveInfo
|
||||
*/
|
||||
getArchiveInfo() {
|
||||
return this.archiveInfo;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param value archiveInfo
|
||||
* @throws case of invalid parameter
|
||||
*/
|
||||
setArchiveInfo(value: any) {
|
||||
if (!value) {
|
||||
throw new Error('archiveInfo is required.');
|
||||
} else if (typeof value !== 'object') {
|
||||
throw new Error('archiveInfo must be type of object.');
|
||||
}
|
||||
this.archiveInfo = value;
|
||||
}
|
||||
|
||||
/**
|
||||
*
|
||||
* @returns restoreRequestedAt
|
||||
*/
|
||||
getRestoreRequestedAt() {
|
||||
return this.restoreRequestedAt;
|
||||
}
|
||||
/**
|
||||
* @param value restoreRequestedAt
|
||||
* @throws case of invalid parameter
|
||||
*/
|
||||
setRestoreRequestedAt(value: Date | string) {
|
||||
if (value) {
|
||||
const checkWith = (new Date(value)).getTime();
|
||||
if (Number.isNaN(Number(checkWith))) {
|
||||
throw new Error('restoreRequestedAt must be a valid Date.');
|
||||
}
|
||||
this.restoreRequestedAt = value;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
*
|
||||
* @returns restoreRequestedDays
|
||||
*/
|
||||
getRestoreRequestedDays() {
|
||||
return this.restoreRequestedDays;
|
||||
}
|
||||
/**
|
||||
* @param value restoreRequestedDays
|
||||
* @throws case of invalid parameter
|
||||
*/
|
||||
setRestoreRequestedDays(value: number) {
|
||||
if (value) {
|
||||
if (isNaN(value)) {
|
||||
throw new Error('restoreRequestedDays must be type of Number.');
|
||||
}
|
||||
this.restoreRequestedDays = value;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
*
|
||||
* @returns restoreCompletedAt
|
||||
*/
|
||||
getRestoreCompletedAt() {
|
||||
return this.restoreCompletedAt;
|
||||
}
|
||||
/**
|
||||
* @param value restoreCompletedAt
|
||||
* @throws case of invalid parameter
|
||||
*/
|
||||
setRestoreCompletedAt(value: Date | string) {
|
||||
if (value) {
|
||||
if (!this.restoreRequestedAt || !this.restoreRequestedDays) {
|
||||
throw new Error('restoreCompletedAt must be set after restoreRequestedAt and restoreRequestedDays.');
|
||||
}
|
||||
const checkWith = (new Date(value)).getTime();
|
||||
if (Number.isNaN(Number(checkWith))) {
|
||||
throw new Error('restoreCompletedAt must be a valid Date.');
|
||||
}
|
||||
this.restoreCompletedAt = value;
|
||||
}
|
||||
}
|
||||
/**
|
||||
*
|
||||
* @returns restoreWillExpireAt
|
||||
*/
|
||||
getRestoreWillExpireAt() {
|
||||
return this.restoreWillExpireAt;
|
||||
}
|
||||
/**
|
||||
* @param value restoreWillExpireAt
|
||||
* @throws case of invalid parameter
|
||||
*/
|
||||
setRestoreWillExpireAt(value: Date | string) {
|
||||
if (value) {
|
||||
if (!this.restoreRequestedAt || !this.restoreRequestedDays) {
|
||||
throw new Error('restoreWillExpireAt must be set after restoreRequestedAt and restoreRequestedDays.');
|
||||
}
|
||||
const checkWith = (new Date(value)).getTime();
|
||||
if (Number.isNaN(Number(checkWith))) {
|
||||
throw new Error('restoreWillExpireAt must be a valid Date.');
|
||||
}
|
||||
this.restoreWillExpireAt = value;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
*
|
||||
* @returns itself
|
||||
*/
|
||||
getValue() {
|
||||
return this;
|
||||
}
|
||||
}
|
|
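Note: the setters enforce an ordering: completion and expiry can only be set once the restore request fields exist. A usage sketch (the archiveInfo shape is assumed; the TLP treats it as opaque):

import ObjectMDArchive from './ObjectMDArchive';

const archive = new ObjectMDArchive(
    { archiveId: '97a71dfe-49c1-4cca-840a-69199e0b0322' }, // opaque archiveInfo set by the TLP (shape assumed)
    new Date('2023-01-01'), // restoreRequestedAt
    5,                      // restoreRequestedDays
);
archive.setRestoreCompletedAt(new Date('2023-01-02'));
archive.setRestoreWillExpireAt(new Date('2023-01-07'));
ObjectMDArchive.isValid(archive.getValue()); // true
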
@@ -0,0 +1,188 @@
/**
 * Helper class to ease access to the Azure specific information for
 * Blob and Container objects.
 */
export default class ObjectMDAzureInfo {
    _data: {
        containerPublicAccess: string;
        containerStoredAccessPolicies: any[];
        containerImmutabilityPolicy: any;
        containerLegalHoldStatus: boolean;
        containerDeletionInProgress: boolean;
        blobType: string;
        blobContentMD5: string;
        blobIssuedETag: string;
        blobCopyInfo: any;
        blobSequenceNumber: number;
        blobAccessTierChangeTime: Date;
        blobUncommitted: boolean;
    };

    /**
     * @constructor
     * @param obj - Raw structure for the Azure info on Blob/Container
     * @param obj.containerPublicAccess - Public access authorization
     * type
     * @param obj.containerStoredAccessPolicies - Access policies
     * for Shared Access Signature bearer
     * @param obj.containerImmutabilityPolicy - data immutability
     * policy for this container
     * @param obj.containerLegalHoldStatus - legal hold status for
     * this container
     * @param obj.containerDeletionInProgress - deletion in progress
     * indicator for this container
     * @param obj.blobType - defines the type of blob for this object
     * @param obj.blobContentMD5 - whole object MD5 sum set by the
     * client through the Azure API
     * @param obj.blobIssuedETag - backup of the issued ETag on MD only
     * operations like Set Blob Properties and Set Blob Metadata
     * @param obj.blobCopyInfo - information pertaining to past and
     * pending copy operations targeting this object
     * @param obj.blobSequenceNumber - sequence number for a PageBlob
     * @param obj.blobAccessTierChangeTime - date of change of tier
     * @param obj.blobUncommitted - A block has been put for a
     * nonexistent blob which is about to be created
     */
    constructor(obj: {
        containerPublicAccess: string;
        containerStoredAccessPolicies: any[];
        containerImmutabilityPolicy: any;
        containerLegalHoldStatus: boolean;
        containerDeletionInProgress: boolean;
        blobType: string;
        blobContentMD5: string;
        blobIssuedETag: string;
        blobCopyInfo: any;
        blobSequenceNumber: number;
        blobAccessTierChangeTime: Date;
        blobUncommitted: boolean;
    }) {
        this._data = {
            containerPublicAccess: obj.containerPublicAccess,
            containerStoredAccessPolicies: obj.containerStoredAccessPolicies,
            containerImmutabilityPolicy: obj.containerImmutabilityPolicy,
            containerLegalHoldStatus: obj.containerLegalHoldStatus,
            containerDeletionInProgress: obj.containerDeletionInProgress,
            blobType: obj.blobType,
            blobContentMD5: obj.blobContentMD5,
            blobIssuedETag: obj.blobIssuedETag,
            blobCopyInfo: obj.blobCopyInfo,
            blobSequenceNumber: obj.blobSequenceNumber,
            blobAccessTierChangeTime: obj.blobAccessTierChangeTime,
            blobUncommitted: obj.blobUncommitted,
        };
    }

    getContainerPublicAccess() {
        return this._data.containerPublicAccess;
    }

    setContainerPublicAccess(containerPublicAccess: string) {
        this._data.containerPublicAccess = containerPublicAccess;
        return this;
    }

    getContainerStoredAccessPolicies() {
        return this._data.containerStoredAccessPolicies;
    }

    setContainerStoredAccessPolicies(containerStoredAccessPolicies: any[]) {
        this._data.containerStoredAccessPolicies =
            containerStoredAccessPolicies;
        return this;
    }

    getContainerImmutabilityPolicy() {
        return this._data.containerImmutabilityPolicy;
    }

    setContainerImmutabilityPolicy(containerImmutabilityPolicy: any) {
        this._data.containerImmutabilityPolicy = containerImmutabilityPolicy;
        return this;
    }

    getContainerLegalHoldStatus() {
        return this._data.containerLegalHoldStatus;
    }

    setContainerLegalHoldStatus(containerLegalHoldStatus: boolean) {
        this._data.containerLegalHoldStatus = containerLegalHoldStatus;
        return this;
    }

    getContainerDeletionInProgress() {
        return this._data.containerDeletionInProgress;
    }

    setContainerDeletionInProgress(containerDeletionInProgress: boolean) {
        this._data.containerDeletionInProgress = containerDeletionInProgress;
        return this;
    }

    getBlobType() {
        return this._data.blobType;
    }

    setBlobType(blobType: string) {
        this._data.blobType = blobType;
        return this;
    }

    getBlobContentMD5() {
        return this._data.blobContentMD5;
    }

    setBlobContentMD5(blobContentMD5: string) {
        this._data.blobContentMD5 = blobContentMD5;
        return this;
    }

    getBlobIssuedETag() {
        return this._data.blobIssuedETag;
    }

    setBlobIssuedETag(blobIssuedETag: string) {
        this._data.blobIssuedETag = blobIssuedETag;
        return this;
    }

    getBlobCopyInfo() {
        return this._data.blobCopyInfo;
    }

    setBlobCopyInfo(blobCopyInfo: any) {
        this._data.blobCopyInfo = blobCopyInfo;
        return this;
    }

    getBlobSequenceNumber() {
        return this._data.blobSequenceNumber;
    }

    setBlobSequenceNumber(blobSequenceNumber: number) {
        this._data.blobSequenceNumber = blobSequenceNumber;
        return this;
    }

    getBlobAccessTierChangeTime() {
        return this._data.blobAccessTierChangeTime;
    }

    setBlobAccessTierChangeTime(blobAccessTierChangeTime: Date) {
        this._data.blobAccessTierChangeTime = blobAccessTierChangeTime;
        return this;
    }

    getBlobUncommitted() {
        return this._data.blobUncommitted;
    }

    setBlobUncommitted(blobUncommitted: boolean) {
        this._data.blobUncommitted = blobUncommitted;
        return this;
    }

    getValue() {
        return this._data;
    }
}

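Note: the class is a plain data holder; every setter returns this, so updates chain, and getValue() hands back the internal _data for serialization. A sketch with illustrative values:

import ObjectMDAzureInfo from './ObjectMDAzureInfo';

const azureInfo = new ObjectMDAzureInfo({
    containerPublicAccess: 'container',
    containerStoredAccessPolicies: [],
    containerImmutabilityPolicy: null,
    containerLegalHoldStatus: false,
    containerDeletionInProgress: false,
    blobType: 'BlockBlob',
    blobContentMD5: 'd41d8cd98f00b204e9800998ecf8427e',
    blobIssuedETag: '0x8D0000000000000',
    blobCopyInfo: null,
    blobSequenceNumber: 0,
    blobAccessTierChangeTime: new Date(),
    blobUncommitted: false,
});
azureInfo.setBlobSequenceNumber(1).setBlobUncommitted(true);
const raw = azureInfo.getValue(); // plain object, ready to embed in ObjectMD
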
@@ -5,6 +5,7 @@ export type Location = BaseLocation & {
    size: number;
    dataStoreETag: string;
    dataStoreVersionId: string;
    blockId?: string;
};
export type ObjectMDLocationData = {
    key: string;
@@ -12,6 +13,8 @@ export type ObjectMDLocationData = {
    size: number;
    dataStoreName: string;
    dataStoreETag: string;
    dataStoreVersionId: string;
    blockId?: string;
    cryptoScheme?: number;
    cipheredDataKey?: string;
};
@@ -31,10 +34,14 @@ export default class ObjectMDLocation {
     * @param locationObj.dataStoreName - type of data store
     * @param locationObj.dataStoreETag - internal ETag of
     * data part
     * @param [locationObj.dataStoreVersionId] - versionId,
     * needed for cloud backends
     * @param [location.cryptoScheme] - if location data is
     * encrypted: the encryption scheme version
     * @param [location.cipheredDataKey] - if location data
     * is encrypted: the base64-encoded ciphered data key
     * @param [locationObj.blockId] - blockId of the part,
     * set by the Azure Blob Service REST API frontend
     */
    constructor(locationObj: Location | (Location & Ciphered)) {
        this._data = {
@@ -43,6 +50,8 @@ export default class ObjectMDLocation {
            size: locationObj.size,
            dataStoreName: locationObj.dataStoreName,
            dataStoreETag: locationObj.dataStoreETag,
            dataStoreVersionId: locationObj.dataStoreVersionId,
            blockId: locationObj.blockId,
        };
        if ('cryptoScheme' in locationObj) {
            this._data.cryptoScheme = locationObj.cryptoScheme;
@@ -64,6 +73,7 @@ export default class ObjectMDLocation {
     * @param location - single data location info
     * @param location.key - data backend key
     * @param location.dataStoreName - type of data store
     * @param [location.dataStoreVersionId] - data backend version ID
     * @param [location.cryptoScheme] - if location data is
     * encrypted: the encryption scheme version
     * @param [location.cipheredDataKey] - if location data
@@ -71,15 +81,19 @@ export default class ObjectMDLocation {
     * @return return this
     */
    setDataLocation(location: BaseLocation | (BaseLocation & Ciphered)) {
        ['key', 'dataStoreName', 'cryptoScheme', 'cipheredDataKey'].forEach(
            (attrName) => {
        [
            'key',
            'dataStoreName',
            'dataStoreVersionId',
            'cryptoScheme',
            'cipheredDataKey',
        ].forEach(attrName => {
            if (location[attrName] !== undefined) {
                this._data[attrName] = location[attrName];
            } else {
                delete this._data[attrName];
            }
            }
        );
        });
        return this;
    }

@@ -87,6 +101,10 @@ export default class ObjectMDLocation {
        return this._data.dataStoreETag;
    }

    getDataStoreVersionId() {
        return this._data.dataStoreVersionId;
    }

    getPartNumber() {
        return Number.parseInt(this._data.dataStoreETag.split(':')[0], 10);
    }
@@ -121,6 +139,15 @@ export default class ObjectMDLocation {
        return this._data.cipheredDataKey;
    }

    getBlockId() {
        return this._data.blockId;
    }

    setBlockId(blockId: string) {
        this._data.blockId = blockId;
        return this;
    }

    getValue() {
        return this._data;
    }

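Note: with dataStoreVersionId added to the copied attribute list, a version ID returned by a cloud backend now survives setDataLocation(); attributes absent from the new location are deleted. An illustrative sketch (the exact required field set is assumed from the Location type):

const loc = new ObjectMDLocation({
    key: 'oldKey',
    start: 0,
    size: 10,
    dataStoreName: 'awsbackend',
    dataStoreETag: '1:d41d8cd98f00b204e9800998ecf8427e',
    dataStoreVersionId: 'v1',
});
loc.setDataLocation({
    key: 'newKey',
    dataStoreName: 'awsbackend',
    dataStoreVersionId: 'v2',
});
loc.getDataStoreVersionId(); // 'v2'
loc.getBlockId(); // undefined (not provided by the new location)
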
@@ -64,7 +64,8 @@ export default class ReplicationConfiguration {
    _destination: string | null;
    _rules: Rule[] | null;
    _prevStorageClass: null;
    _hasScalityDestination: boolean;
    _hasScalityDestination: boolean | null;
    _preferredReadLocation: string | null;

    /**
     * Create a ReplicationConfiguration instance
@@ -86,7 +87,8 @@ export default class ReplicationConfiguration {
        this._destination = null;
        this._rules = null;
        this._prevStorageClass = null;
        this._hasScalityDestination = false;
        this._hasScalityDestination = null;
        this._preferredReadLocation = null;
    }

    /**
@@ -113,6 +115,18 @@ export default class ReplicationConfiguration {
        return this._rules;
    }

    /**
     * The preferred read location
     * @return {string|null} - The preferred read location if defined,
     * otherwise null
     *
     * FIXME ideally we should be able to specify one preferred read
     * location for each rule
     */
    getPreferredReadLocation() {
        return this._preferredReadLocation;
    }

    /**
     * Get the replication configuration
     * @return - The replication configuration
@@ -122,6 +136,7 @@ export default class ReplicationConfiguration {
            role: this.getRole(),
            destination: this.getDestination(),
            rules: this.getRules(),
            preferredReadLocation: this.getPreferredReadLocation(),
        };
    }

@@ -328,7 +343,15 @@ export default class ReplicationConfiguration {
            return undefined;
        }
        const storageClasses = destination.StorageClass[0].split(',');
        const isValidStorageClass = storageClasses.every((storageClass) => {
        const prefReadIndex = storageClasses.findIndex(storageClass =>
            storageClass.endsWith(':preferred_read'));
        if (prefReadIndex !== -1) {
            const prefRead = storageClasses[prefReadIndex].split(':')[0];
            // remove :preferred_read tag from storage class name
            storageClasses[prefReadIndex] = prefRead;
            this._preferredReadLocation = prefRead;
        }
        const isValidStorageClass = storageClasses.every(storageClass => {
            if (validStorageClasses.includes(storageClass)) {
                this._hasScalityDestination =
                    defaultEndpoint.type === undefined;
@@ -338,6 +361,11 @@ export default class ReplicationConfiguration {
                (endpoint: any) => endpoint.site === storageClass
            );
            if (endpoint) {
                // We do not support replication to a cold location.
                // Only transition to a cold location is supported.
                if (endpoint.site && this._config.locationConstraints[endpoint.site]?.isCold) {
                    return false;
                }
                // If this._hasScalityDestination was not set to true in any
                // previous iteration or by a prior rule's storage class, then
                // check if the current endpoint is a Scality destination.

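Note: how the :preferred_read marker is consumed, with illustrative values. Given a rule whose StorageClass is 'us-east-1:preferred_read,awsbackend', the parser strips the marker, records the preferred read location, and validates the remaining names as plain storage classes:

const storageClasses = 'us-east-1:preferred_read,awsbackend'.split(',');
const prefReadIndex = storageClasses.findIndex(sc => sc.endsWith(':preferred_read'));
if (prefReadIndex !== -1) {
    // strip the marker; 'us-east-1' is both a storage class and
    // the remembered preferred read location
    storageClasses[prefReadIndex] = storageClasses[prefReadIndex].split(':')[0];
}
// storageClasses -> ['us-east-1', 'awsbackend']
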
@@ -1,11 +1,16 @@
export { default as ARN } from './ARN';
export { default as BackendInfo } from './BackendInfo';
export { default as BucketAzureInfo } from './BucketAzureInfo';
export { default as BucketInfo } from './BucketInfo';
export { default as ObjectMD } from './ObjectMD';
export { default as ObjectMDLocation } from './ObjectMDLocation';
export * as WebsiteConfiguration from './WebsiteConfiguration';
export { default as ReplicationConfiguration } from './ReplicationConfiguration';
export { default as BucketPolicy } from './BucketPolicy';
export { default as LifecycleConfiguration } from './LifecycleConfiguration';
export { default as LifecycleRule } from './LifecycleRule';
export { default as BucketPolicy } from './BucketPolicy';
export { default as ObjectLockConfiguration } from './ObjectLockConfiguration';
export { default as NotificationConfiguration } from './NotificationConfiguration';
export { default as ObjectLockConfiguration } from './ObjectLockConfiguration';
export { default as ObjectMD } from './ObjectMD';
export { default as ObjectMDAmzRestore } from './ObjectMDAmzRestore';
export { default as ObjectMDArchive } from './ObjectMDArchive';
export { default as ObjectMDAzureInfo } from './ObjectMDAzureInfo';
export { default as ObjectMDLocation } from './ObjectMDLocation';
export { default as ReplicationConfiguration } from './ReplicationConfiguration';
export * as WebsiteConfiguration from './WebsiteConfiguration';

@@ -1,5 +1,6 @@
import * as http from 'http';
import * as https from 'https';
import { https as HttpsAgent } from 'httpagent';
import * as tls from 'tls';
import * as net from 'net';
import assert from 'assert';
@@ -371,6 +372,8 @@ export default class Server {
            error: err.stack || err,
            address: sock.address(),
        });
        // socket is not systematically destroyed
        sock.destroy();
    }

    /**
@@ -407,7 +410,11 @@ export default class Server {
                method: 'arsenal.network.Server.start',
                port: this._port,
            });
            this._https.agent = new https.Agent(this._https);
            this._https.agent = new HttpsAgent.Agent(this._https, {
                // Do not enforce the maximum number of sockets for the
                // main server, as it might be able to serve more clients.
                maxSockets: false,
            });
            this._server = https.createServer(this._https,
                (req, res) => this._onRequest(req, res));
        } else {
@@ -428,7 +435,6 @@ export default class Server {
        this._server.on('connection', sock => {
            // Setting no delay of the socket to the value configured
            // TODO fix this
            // @ts-expect-errors
            sock.setNoDelay(this.isNoDelay());
            sock.on('error', err => this._logger.info(
                'socket error - request rejected', { error: err }));

@@ -3,10 +3,12 @@ import * as utils from './http/utils';
import RESTServer from './rest/RESTServer';
import RESTClient from './rest/RESTClient';
import * as ProbeServer from './probe/ProbeServer';
import HealthProbeServer from './probe/HealthProbeServer';
import * as Utils from './probe/Utils';

export const http = { server, utils };
export const rest = { RESTServer, RESTClient };
export const probe = { ProbeServer };
export const probe = { ProbeServer, HealthProbeServer, Utils };

export { default as RoundRobin } from './RoundRobin';
export { default as kmip } from './kmip';

@@ -0,0 +1,94 @@
import * as http from 'http';
import httpServer from '../http/server';
import * as werelogs from 'werelogs';
import errors from '../../errors';
import ZenkoMetrics from '../../metrics/ZenkoMetrics';
import { sendSuccess, sendError } from './Utils';

function checkStub(_log: any) { // eslint-disable-line
    return true;
}

export default class HealthProbeServer extends httpServer {
    logging: werelogs.Logger;
    _reqHandlers: { [key: string]: any };
    _livenessCheck: (log: any) => boolean;
    _readinessCheck: (log: any) => boolean;

    constructor(params: {
        port: number;
        bindAddress: string;
        livenessCheck?: (log: any) => boolean;
        readinessCheck?: (log: any) => boolean;
    }) {
        const logging = new werelogs.Logger('HealthProbeServer');
        super(params.port, logging);
        this.logging = logging;
        this.setBindAddress(params.bindAddress || 'localhost');
        // hook our request processing function in through the
        // parent class's method
        this.onRequest(this._onRequest);
        this._reqHandlers = {
            '/_/health/liveness': this._onLiveness.bind(this),
            '/_/health/readiness': this._onReadiness.bind(this),
            '/_/monitoring/metrics': this._onMetrics.bind(this),
        };
        this._livenessCheck = params.livenessCheck || checkStub;
        this._readinessCheck = params.readinessCheck || checkStub;
    }

    onLiveCheck(f: (log: any) => boolean) {
        this._livenessCheck = f;
    }

    onReadyCheck(f: (log: any) => boolean) {
        this._readinessCheck = f;
    }

    _onRequest(req: http.IncomingMessage, res: http.ServerResponse) {
        const log = this.logging.newRequestLogger();
        log.debug('request received', { method: req.method, url: req.url });

        if (req.method !== 'GET') {
            sendError(res, log, errors.MethodNotAllowed);
        } else if (req.url && req.url in this._reqHandlers) {
            this._reqHandlers[req.url](req, res, log);
        } else {
            sendError(res, log, errors.InvalidURI);
        }
    }

    _onLiveness(
        _req: http.IncomingMessage,
        res: http.ServerResponse,
        log: werelogs.RequestLogger,
    ) {
        if (this._livenessCheck(log)) {
            sendSuccess(res, log);
        } else {
            sendError(res, log, errors.ServiceUnavailable);
        }
    }

    _onReadiness(
        _req: http.IncomingMessage,
        res: http.ServerResponse,
        log: werelogs.RequestLogger,
    ) {
        if (this._readinessCheck(log)) {
            sendSuccess(res, log);
        } else {
            sendError(res, log, errors.ServiceUnavailable);
        }
    }

    // expose metrics to Prometheus
    async _onMetrics(_req: http.IncomingMessage, res: http.ServerResponse) {
        const metrics = await ZenkoMetrics.asPrometheus();
        res.writeHead(200, {
            'Content-Type': ZenkoMetrics.asPrometheusContentType(),
        });
        res.end(metrics);
    }
}

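Note: a minimal instantiation sketch; the readiness delegate here is a hypothetical application flag, and liveness keeps the built-in stub:

import HealthProbeServer from './HealthProbeServer';

let backendConnected = false; // assumed application state

const probeServer = new HealthProbeServer({
    port: 8000,
    bindAddress: '0.0.0.0',
    readinessCheck: () => backendConnected,
});
probeServer.start(); // start() comes from the parent httpServer

// GET /_/health/liveness    -> 200 OK
// GET /_/health/readiness   -> 200 while backendConnected, 503 otherwise
// GET /_/monitoring/metrics -> Prometheus exposition text
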
@@ -4,19 +4,16 @@ import * as werelogs from 'werelogs';
import errors from '../../errors';

export const DEFAULT_LIVE_ROUTE = '/_/live';
export const DEFAULT_READY_ROUTE = '/_/live';
export const DEFAULT_METRICS_ROUTE = '/_/metrics';
export const DEFAULT_READY_ROUTE = '/_/ready';
export const DEFAULT_METRICS_ROUTE = '/metrics';

/**
 * ProbeDelegate is used to determine if a probe is successful or
 * if any errors are present.
 * If everything is working as intended, it is a no-op.
 * Otherwise, return a string representing what is failing.
 * ProbeDelegate is used to handle probe checks.
 * You can use sendSuccess and sendError from Utils to handle success
 * and failure conditions.
 * @callback ProbeDelegate
 * @param res - HTTP response for writing
 * @param log - Werelogs instance for logging if you choose to
 * @return String representing issues to report. An empty
 * string or undefined is used to represent no issues.
 */

export type ProbeDelegate = (res: http.ServerResponse, log: werelogs.RequestLogger) => string | void

@@ -90,6 +87,6 @@ export class ProbeServer extends httpServer {
            return;
        }

        this._handlers.get(req.url!)!(res, log);
        this._handlers.get(req.url ?? '')?.(res, log);
    }
}

@@ -0,0 +1,49 @@
import * as http from 'http';

import { RequestLogger } from 'werelogs';

import { ArsenalError } from '../../errors';

/**
 * Send a successful HTTP response of 200 OK
 * @param res - HTTP response for writing
 * @param log - Werelogs instance for logging if you choose to
 * @param [message] - Message to send as response, defaults to OK
 */
export function sendSuccess(
    res: http.ServerResponse,
    log: RequestLogger,
    message = 'OK'
) {
    log.debug('replying with success');
    res.writeHead(200);
    res.end(message);
}

/**
 * Send an Arsenal Error response
 * @param res - HTTP response for writing
 * @param log - Werelogs instance for logging if you choose to
 * @param error - Error to send back to the user
 * @param [optMessage] - Message to use instead of the error's message
 */
export function sendError(
    res: http.ServerResponse,
    log: RequestLogger,
    error: ArsenalError,
    optMessage?: string
) {
    const message = optMessage || error.description || '';
    log.debug('sending back error response', {
        httpCode: error.code,
        errorType: error.message,
        error: message,
    });
    res.writeHead(error.code);
    res.end(
        JSON.stringify({
            errorType: error.message,
            errorMessage: message,
        })
    );
}

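Note: a sketch of a ProbeDelegate built on these helpers; the disk check is a hypothetical application hook:

import * as http from 'http';
import { RequestLogger } from 'werelogs';
import errors from '../../errors';
import { sendSuccess, sendError } from './Utils';

const checkDiskSomehow = () => true; // hypothetical application check

function diskProbe(res: http.ServerResponse, log: RequestLogger) {
    if (checkDiskSomehow()) {
        sendSuccess(res, log);
    } else {
        sendError(res, log, errors.ServiceUnavailable, 'disk is full');
    }
}
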
@@ -4,7 +4,7 @@ import * as werelogs from 'werelogs';
import * as constants from '../../constants';
import * as utils from './utils';
import errors, { ArsenalError } from '../../errors';
import HttpAgent from 'agentkeepalive';
import { http as HttpAgent } from 'httpagent';
import * as stream from 'stream';

function setRequestUids(reqHeaders: http.IncomingHttpHeaders, reqUids: string) {
@@ -71,8 +71,9 @@ function makeErrorFromHTTPResponse(response: http.IncomingMessage) {
export default class RESTClient {
    host: string;
    port: number;
    httpAgent: HttpAgent;
    httpAgent: http.Agent;
    logging: werelogs.Logger;
    isPassthrough: boolean;

    /**
     * Interface to the data file server
@@ -88,17 +89,19 @@ export default class RESTClient {
        host: string;
        port: number;
        logApi: { Logger: typeof werelogs.Logger };
        isPassthrough?: boolean;
    }) {
        assert(params.host);
        assert(params.port);

        this.host = params.host;
        this.port = params.port;
        this.isPassthrough = params.isPassthrough || false;
        this.logging = new (params.logApi || werelogs).Logger('DataFileRESTClient');
        this.httpAgent = new HttpAgent({
        this.httpAgent = new HttpAgent.Agent({
            keepAlive: true,
            freeSocketTimeout: constants.httpClientFreeSocketTimeout,
        });
        }) as http.Agent;
    }

    /** Destroy the HTTP agent, forcing a close of the remaining open connections */

@@ -121,11 +124,13 @@ export default class RESTClient {
    ) {
        const reqHeaders = headers || {};
        const urlKey = key || '';
        const prefix = this.isPassthrough ?
            constants.passthroughFileURL : constants.dataFileURL;
        const reqParams = {
            hostname: this.host,
            port: this.port,
            method,
            path: `${constants.dataFileURL}/${urlKey}`,
            path: encodeURI(`${prefix}/${urlKey}`),
            headers: reqHeaders,
            agent: this.httpAgent,
        };

@@ -4,7 +4,7 @@ import * as werelogs from 'werelogs';
import * as http from 'http';
import httpServer from '../http/server';
import * as constants from '../../constants';
import * as utils from './utils';
import { parseURL } from './utils';
import * as httpUtils from '../http/utils';
import errors, { ArsenalError } from '../../errors';

@@ -38,42 +38,6 @@ function sendError(
        errorMessage: message })}\n`);
}

/**
 * Parse the given url and return a pathInfo object. Sanity checks are
 * performed.
 *
 * @param urlStr - URL to parse
 * @param expectKey - whether the command expects to see a
 * key in the URL
 * @return a pathInfo object with URL items containing the
 * following attributes:
 *   - pathInfo.service {String} - The name of REST service ("DataFile")
 *   - pathInfo.key {String} - The requested key
 */
function parseURL(urlStr: string, expectKey: boolean) {
    const urlObj = url.parse(urlStr);
    const pathInfo = utils.explodePath(urlObj.path!);
    if (pathInfo.service !== constants.dataFileURL) {
        throw errors.InvalidAction.customizeDescription(
            `unsupported service '${pathInfo.service}'`);
    }
    if (expectKey && pathInfo.key === undefined) {
        throw errors.MissingParameter.customizeDescription(
            'URL is missing key');
    }
    if (!expectKey && pathInfo.key !== undefined) {
        // note: we may implement rewrite functionality by allowing a
        // key in the URL, though we may still provide the new key in
        // the Location header to keep immutability property and
        // atomicity of the update (we would just remove the old
        // object when the new one has been written entirely in this
        // case, saving a request over an equivalent PUT + DELETE).
        throw errors.InvalidURI.customizeDescription(
            'PUT url cannot contain a key');
    }
    return pathInfo;
}

/**
 * @class
 * @classdesc REST Server interface

@@ -1,6 +1,16 @@
import errors from '../../errors';
import * as constants from '../../constants';
import * as url from 'url';

const passthroughPrefixLength = constants.passthroughFileURL.length;

export function explodePath(path: string) {
    if (path.startsWith(constants.passthroughFileURL)) {
        const key = path.slice(passthroughPrefixLength + 1);
        return {
            service: constants.passthroughFileURL,
            key: key.length > 0 ? key : undefined,
        };
    }
    const pathMatch = /^(\/[a-zA-Z0-9]+)(\/([0-9a-f]*))?$/.exec(path);
    if (pathMatch) {
        return {
@@ -10,4 +20,41 @@ export function explodePath(path: string) {
        };
    }
    throw errors.InvalidURI.customizeDescription('malformed URI');
};
}

/**
 * Parse the given url and return a pathInfo object. Sanity checks are
 * performed.
 *
 * @param urlStr - URL to parse
 * @param expectKey - whether the command expects to see a
 * key in the URL
 * @return a pathInfo object with URL items containing the
 * following attributes:
 *   - pathInfo.service {String} - The name of REST service ("DataFile")
 *   - pathInfo.key {String} - The requested key
 */
export function parseURL(urlStr: string, expectKey: boolean) {
    const urlObj = url.parse(urlStr);
    const pathInfo = explodePath(decodeURI(urlObj.path!));
    if ((pathInfo.service !== constants.dataFileURL)
        && (pathInfo.service !== constants.passthroughFileURL)) {
        throw errors.InvalidAction.customizeDescription(
            `unsupported service '${pathInfo.service}'`);
    }
    if (expectKey && pathInfo.key === undefined) {
        throw errors.MissingParameter.customizeDescription(
            'URL is missing key');
    }
    if (!expectKey && pathInfo.key !== undefined) {
        // note: we may implement rewrite functionality by allowing a
        // key in the URL, though we may still provide the new key in
        // the Location header to keep immutability property and
        // atomicity of the update (we would just remove the old
        // object when the new one has been written entirely in this
        // case, saving a request over an equivalent PUT + DELETE).
        throw errors.InvalidURI.customizeDescription(
            'PUT url cannot contain a key');
    }
    return pathInfo;
}

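Note: expected behavior of the relocated parseURL, assuming constants.dataFileURL is '/DataFile' (paths are illustrative):

import { parseURL } from './utils';

parseURL('/DataFile/0123abcd', true);
// -> { service: '/DataFile', key: '0123abcd' }
parseURL('/DataFile', false);
// -> { service: '/DataFile', key: undefined }
parseURL('/DataFile/0123abcd', false);
// throws InvalidURI ('PUT url cannot contain a key')
parseURL('/Unknown/abc', true);
// throws InvalidAction ("unsupported service '/Unknown'")
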
@@ -0,0 +1,209 @@
import { URL } from 'url';
import { decryptSecret } from '../executables/pensieveCreds/utils';
import { Logger } from 'werelogs';

export type LocationType =
    | 'location-mem-v1'
    | 'location-file-v1'
    | 'location-azure-v1'
    | 'location-ceph-radosgw-s3-v1'
    | 'location-scality-ring-s3-v1'
    | 'location-aws-s3-v1'
    | 'location-wasabi-v1'
    | 'location-do-spaces-v1'
    | 'location-gcp-v1'
    | 'location-scality-sproxyd-v1'
    | 'location-nfs-mount-v1'
    | 'location-scality-hdclient-v2';

export interface OverlayLocations {
    [key: string]: {
        name: string;
        objectId: string;
        details?: any;
        locationType: string;
        sizeLimitGB?: number;
        isTransient?: boolean;
        legacyAwsBehavior?: boolean;
    };
}

export type Location = {
    type:
        | 'mem'
        | 'file'
        | 'azure'
        | 'aws_s3'
        | 'gcp'
        | 'scality'
        | 'pfs'
        | 'scality';
    name: string;
    objectId: string;
    details: { [key: string]: any };
    locationType: string;
    sizeLimitGB: number | null;
    isTransient: boolean;
    legacyAwsBehavior: boolean;
};

export function patchLocations(
    overlayLocations: OverlayLocations | undefined | null,
    creds: any,
    log: Logger
) {
    const locs = overlayLocations ?? {};
    return Object.entries(locs).reduce<{ [key: string]: Location }>(
        (acc, [k, l]) => {
            const location: Location = {
                type: 'mem',
                name: k,
                objectId: l.objectId,
                details: l.details || {},
                locationType: l.locationType,
                sizeLimitGB: l.sizeLimitGB || null,
                isTransient: Boolean(l.isTransient),
                legacyAwsBehavior: Boolean(l.legacyAwsBehavior),
            };
            let supportsVersioning = false;
            let pathStyle = process.env.CI_CEPH !== undefined;

            switch (l.locationType) {
            case 'location-mem-v1':
                location.type = 'mem';
                location.details = { supportsVersioning: true };
                break;
            case 'location-file-v1':
                location.type = 'file';
                location.details = { supportsVersioning: true };
                break;
            case 'location-azure-v1':
                location.type = 'azure';
                if (l.details.secretKey && l.details.secretKey.length > 0) {
                    location.details = {
                        bucketMatch: l.details.bucketMatch,
                        azureStorageEndpoint: l.details.endpoint,
                        azureStorageAccountName: l.details.accessKey,
                        azureStorageAccessKey: decryptSecret(
                            creds,
                            l.details.secretKey
                        ),
                        azureContainerName: l.details.bucketName,
                    };
                }
                break;
            case 'location-ceph-radosgw-s3-v1':
            case 'location-scality-ring-s3-v1':
                pathStyle = true; // fallthrough
            case 'location-aws-s3-v1':
            case 'location-wasabi-v1':
                supportsVersioning = true; // fallthrough
            case 'location-do-spaces-v1':
                location.type = 'aws_s3';
                if (l.details.secretKey && l.details.secretKey.length > 0) {
                    let https = true;
                    let awsEndpoint =
                        l.details.endpoint || 's3.amazonaws.com';
                    if (awsEndpoint.includes('://')) {
                        const url = new URL(awsEndpoint);
                        awsEndpoint = url.host;
                        https = url.protocol.includes('https');
                    }

                    location.details = {
                        credentials: {
                            accessKey: l.details.accessKey,
                            secretKey: decryptSecret(
                                creds,
                                l.details.secretKey
                            ),
                        },
                        bucketName: l.details.bucketName,
                        bucketMatch: l.details.bucketMatch,
                        serverSideEncryption: Boolean(
                            l.details.serverSideEncryption
                        ),
                        region: l.details.region,
                        awsEndpoint,
                        supportsVersioning,
                        pathStyle,
                        https,
                    };
                }
                break;
            case 'location-gcp-v1':
                location.type = 'gcp';
                if (l.details.secretKey && l.details.secretKey.length > 0) {
                    location.details = {
                        credentials: {
                            accessKey: l.details.accessKey,
                            secretKey: decryptSecret(
                                creds,
                                l.details.secretKey
                            ),
                        },
                        bucketName: l.details.bucketName,
                        mpuBucketName: l.details.mpuBucketName,
                        bucketMatch: l.details.bucketMatch,
                        gcpEndpoint:
                            l.details.endpoint || 'storage.googleapis.com',
                        https: true,
                    };
                }
                break;
            case 'location-scality-sproxyd-v1':
                location.type = 'scality';
                if (
                    l.details &&
                    l.details.bootstrapList &&
                    l.details.proxyPath
                ) {
                    location.details = {
                        supportsVersioning: true,
                        connector: {
                            sproxyd: {
                                chordCos: l.details.chordCos || null,
                                bootstrap: l.details.bootstrapList,
                                path: l.details.proxyPath,
                            },
                        },
                    };
                }
                break;
            case 'location-nfs-mount-v1':
                location.type = 'pfs';
                if (l.details) {
                    location.details = {
                        supportsVersioning: true,
                        bucketMatch: true,
                        pfsDaemonEndpoint: {
                            host: `${l.name}-cosmos-pfsd`,
                            port: 80,
                        },
                    };
                }
                break;
            case 'location-scality-hdclient-v2':
                location.type = 'scality';
                if (l.details && l.details.bootstrapList) {
                    location.details = {
                        supportsVersioning: true,
                        connector: {
                            hdclient: {
                                bootstrap: l.details.bootstrapList,
                            },
                        },
                    };
                }
                break;
            default:
                log.info('unknown location type', {
                    locationType: l.locationType,
                });
                return acc;
            }
            return { ...acc, [location.name]: location };
        },
        {}
    );
}

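Note: a sketch of running one overlay location through patchLocations; the module path, objectId and credentials are placeholders, and creds must be whatever decryptSecret expects:

import { Logger } from 'werelogs';
import { patchLocations } from './patchLocations'; // assumed module path

const log = new Logger('config');
const creds = {}; // placeholder input for decryptSecret(creds, ...)

const locations = patchLocations({
    'ring-s3': {
        name: 'ring-s3',
        objectId: '00000000-0000-0000-0000-000000000000',
        locationType: 'location-scality-ring-s3-v1',
        details: {
            accessKey: 'ACCESS_KEY_PLACEHOLDER',
            secretKey: 'ENCRYPTED_SECRET_PLACEHOLDER',
            bucketName: 'target-bucket',
            bucketMatch: false,
            endpoint: 'https://s3.ring.example.com',
        },
    },
}, creds, log);
// locations['ring-s3'] -> type 'aws_s3', with pathStyle: true (ring/ceph
// branch), supportsVersioning: true, https: true, and
// awsEndpoint: 's3.ring.example.com'
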
@@ -38,6 +38,10 @@
    "type": "string",
    "pattern": "^arn:aws:iam::[0-9]{12}:saml-provider/[\\w._-]{1,128}$"
},
"principalFederatedOidcIdp": {
    "type": "string",
    "pattern": "^(?:http(s)?:\/\/)?[\\w.-]+(?:\\.[\\w\\.-]+)+[\\w\\-\\._~:/?#[\\]@!\\$&'\\(\\)\\*\\+,;=.]+$"
},
"principalAWSItem": {
    "type": "object",
    "properties": {
@@ -98,6 +102,9 @@
    "oneOf": [
        {
            "$ref": "#/definitions/principalFederatedSamlIdp"
        },
        {
            "$ref": "#/definitions/principalFederatedOidcIdp"
        }
    ]
}

@@ -15,11 +15,36 @@ import {
    actionMapScuba,
} from './utils/actionMaps';

const _actionNeedQuotaCheck = {
export const actionNeedQuotaCheck = {
    objectPut: true,
    objectPutVersion: true,
    objectPutPart: true,
    objectRestore: true,
};

/**
 * This variable describes APIs that change the bytes
 * stored, requiring quota updates
 */
export const actionWithDataDeletion = {
    objectDelete: true,
    objectDeleteVersion: true,
    multipartDelete: true,
    multiObjectDelete: true,
};

/**
 * The function returns true if the current API call is a copy object
 * and the action requires quota evaluation logic, post retrieval
 * of the object metadata.
 * @param {string} action - the action being performed
 * @param {string} currentApi - the current API being called
 * @return {boolean} - whether the action requires a quota check
 */
export function actionNeedQuotaCheckCopy(action: string, currentApi: string) {
    return action === 'objectGet' && (currentApi === 'objectCopy' || currentApi === 'objectPutCopyPart');
}

function _findAction(service: string, method: string) {
    switch (service) {
        case 's3':
@@ -126,7 +151,9 @@ export type RequesterInfo = {
    principalType: string;
    principaltype: string;
    userid: string;
    username: string,
    username: string;
    keycloakGroup: string;
    keycloakRole: string;
}

/**
@@ -229,7 +256,8 @@ export default class RequestContext {
        this._securityToken = securityToken;
        this._policyArn = policyArn;
        this._action = action;
        this._needQuota = _actionNeedQuotaCheck[apiMethod] === true;
        this._needQuota = actionNeedQuotaCheck[apiMethod] === true
            || actionWithDataDeletion[apiMethod] === true;
        this._requestObjTags = requestObjTags || null;
        this._existingObjTag = existingObjTag || null;
        this._needTagEval = needTagEval || false;

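Note: the net effect on quota evaluation, with illustrative apiMethod values:

// _needQuota is true for quota-consuming writes and for data deletion:
//   apiMethod 'objectPut'    -> actionNeedQuotaCheck   -> _needQuota === true
//   apiMethod 'objectDelete' -> actionWithDataDeletion -> _needQuota === true
//   apiMethod 'objectGet'    -> in neither map         -> _needQuota === false
// Copy operations are caught separately, after object metadata retrieval:
actionNeedQuotaCheckCopy('objectGet', 'objectCopy');        // true
actionNeedQuotaCheckCopy('objectGet', 'objectPutCopyPart'); // true
actionNeedQuotaCheckCopy('objectGet', 'objectHead');        // false
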
@@ -33,6 +33,7 @@ const sharedActionMap = {
    bypassGovernanceRetention: 's3:BypassGovernanceRetention',
    listMultipartUploads: 's3:ListBucketMultipartUploads',
    listParts: 's3:ListMultipartUploadParts',
    metadataSearch: 's3:MetadataSearch',
    multipartDelete: 's3:AbortMultipartUpload',
    objectDelete: 's3:DeleteObject',
    objectDeleteTagging: 's3:DeleteObjectTagging',
@@ -47,6 +48,14 @@ const sharedActionMap = {
    objectPutLegalHold: 's3:PutObjectLegalHold',
    objectPutRetention: 's3:PutObjectRetention',
    objectPutTagging: 's3:PutObjectTagging',
    objectRestore: 's3:RestoreObject',
    objectPutVersion: 's3:PutObjectVersion',
};

const actionMapBucketQuotas = {
    bucketGetQuota: 'scality:GetBucketQuota',
    bucketUpdateQuota: 'scality:UpdateBucketQuota',
    bucketDeleteQuota: 'scality:DeleteBucketQuota',
};

// action map used for request context
@@ -62,6 +71,7 @@ const actionMapRQ = {
    initiateMultipartUpload: 's3:PutObject',
    objectDeleteVersion: 's3:DeleteObjectVersion',
    objectDeleteTaggingVersion: 's3:DeleteObjectVersionTagging',
    objectGetArchiveInfo: 'scality:GetObjectArchiveInfo',
    objectGetVersion: 's3:GetObjectVersion',
    objectGetACLVersion: 's3:GetObjectVersionAcl',
    objectGetTaggingVersion: 's3:GetObjectVersionTagging',
@@ -70,10 +80,13 @@ const actionMapRQ = {
    objectPutTaggingVersion: 's3:PutObjectVersionTagging',
    serviceGet: 's3:ListAllMyBuckets',
    objectReplicate: 's3:ReplicateObject',
    objectPutRetentionVersion: 's3:PutObjectVersionRetention',
    objectPutLegalHoldVersion: 's3:PutObjectVersionLegalHold',
    objectGetRetentionVersion: 's3:GetObjectRetention',
    objectPutRetentionVersion: 's3:PutObjectRetention',
    objectGetLegalHoldVersion: 's3:GetObjectLegalHold',
    objectPutLegalHoldVersion: 's3:PutObjectLegalHold',
    listObjectVersions: 's3:ListBucketVersions',
    ...sharedActionMap,
    ...actionMapBucketQuotas,
};

// action map used for bucket policies
@@ -125,6 +138,7 @@ const actionMonitoringMapS3 = {
    initiateMultipartUpload: 'CreateMultipartUpload',
    listMultipartUploads: 'ListMultipartUploads',
    listParts: 'ListParts',
    metadataSearch: 'MetadataSearch',
    multiObjectDelete: 'DeleteObjects',
    multipartDelete: 'AbortMultipartUpload',
    objectCopy: 'CopyObject',
@@ -143,7 +157,17 @@ const actionMonitoringMapS3 = {
    objectPutPart: 'UploadPart',
    objectPutRetention: 'PutObjectRetention',
    objectPutTagging: 'PutObjectTagging',
    objectRestore: 'RestoreObject',
    serviceGet: 'ListBuckets',
    bucketGetQuota: 'GetBucketQuota',
    bucketUpdateQuota: 'UpdateBucketQuota',
    bucketDeleteQuota: 'DeleteBucketQuota',
};

const actionMapAccountQuotas = {
    UpdateAccountQuota: 'scality:UpdateAccountQuota',
    DeleteAccountQuota: 'scality:DeleteAccountQuota',
    GetAccountQuota: 'scality:GetAccountQuota',
};

const actionMapIAM = {
@@ -168,6 +192,7 @@ const actionMapIAM = {
    getPolicyVersion: 'iam:GetPolicyVersion',
    getUser: 'iam:GetUser',
    listAccessKeys: 'iam:ListAccessKeys',
    listEntitiesForPolicy: 'iam:ListEntitiesForPolicy',
    listGroupPolicies: 'iam:ListGroupPolicies',
    listGroups: 'iam:ListGroups',
    listGroupsForUser: 'iam:ListGroupsForUser',
@@ -186,6 +211,7 @@ const actionMapIAM = {
    tagUser: 'iam:TagUser',
    unTagUser: 'iam:UntagUser',
    listUserTags: 'iam:ListUserTags',
    ...actionMapAccountQuotas,
};

const actionMapSSO = {

@@ -142,6 +142,8 @@ export function findConditionKey(
    // header
    case 's3:ObjLocationConstraint': return headers['x-amz-meta-scal-location-constraint'];
    case 'sts:ExternalId': return requestContext.getRequesterExternalId();
    case 'keycloak:groups': return requesterInfo.keycloakGroup;
    case 'keycloak:roles': return requesterInfo.keycloakRole;
    case 'iam:PolicyArn': return requestContext.getPolicyArn();
    // s3:ExistingObjectTag - Used to check that an existing object tag has
    // a specific tag key and value. Extraction of the correct tag key is done in CloudServer.

@@ -30,7 +30,7 @@ export default class ResultsCollector extends EventEmitter {
     * @emits ResultCollector#done
     * @emits ResultCollector#error
     */
    pushResult(err: Error | undefined, subPartIndex: number) {
    pushResult(err: Error | null | undefined, subPartIndex: number) {
        this._results.push({
            error: err,
            subPartIndex,

@@ -1,6 +1,7 @@
import assert from 'assert';
import * as crypto from 'crypto';
import * as stream from 'stream';
import azure from '@azure/storage-blob';

import { RequestLogger } from 'werelogs';

@@ -8,7 +9,7 @@ import ResultsCollector from './ResultsCollector';
import SubStreamInterface from './SubStreamInterface';
import * as objectUtils from '../objectUtils';
import MD5Sum from '../MD5Sum';
import errors from '../../errors';
import errors, { ArsenalError } from '../../errors';

export const splitter = '|';
export const overviewMpuKey = 'azure_mpu';
@@ -64,7 +65,7 @@ export const getBlockId = (
    const paddedSubPart = padString(subPartIndex, 'subPart');
    const blockId = `${uploadId}${splitter}partNumber${paddedPartNumber}` +
        `${splitter}subPart${paddedSubPart}${splitter}`;
    return padString(blockId, 'part');
    return Buffer.from(padString(blockId, 'part')).toString('base64');
};

export const getSummaryPartId = (partNumber: number, eTag: string, size: number) => {
@@ -103,10 +104,17 @@ export const getSubPartIds = (
) => [...Array(part.numberSubParts).keys()].map(subPartIndex =>
    getBlockId(uploadId, part.partNumber, subPartIndex));

// TODO Better type this
type ErrorWrapperFn = (
    s3Method: string,
    azureMethod: string,
    command: (client: azure.ContainerClient) => Promise<any>,
    log: RequestLogger,
    cb: (err: ArsenalError | null | undefined) => void,
) => void

export const putSinglePart = (
    errorWrapperFn: (first: string, second: string, third: any, log: any, cb: any) => void,
    request: any,
    errorWrapperFn: ErrorWrapperFn,
    request: stream.Readable,
    params: {
        bucketName: string;
        partNumber: number;
@@ -117,44 +125,44 @@ export const putSinglePart = (
    },
    dataStoreName: string,
    log: RequestLogger,
    cb: any,
    cb: (err: ArsenalError | null | undefined, dataStoreETag?: string, size?: number) => void,
) => {
    const { bucketName, partNumber, size, objectKey, contentMD5, uploadId }
        = params;
    const blockId = getBlockId(uploadId, partNumber, 0);
    const passThrough = new stream.PassThrough();
    const options = contentMD5
        ? { useTransactionalMD5: true, transactionalContentMD5: contentMD5 }
        ? { transactionalContentMD5: objectUtils.getMD5Buffer(contentMD5) }
        : {};
    request.pipe(passThrough);
    return errorWrapperFn('uploadPart', 'createBlockFromStream',
        [blockId, bucketName, objectKey, passThrough, size, options,
        (err: any | null, result: any) => {
            if (err) {
    return errorWrapperFn('uploadPart', 'createBlockFromStream', async client => {
        try {
            const result = await client.getBlockBlobClient(objectKey)
                .stageBlock(blockId, () => passThrough, size, options);
            const md5 = result.contentMD5 || '';
            const eTag = objectUtils.getHexMD5(md5);
            return eTag;
        } catch (err: any) {
            log.error('Error from Azure data backend uploadPart',
                { error: err.message, dataStoreName });
            if (err.code === 'ContainerNotFound') {
                return cb(errors.NoSuchBucket);
                throw errors.NoSuchBucket;
            }
            if (err.code === 'InvalidMd5') {
                return cb(errors.InvalidDigest);
                throw errors.InvalidDigest;
            }
            if (err.code === 'Md5Mismatch') {
                return cb(errors.BadDigest);
                throw errors.BadDigest;
            }
            return cb(errors.InternalError.customizeDescription(
                `Error returned from Azure: ${err.message}`),
            throw errors.InternalError.customizeDescription(
                `Error returned from Azure: ${err.message}`
            );
        }
        const md5 = result.headers['content-md5'] || '';
        const eTag = objectUtils.getHexMD5(md5);
        return cb(null, eTag, size);
    }], log, cb);
    }, log, cb);
};

// TODO type this
export const putNextSubPart = (
    errorWrapperFn: any,
const putNextSubPart = (
    errorWrapperFn: ErrorWrapperFn,
    partParams: {
        uploadId: string;
        partNumber: number;
@@ -162,11 +170,10 @@ export const putNextSubPart = (
        objectKey: string;
    },
    subPartInfo: { lastPartIndex: number; lastPartSize: number },
    subPartStream: any,
    subPartStream: stream.Readable,
    subPartIndex: number,
    resultsCollector: ResultsCollector,
    log: RequestLogger,
    cb: any,
) => {
    const { uploadId, partNumber, bucketName, objectKey } = partParams;
    const subPartSize = getSubPartSize(
@@ -174,14 +181,20 @@ export const putNextSubPart = (
    const subPartId = getBlockId(uploadId, partNumber,
        subPartIndex);
    resultsCollector.pushOp();
    errorWrapperFn('uploadPart', 'createBlockFromStream',
        [subPartId, bucketName, objectKey, subPartStream, subPartSize,
        {}, err => resultsCollector.pushResult(err, subPartIndex)], log, cb);
    errorWrapperFn('uploadPart', 'createBlockFromStream', async client => {
        try {
            const result = await client.getBlockBlobClient(objectKey)
                .stageBlock(subPartId, () => subPartStream, subPartSize, {});
            resultsCollector.pushResult(null, subPartIndex);
        } catch (err: any) {
            resultsCollector.pushResult(err, subPartIndex);
        }
    }, log, () => {});
};

export const putSubParts = (
    errorWrapperFn: any,
    request: any,
    errorWrapperFn: ErrorWrapperFn,
    request: stream.Readable,
    params: {
        uploadId: string;
        partNumber: number;
@@ -191,7 +204,7 @@ export const putSubParts = (
    },
    dataStoreName: string,
    log: RequestLogger,
    cb: any,
    cb: (err: ArsenalError | null | undefined, dataStoreETag?: string) => void,
) => {
    const subPartInfo = getSubPartInfo(params.size);
    const resultsCollector = new ResultsCollector();
@@ -230,14 +243,13 @@ export const putSubParts = (
        const totalLength = streamInterface.getTotalBytesStreamed();
        log.trace('successfully put subparts to Azure',
            { numberSubParts, totalLength });
        hashedStream.on('hashed', () => cb(null, hashedStream.completedHash,
            totalLength));
        hashedStream.on('hashed', () => cb(null, hashedStream.completedHash));

        // in case the hashed event was already emitted before the
        // event handler was registered:
        if (hashedStream.completedHash) {
            hashedStream.removeAllListeners('hashed');
            return cb(null, hashedStream.completedHash, totalLength);
            return cb(null, hashedStream.completedHash);
        }
        return undefined;
    });
@@ -245,7 +257,7 @@ export const putSubParts = (
    const currentStream = streamInterface.getCurrentStream();
    // start first put to Azure before we start streaming the data
    putNextSubPart(errorWrapperFn, params, subPartInfo,
        currentStream, 0, resultsCollector, log, cb);
        currentStream, 0, resultsCollector, log);

    request.pipe(hashedStream);
    hashedStream.on('end', () => {
@@ -265,8 +277,8 @@ export const putSubParts = (
        }
        const { nextStream, subPartIndex } =
            streamInterface.transitionToNextStream();
        putNextSubPart(errorWrapperFn, params, subPartInfo,
            nextStream, subPartIndex, resultsCollector, log, cb);
        putNextSubPart(errorWrapperFn, params, subPartInfo, nextStream,
            subPartIndex, resultsCollector, log);
        streamInterface.write(firstChunk);
    } else {
        streamInterface.write(data);

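Note: Azure requires block IDs to be Base64-encoded strings, hence the change to getBlockId. An illustrative round trip (the padding widths are internal to padString and assumed here):

const rawId = 'uploadId|partNumber00001|subPart00|'; // padString output, width assumed
const blockId = Buffer.from(rawId).toString('base64');
Buffer.from(blockId, 'base64').toString(); // recovers the padded raw ID
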
@@ -1,19 +1,25 @@
-const oneDay = 24 * 60 * 60 * 1000; // Milliseconds in a day.
+import { scaleMsPerDay } from '../objectUtils';
+const msInOneDay = 24 * 60 * 60 * 1000; // Milliseconds in a day.

export default class LifecycleDateTime {
    _transitionOneDayEarlier?: boolean;
    _expireOneDayEarlier?: boolean;
+    _timeProgressionFactor?: number;
+    _scaledMsPerDay: number;

    constructor(params?: {
        transitionOneDayEarlier: boolean;
        expireOneDayEarlier: boolean;
+        timeProgressionFactor: number;
    }) {
        this._transitionOneDayEarlier = params?.transitionOneDayEarlier;
        this._expireOneDayEarlier = params?.expireOneDayEarlier;
+        this._timeProgressionFactor = params?.timeProgressionFactor || 1;
+        this._scaledMsPerDay = scaleMsPerDay(this._timeProgressionFactor);
    }

    getCurrentDate() {
-        const timeTravel = this._expireOneDayEarlier ? oneDay : 0;
+        const timeTravel = this._expireOneDayEarlier ? msInOneDay : 0;
        return Date.now() + timeTravel;
    }

@@ -25,7 +31,7 @@ export default class LifecycleDateTime {
    findDaysSince(date: Date) {
        const now = this.getCurrentDate();
        const diff = now - date.getTime();
-        return Math.floor(diff / (1000 * 60 * 60 * 24));
+        return Math.floor(diff / this._scaledMsPerDay);
    }

    /**
@@ -52,8 +58,25 @@ export default class LifecycleDateTime {
        }
        if (transition.Days !== undefined) {
            const lastModifiedTime = this.getTimestamp(lastModified);
-            const timeTravel = this._transitionOneDayEarlier ? -oneDay : 0;
-            return lastModifiedTime + (transition.Days * oneDay) + timeTravel;
+            const timeTravel = this._transitionOneDayEarlier ? -msInOneDay : 0;
+            return lastModifiedTime + (transition.Days * this._scaledMsPerDay) + timeTravel;
        }
    }

+    /**
+     * Find the Unix time at which the non-current version transition should occur.
+     * @param transition - A non-current version transition from the lifecycle non-current version transitions
+     * @param lastModified - The object's last modified date
+     * @return - The normalized transition timestamp
+     */
+    getNCVTransitionTimestamp(
+        transition: { NoncurrentDays?: number },
+        lastModified: string,
+    ) {
+        if (transition.NoncurrentDays !== undefined) {
+            const lastModifiedTime = this.getTimestamp(lastModified);
+            const timeTravel = this._transitionOneDayEarlier ? -msInOneDay : 0;
+            return lastModifiedTime + (transition.NoncurrentDays * this._scaledMsPerDay) + timeTravel;
+        }
+    }
}
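Worked example of the new scaled-day math (values illustrative; the surrounding method is assumed to be `getTransitionTimestamp`, by analogy with the `getNCVTransitionTimestamp` variant added above): with a time progression factor of 24, one lifecycle "day" compresses to one hour.

const datetime = new LifecycleDateTime({
    transitionOneDayEarlier: false,
    expireOneDayEarlier: false,
    timeProgressionFactor: 24, // scaledMsPerDay = 86400000 / 24 = 3600000
});
// A rule with Days: 30 becomes due 30 * 3600000 ms = 30 hours after the
// object's last-modified date instead of 30 real days:
const ts = datetime.getTransitionTimestamp(
    { Days: 30 }, '2024-01-01T00:00:00.000Z');
// new Date(ts).toISOString() -> '2024-01-02T06:00:00.000Z'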
@@ -61,6 +61,47 @@ export default class LifecycleUtils {
        return trans1 > trans2 ? transition1 : transition2;
    }

+    /**
+     * Compare two non-current version transition rules and return the one that is most recent.
+     * @param params - The function parameters
+     * @param params.transition1 - A non-current version transition from the current rule
+     * @param params.transition2 - A non-current version transition from the previous rule
+     * @param params.lastModified - The object's last modified date
+     * @return The most applicable transition rule
+     */
+    compareNCVTransitions(params: {
+        lastModified: string;
+        transition1: any;
+        transition2?: any;
+    }): number | undefined;
+    compareNCVTransitions(params: {
+        lastModified: string;
+        transition1?: any;
+        transition2: any;
+    }): number | undefined;
+    compareNCVTransitions(params: {
+        lastModified: string;
+        transition1: any;
+        transition2: any;
+    }): number | undefined;
+    compareNCVTransitions(params: {
+        lastModified: string;
+        transition1?: any;
+        transition2?: any;
+    }) {
+        const { transition1, transition2, lastModified } = params;
+        if (transition1 === undefined) {
+            return transition2;
+        }
+        if (transition2 === undefined) {
+            return transition1;
+        }
+        const trans1 = this._datetime.getNCVTransitionTimestamp(transition1!, lastModified)!;
+        const trans2 = this._datetime.getNCVTransitionTimestamp(transition2!, lastModified)!;
+        return trans1 > trans2 ? transition1 : transition2;
+    }

    // TODO Fix This
    /**
     * Find the most relevant transition rule for the given transitions array
@@ -98,6 +139,42 @@ export default class LifecycleUtils {
        });
    }

+    /**
+     * Find the most relevant non-current version transition rule for the given transitions array
+     * and any previously stored non-current version transition from another rule.
+     * @param params - The function parameters
+     * @param params.transitions - Array of lifecycle non-current version transitions
+     * @param params.lastModified - The object's last modified date
+     * @return The most applicable non-current version transition rule
+     */
+    getApplicableNCVTransition(params: {
+        store: any;
+        currentDate: Date;
+        transitions: any[];
+        lastModified: string;
+    }) {
+        const { transitions, store, lastModified, currentDate } = params;
+        const transition = transitions.reduce((result, transition) => {
+            const isApplicable = // Is the transition time in the past?
+                this._datetime.getTimestamp(currentDate) >=
+                this._datetime.getNCVTransitionTimestamp(transition, lastModified)!;
+            if (!isApplicable) {
+                return result;
+            }
+            return this.compareNCVTransitions({
+                transition1: transition,
+                transition2: result,
+                lastModified,
+            });
+        }, undefined);
+        return this.compareNCVTransitions({
+            transition1: transition,
+            transition2: store.NoncurrentVersionTransition,
+            lastModified,
+        });
+    }

    // TODO
    /**
     * Filter out all rules based on `Status` and `Filter` (Prefix and Tags)
@@ -241,7 +318,17 @@ export default class LifecycleUtils {
                currentDate,
            });
        }
-        // TODO: Add support for NoncurrentVersionTransitions.
+
+        const ncvt = 'NoncurrentVersionTransitions';
+        const hasNoncurrentVersionTransitions = Array.isArray(rule[ncvt]) && rule[ncvt].length > 0;
+        if (hasNoncurrentVersionTransitions && this._supportedRules.includes('noncurrentVersionTransition')) {
+            store.NoncurrentVersionTransition = this.getApplicableNCVTransition({
+                transitions: rule.NoncurrentVersionTransitions,
+                lastModified: metadata.LastModified,
+                store,
+                currentDate,
+            });
+        }
        return store;
    }, {});
    // Do not transition to a location where the object is already stored.
@@ -249,6 +336,12 @@ export default class LifecycleUtils {
        && applicableRules.Transition.StorageClass === metadata.StorageClass) {
        applicableRules.Transition = undefined;
    }
+
+    if (applicableRules.NoncurrentVersionTransition
+        && applicableRules.NoncurrentVersionTransition.StorageClass === metadata.StorageClass) {
+        applicableRules.NoncurrentVersionTransition = undefined;
+    }

    return applicableRules;
    /* eslint-enable no-param-reassign */
}
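To make the reduce-plus-compare flow concrete, a hypothetical call (instance and values illustrative): both rules are already due, so the comparison keeps the one whose transition timestamp is most recent.

const applicable = lifecycleUtils.getApplicableNCVTransition({
    transitions: [
        { NoncurrentDays: 7, StorageClass: 'cold-site' },
        { NoncurrentDays: 30, StorageClass: 'archive-site' },
    ],
    lastModified: '2024-01-01T00:00:00.000Z',
    currentDate: new Date('2024-03-01T00:00:00.000Z'),
    store: {}, // nothing carried over from a previous rule
});
// -> { NoncurrentDays: 30, StorageClass: 'archive-site' }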
@@ -0,0 +1,110 @@
import { parseStringPromise } from 'xml2js';
import errors, { ArsenalError } from '../errors';
import * as werelogs from 'werelogs';
import { validRestoreObjectTiers } from '../constants';

/*
    Format of xml request:
    <RestoreRequest>
        <Days>integer</Days>
        <Tier>Standard|Bulk|Expedited</Tier>
    </RestoreRequest>
*/

/**
 * validate restore request xml
 * @param restoreRequest - parsed restore request object
 * @return {ArsenalError|undefined} - error on failure, undefined on success
 */
function validateRestoreRequest(restoreRequest?: any) {
    if (!restoreRequest) {
        const desc = 'request xml does not contain RestoreRequest';
        return errors.MalformedXML.customizeDescription(desc);
    }
    if (!restoreRequest.Days || !restoreRequest.Days[0]) {
        const desc = 'request xml does not contain RestoreRequest.Days';
        return errors.MalformedXML.customizeDescription(desc);
    }
    // RestoreRequest.Days must be greater than or equal to 1
    const daysValue = Number.parseInt(restoreRequest.Days[0], 10);
    if (Number.isNaN(daysValue)) {
        const desc = `RestoreRequest.Days is invalid type. [${restoreRequest.Days[0]}]`;
        return errors.MalformedXML.customizeDescription(desc);
    }
    if (daysValue < 1) {
        const desc = `RestoreRequest.Days must be greater than 0. [${restoreRequest.Days[0]}]`;
        return errors.MalformedXML.customizeDescription(desc);
    }
    if (daysValue > 2147483647) {
        const desc = `RestoreRequest.Days must be less than 2147483648. [${restoreRequest.Days[0]}]`;
        return errors.MalformedXML.customizeDescription(desc);
    }
    if (restoreRequest.Tier && restoreRequest.Tier[0] && !validRestoreObjectTiers.has(restoreRequest.Tier[0])) {
        const desc = `RestoreRequest.Tier is invalid value. [${restoreRequest.Tier[0]}]`;
        return errors.MalformedXML.customizeDescription(desc);
    }
    return undefined;
}

/**
 * parseRestoreRequestXml - Parse and validate xml body, returning callback with
 * object restoreReqObj: { days: <value>, tier: <value> }
 * @param xml - xml body to parse and validate
 * @param log - Werelogs logger
 * @param cb - callback to server
 * @return - calls callback with object restore request or error
 */
export async function parseRestoreRequestXml(
    xml: string,
    log: werelogs.Logger,
    cb: (err: ArsenalError | null, data?: any) => void,
) {
    let result;
    try {
        result = await parseStringPromise(xml);
    } catch (err) {
        log.debug('xml parsing failed', {
            error: err,
            method: 'parseRestoreXml',
            xml,
        });
        return cb(errors.MalformedXML);
    }
    if (!result) {
        const desc = 'request xml is undefined or empty';
        return cb(errors.MalformedXML.customizeDescription(desc));
    }
    const restoreRequest = result.RestoreRequest;
    const restoreReqError = validateRestoreRequest(restoreRequest);
    if (restoreReqError) {
        log.debug('restore request validation failed', {
            error: restoreReqError,
            method: 'validateRestoreRequest',
            xml,
        });
        return cb(restoreReqError);
    }
    // If Tier is not specified, default to 'Standard'
    return cb(null, {
        days: Number.parseInt(restoreRequest.Days, 10),
        tier: restoreRequest.Tier && restoreRequest.Tier[0] ? restoreRequest.Tier[0] : 'Standard',
    });
}

/**
 * convertToXml - Convert restore request info object to xml
 * @param days - restore days
 * @param tier - restore tier
 * @return - returns restore request information xml string
 */
export function convertToXml(days: string, tier: string) {
    if (!(days && tier)) {
        return '';
    }
    return [
        '<RestoreRequest xmlns="http://s3.amazonaws.com/doc/2006-03-01/">',
        `<Days>${days}</Days>`,
        `<Tier>${tier}</Tier>`,
        '</RestoreRequest>',
    ].join('');
}
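Example of a request body the parser above accepts (Tier is optional and defaults to 'Standard'; `log` is assumed to be a werelogs request logger):

const xml = [
    '<RestoreRequest xmlns="http://s3.amazonaws.com/doc/2006-03-01/">',
    '<Days>5</Days>',
    '<Tier>Expedited</Tier>',
    '</RestoreRequest>',
].join('');

parseRestoreRequestXml(xml, log, (err, restoreReq) => {
    // err -> null; restoreReq -> { days: 5, tier: 'Expedited' }
});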
@@ -1,5 +1,21 @@
-export const getHexMD5 = (base64MD5: WithImplicitCoercion<string>) =>
-    Buffer.from(base64MD5, 'base64').toString('hex');
+const msInOneDay = 24 * 60 * 60 * 1000; // Milliseconds in a day.
+
+export const getMD5Buffer = (base64MD5: WithImplicitCoercion<string> | Uint8Array) =>
+    base64MD5 instanceof Uint8Array ? base64MD5 : Buffer.from(base64MD5, 'base64');
+
+export const getHexMD5 = (base64MD5: WithImplicitCoercion<string> | Uint8Array) =>
+    getMD5Buffer(base64MD5).toString('hex');

export const getBase64MD5 = (hexMD5: WithImplicitCoercion<string>) =>
    Buffer.from(hexMD5, 'hex').toString('base64');

+/**
+ * Calculates the number of scaled milliseconds per day based on the given time progression factor.
+ * This function is intended for testing and simulation purposes only.
+ * @param {number} timeProgressionFactor - The desired time progression factor for scaling.
+ * @returns {number} The number of scaled milliseconds per day.
+ * If the result is 0, the minimum value of 1 millisecond is returned.
+ */
+export const scaleMsPerDay = (timeProgressionFactor: number): number =>
+    Math.round(msInOneDay / (timeProgressionFactor || 1)) || 1;
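Quick sanity checks on the scaling math:

scaleMsPerDay(1);    // 86400000 — real time, the default factor
scaleMsPerDay(24);   // 3600000  — one "day" elapses per hour
scaleMsPerDay(1e12); // 1        — Math.round yields 0, clamped to 1 ms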
@@ -3,6 +3,11 @@ import * as werelogs from 'werelogs';
import errors, { ArsenalError } from '../errors';
import escapeForXml from './escapeForXml';

+export interface BucketTag {
+    Key: string;
+    Value: string;
+}

const errorInvalidArgument = () => errors.InvalidArgument
    .customizeDescription('The header \'x-amz-tagging\' shall be ' +
        'encoded as UTF-8 then URLEncoded URL query parameters without ' +
@@ -32,6 +37,15 @@ export const _validator = {
        && tag.Key[0] !== undefined && tag.Value[0] !== undefined
        && typeof tag.Key[0] === 'string' && typeof tag.Value[0] === 'string',

+    // Allowed characters are letters, whitespace, and numbers, plus
+    // the following special characters: + - = . _ : /
+    // Maximum key length: 128 Unicode characters
+    // Maximum value length: 256 Unicode characters
+    validateTagObjectStructure: (tag: BucketTag) => tag
+        && Object.keys(tag).length === 2
+        && typeof tag.Key === 'string' && typeof tag.Value === 'string'
+        && tag.Key.length >= 1 && tag.Value.length >= 1,

    validateXMLStructure: (result: any) =>
        result && Object.keys(result).length === 1 &&
        result.Tagging &&
@@ -100,12 +114,47 @@ function _validateTags(tags: Array<{ Key: string[], Value: string[] }>) {
    }
    // not repeating keys
    if (tags.length > Object.keys(tagsResult).length) {
-        return errors.InvalidTag.customizeDescription('Cannot provide ' +
-            'multiple Tags with the same key');
+        return errors.InvalidTag.customizeDescription(
+            'Cannot provide multiple Tags with the same key'
+        );
    }
    return tagsResult;
}

+/** areTagsValid - Validate bucket tags
+ * @param tags - tags parsed from xml to be validated
+ * @return result - true if the tags are valid, false otherwise
+ */
+export function areTagsValid(tags: Array<BucketTag>) {
+    if (tags.length === 0) {
+        return true;
+    }
+    // Maximum number of tags per resource: 50
+    if (tags.length > 50) {
+        return false;
+    }
+
+    const tagsResult = {};
+    for (const tag of tags) {
+        if (!_validator.validateTagObjectStructure(tag)) {
+            return false;
+        }
+        const { Key: key, Value: value } = tag;
+
+        const result = _validator.validateKeyValue(key, value);
+        if (result instanceof Error) {
+            return false;
+        }
+
+        tagsResult[key] = value;
+    }
+    // not repeating keys
+    if (tags.length > Object.keys(tagsResult).length) {
+        return false;
+    }
+    return true;
+}

/** parseTagXml - Parse and validate xml body, returning callback with object
 * tags : { key: value}
 * @param xml - xml body to parse and validate
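Illustrative calls: each tag below is individually well-formed, but a repeated key trips the `tagsResult` size check, mirroring `_validateTags` above.

areTagsValid([
    { Key: 'env', Value: 'prod' },
    { Key: 'team', Value: 'storage' },
]); // true

areTagsValid([
    { Key: 'env', Value: 'prod' },
    { Key: 'env', Value: 'staging' },
]); // false — duplicate key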
@@ -77,6 +77,34 @@ export function _checkUnmodifiedSince(
    return { present: false, error: null };
}

+/**
+ * checks 'if-modified-since' and 'if-unmodified-since' headers if included in
+ * request against last-modified date of object
+ * @param headers - headers from request object
+ * @param lastModified - last modified date of object
+ * @return contains modifiedSince and unmodifiedSince res objects
+ */
+export function checkDateModifiedHeaders(
+    headers: http.IncomingHttpHeaders,
+    lastModified: string,
+) {
+    const lastModifiedDate = new Date(lastModified);
+    lastModifiedDate.setMilliseconds(0);
+    const millis = lastModifiedDate.getTime();
+
+    const ifModifiedSinceHeader = headers['if-modified-since'] ||
+        headers['x-amz-copy-source-if-modified-since'];
+    const ifUnmodifiedSinceHeader = headers['if-unmodified-since'] ||
+        headers['x-amz-copy-source-if-unmodified-since'];
+
+    const modifiedSinceRes = _checkModifiedSince(ifModifiedSinceHeader?.toString(),
+        millis);
+    const unmodifiedSinceRes = _checkUnmodifiedSince(ifUnmodifiedSinceHeader?.toString(),
+        millis);
+
+    return { modifiedSinceRes, unmodifiedSinceRes };
+}

/**
 * validateConditionalHeaders - validates 'if-modified-since',
 * 'if-unmodified-since', 'if-match' or 'if-none-match' headers if included in
@@ -92,21 +120,14 @@ export function validateConditionalHeaders(
    lastModified: string,
    contentMD5: string,
): {} | { present: boolean; error: ArsenalError } {
-    const lastModifiedDate = new Date(lastModified);
-    lastModifiedDate.setMilliseconds(0);
-    const millis = lastModifiedDate.getTime();
    const ifMatchHeader = headers['if-match'] ||
        headers['x-amz-copy-source-if-match'];
    const ifNoneMatchHeader = headers['if-none-match'] ||
        headers['x-amz-copy-source-if-none-match'];
-    const ifModifiedSinceHeader = headers['if-modified-since'] ||
-        headers['x-amz-copy-source-if-modified-since'];
-    const ifUnmodifiedSinceHeader = headers['if-unmodified-since'] ||
-        headers['x-amz-copy-source-if-unmodified-since'];
    const etagMatchRes = _checkEtagMatch(ifMatchHeader?.toString(), contentMD5);
    const etagNoneMatchRes = _checkEtagNoneMatch(ifNoneMatchHeader?.toString(), contentMD5);
-    const modifiedSinceRes = _checkModifiedSince(ifModifiedSinceHeader?.toString(), millis);
-    const unmodifiedSinceRes = _checkUnmodifiedSince(ifUnmodifiedSinceHeader?.toString(), millis);
+    const { modifiedSinceRes, unmodifiedSinceRes } =
+        checkDateModifiedHeaders(headers, lastModified);
    // If-Unmodified-Since condition evaluates to false and If-Match
    // is not present, then return the error. Otherwise, If-Unmodified-Since is
    // silent when If-Match match, and when If-Match does not match, it's the
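Example (illustrative dates): the object was last modified June 1st and the client sends If-Modified-Since of June 15th, so the object has not been modified since that date and the condition fails.

const { modifiedSinceRes, unmodifiedSinceRes } = checkDateModifiedHeaders(
    { 'if-modified-since': 'Sat, 15 Jun 2024 00:00:00 GMT' },
    'Sat, 01 Jun 2024 12:00:00 GMT',
);
// modifiedSinceRes  -> present: true, with an error set (NotModified-style)
//                      because the object has not changed since that date
// unmodifiedSinceRes -> { present: false, error: null } (header absent)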
@@ -13,7 +13,7 @@ import * as routesUtils from './routesUtils';
import routeWebsite from './routes/routeWebsite';
import * as http from 'http';
import StatsClient from '../metrics/StatsClient';

import { objectKeyByteLimit } from '../constants';
import * as requestUtils from '../../lib/policyEvaluator/requestUtils';

const routeMap = {
@@ -67,8 +67,14 @@ function checkBucketAndKey(
        blacklistedPrefixes.object);
    if (!result.isValid) {
        log.debug('invalid object key', { objectKey });
-        return errors.InvalidArgument.customizeDescription('Object key ' +
-            `must not start with "${result.invalidPrefix}".`);
+        if (result.invalidPrefix) {
+            return errors.InvalidArgument.customizeDescription('Invalid ' +
+                'prefix - object key cannot start with ' +
+                `"${result.invalidPrefix}".`);
+        }
+        return errors.KeyTooLong.customizeDescription('Object key is too ' +
+            'long. Maximum number of bytes allowed in keys is ' +
+            `${objectKeyByteLimit}.`);
    }
}
if ((reqQuery.partNumber || reqQuery.uploadId)
@@ -213,7 +219,8 @@ export default function routes(
    // @ts-ignore
    logger.newRequestLogger());

-    if (!req.url!.startsWith('/_/healthcheck')) {
+    if (!req.url!.startsWith('/_/healthcheck') &&
+        !req.url!.startsWith('/_/report')) {
        log.info('received request', clientInfo);
    }
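Condensed restatement of the new branch (a sketch, not the actual route code): `isValidObjectKey`, extended later in this diff, reports a blacklisted prefix via `invalidPrefix`, while an over-long key returns `{ isValid: false }` with no prefix.

function objectKeyError(result: { isValid: boolean; invalidPrefix?: string }): string | null {
    if (result.isValid) {
        return null;
    }
    return result.invalidPrefix
        ? `InvalidArgument: object key cannot start with "${result.invalidPrefix}"`
        : 'KeyTooLong: object key exceeds objectKeyByteLimit UTF-8 bytes';
}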
@@ -43,6 +43,8 @@ export default function routeDELETE(
        return call('bucketDeleteEncryption');
    } else if (query?.tagging !== undefined) {
        return call('bucketDeleteTagging');
+    } else if (query?.quota !== undefined) {
+        return call('bucketDeleteQuota');
    }
    call('bucketDelete');
} else {
@@ -58,6 +58,10 @@ export default function routerGET(
        call('bucketGetNotification');
    } else if (query.encryption !== undefined) {
        call('bucketGetEncryption');
+    } else if (query.search !== undefined) {
+        call('metadataSearch');
+    } else if (query.quota !== undefined) {
+        call('bucketGetQuota');
    } else {
        // GET bucket
        call('bucketGet');
@@ -58,6 +58,14 @@ export default function routePOST(
            corsHeaders));
    }

+    // POST Object restore
+    if (query.restore !== undefined) {
+        return api.callApiMethod('objectRestore', request, response,
+            log, (err, statusCode, resHeaders) =>
+                routesUtils.responseNoBody(err, resHeaders, response,
+                    statusCode, log));
+    }

    return routesUtils.responseNoBody(errors.NotImplemented, null, response,
        200, log);
}
@@ -105,6 +105,13 @@ export default function routePUT(
            return routesUtils.responseNoBody(err, corsHeaders,
                response, 200, log);
        });
+    } else if (query.quota !== undefined) {
+        api.callApiMethod('bucketUpdateQuota', request, response,
+            log, (err, resHeaders) => {
+                routesUtils.statsReport500(err, statsClient);
+                return routesUtils.responseNoBody(err, resHeaders, response,
+                    200, log);
+            });
    } else {
        // PUT bucket
        return api.callApiMethod('bucketPut', request, response, log,
@@ -11,7 +11,7 @@ export default function routerWebsite(
    api: { callApiMethod: routesUtils.CallApiMethod },
    log: RequestLogger,
    statsClient?: StatsClient,
-    dataRetrievalFn?: any,
+    dataRetrievalParams?: any,
) {
    const { bucketName, query } = request as any;
    log.debug('routing request', { method: 'routerWebsite' });
@@ -31,7 +31,7 @@ export default function routerWebsite(
    if (redirectInfo) {
        if (err && redirectInfo.withError) {
            return routesUtils.redirectRequestOnError(err,
-                'GET', redirectInfo, dataGetInfo, dataRetrievalFn,
+                'GET', redirectInfo, dataGetInfo, dataRetrievalParams,
                response, resMetaHeaders, log);
        }
        // note that key might have been modified in websiteGet
@@ -45,7 +45,7 @@ export default function routerWebsite(
    // user has their own error page
    if (err && dataGetInfo) {
        return routesUtils.streamUserErrorPage(err, dataGetInfo,
-            dataRetrievalFn, response, resMetaHeaders, log);
+            dataRetrievalParams, response, resMetaHeaders, log);
    }
    // send default error html response
    if (err) {
@@ -55,7 +55,7 @@ export default function routerWebsite(
    }
    // no error, stream data
    return routesUtils.responseStreamData(null, query,
-        resMetaHeaders, dataGetInfo, dataRetrievalFn, response,
+        resMetaHeaders, dataGetInfo, dataRetrievalParams, response,
        undefined, log);
});
}
@@ -66,7 +66,7 @@ export default function routerWebsite(
    if (redirectInfo) {
        if (err && redirectInfo.withError) {
            return routesUtils.redirectRequestOnError(err,
-                'HEAD', redirectInfo, null, dataRetrievalFn,
+                'HEAD', redirectInfo, null, dataRetrievalParams,
                response, resMetaHeaders, log);
        }
        return routesUtils.redirectRequest(redirectInfo,
@@ -9,6 +9,8 @@ import errors, { ArsenalError } from '../errors';
import * as constants from '../constants';
import DataWrapper from '../storage/data/DataWrapper';
import StatsClient from '../metrics/StatsClient';
+import { objectKeyByteLimit } from '../constants';
+const jsutil = require('../jsutil');

export type CallApiMethod = (
    methodName: string,
@@ -147,6 +149,15 @@ const XMLResponseBackend = {
        '<Error>',
        `<Code>${errCode.message}</Code>`,
        `<Message>${errCode.description}</Message>`,
+    );
+    const invalidArguments = errCode.metadata.get('invalidArguments') || [];
+    invalidArguments.forEach((invalidArgument, index) => {
+        const counter = index + 1;
+        const { ArgumentName, ArgumentValue } = invalidArgument as any;
+        xml.push(`<ArgumentName${counter}>${ArgumentName}</ArgumentName${counter}>`);
+        xml.push(`<ArgumentValue${counter}>${ArgumentValue}</ArgumentValue${counter}>`);
+    });
+    xml.push(
        '<Resource></Resource>',
        `<RequestId>${log.getSerializedUids()}</RequestId>`,
        '</Error>',
@@ -216,9 +227,18 @@ const JSONResponseBackend = {
        "requestId": "4442587FB7D0A2F9"
    }
    */
+    const invalidArguments = errCode.metadata.get('invalidArguments') || [];
+    const invalids = invalidArguments.reduce((acc, invalidArgument, index) => {
+        const counter = index + 1;
+        const { ArgumentName, ArgumentValue } = invalidArgument as any;
+        const name = `ArgumentName${counter}`;
+        const value = `ArgumentValue${counter}`;
+        return { ...acc, [name]: ArgumentName, [value]: ArgumentValue };
+    }, {});
    const data = JSON.stringify({
        code: errCode.message,
        message: errCode.description,
+        ...invalids,
        resource: null,
        requestId: log.getSerializedUids(),
    });
@@ -365,12 +385,18 @@ function retrieveData(
        response.destroy();
        responseDestroyed = true;
    };

+    const _destroyReadable = (readable: http.IncomingMessage | null) => {
+        // s3-data sends only a Readable stream, which does not implement destroy()
+        if (readable && readable.destroy) {
+            readable.destroy();
+        }
+    };

    // the S3-client might close the connection while we are processing it
    response.once('close', () => {
        responseDestroyed = true;
-        if (currentStream) {
-            currentStream.destroy();
-        }
+        _destroyReadable(currentStream);
    });

    const {
@@ -387,6 +413,7 @@ function retrieveData(
    return eachSeries(locations,
        (current, next) => data.get(current, response, log,
            (err: any, readable: http.IncomingMessage) => {
+                const cbOnce = jsutil.once(next);
                // NB: readable is of IncomingMessage type
                if (err) {
                    log.error('failed to get object', {
@@ -394,7 +421,7 @@ function retrieveData(
                        method: 'retrieveData',
                    });
                    _destroyResponse();
-                    return next(err);
+                    return cbOnce(err);
                }
                // response.isclosed is set by the S3 server. Might happen if
                // the S3-client closes the connection before the first request
@@ -403,24 +430,24 @@ function retrieveData(
                if (responseDestroyed || response.isclosed) {
                    log.debug(
                        'response destroyed before readable could stream');
-                    readable.destroy();
+                    _destroyReadable(readable);
                    const responseErr = new Error();
                    // @ts-ignore
                    responseErr.code = 'ResponseError';
                    responseErr.message = 'response closed by client request before all data sent';
-                    return next(responseErr);
+                    return cbOnce(responseErr);
                }
                // readable stream successfully consumed
                readable.on('end', () => {
                    currentStream = null;
                    log.debug('readable stream end reached');
-                    return next();
+                    return cbOnce();
                });
                // errors on server side with readable stream
                readable.on('error', err => {
                    log.error('error piping data from source');
                    _destroyResponse();
-                    return next(err);
+                    return cbOnce(err);
                });
                currentStream = readable;
                return readable.pipe(response, { end: false });
@@ -1128,6 +1155,9 @@ export function isValidObjectKey(objectKey: string, prefixBlacklist: string[]) {
    if (invalidPrefix) {
        return { isValid: false, invalidPrefix };
    }
+    if (Buffer.byteLength(objectKey, 'utf8') > objectKeyByteLimit) {
+        return { isValid: false };
+    }
    return { isValid: true };
}
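Why the `jsutil.once` wrapper matters in `retrieveData`: the 'end' and 'error' handlers on `readable` both call the `eachSeries` continuation, and a stream can emit 'error' after 'end' was already handled; calling `next` twice would advance two locations at once. A minimal sketch of the once() behavior assumed here:

function once<T extends (...args: any[]) => any>(fn: T): T {
    let called = false;
    return ((...args: any[]) => {
        if (called) {
            return undefined; // every call after the first is a no-op
        }
        called = true;
        return fn(...args);
    }) as T;
}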
@@ -989,13 +989,14 @@ class DataWrapper {
    return this.client.delete(objectGetInfo, log.getSerializedUids(),
        err => {
            if (err) {
-                if (err.is.ObjNotFound) {
+                // TODO: sproxydclient and hdclient do not return standard Arsenal errors yet.
+                if (err.code === 404) {
                    log.info('no such key in datastore', {
                        objectGetInfo,
                        implName: this.implName,
                        moreRetries: 'no',
                    });
-                    return cb(err);
+                    return cb(errors.ObjNotFound);
                }
                log.error('delete error from datastore', {
                    error: err,
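Sketch of the normalization the hunk above performs (names illustrative): backend-specific failures, raw HTTP-style codes from sproxydclient/hdclient, are mapped to standard Arsenal errors at the wrapper boundary, so callers keep a uniform ObjNotFound check.

function normalizeDeleteError(err: { code?: number }, arsenalErrors: { ObjNotFound: Error }) {
    if (err.code === 404) {
        return arsenalErrors.ObjNotFound; // callers see the standard error
    }
    return err; // anything else propagates unchanged
}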
@@ -1,11 +1,10 @@
-const https = require('https');
-const http = require('http');
+const { http, https } = require('httpagent');
const url = require('url');
const AWS = require('aws-sdk');
-const Sproxy = require('sproxydclient');
-const Hyperdrive = require('hdclient');
const HttpsProxyAgent = require('https-proxy-agent');

+require('aws-sdk/lib/maintenance_mode_message').suppress = true;

const constants = require('../../constants');
const DataFileBackend = require('./file/DataFileInterface');
const inMemory = require('./in_memory/datastore').backend;
@@ -26,8 +25,13 @@ function parseLC(config, vault) {
    if (locationObj.type === 'file') {
        clients[location] = new DataFileBackend(config);
    }
+    if (locationObj.type === 'vitastor') {
+        const VitastorBackend = require('./vitastor/VitastorBackend');
+        clients[location] = new VitastorBackend(location, locationObj.details);
+    }
    if (locationObj.type === 'scality') {
        if (locationObj.details.connector.sproxyd) {
+            const Sproxy = require('sproxydclient');
            clients[location] = new Sproxy({
                bootstrap: locationObj.details.connector
                    .sproxyd.bootstrap,
@@ -42,6 +46,7 @@ function parseLC(config, vault) {
            });
            clients[location].clientType = 'scality';
        } else if (locationObj.details.connector.hdclient) {
+            const Hyperdrive = require('hdclient');
            clients[location] = new Hyperdrive.hdcontroller.HDProxydClient(
                locationObj.details.connector.hdclient);
            clients[location].clientType = 'scality';
@@ -77,8 +82,8 @@ function parseLC(config, vault) {
        connectionAgent = new HttpsProxyAgent(options);
    } else {
        connectionAgent = sslEnabled ?
-            new https.Agent(httpAgentConfig) :
-            new http.Agent(httpAgentConfig);
+            new https.Agent(httpAgentConfig, { maxSockets: false }) :
+            new http.Agent(httpAgentConfig, { maxSockets: false });
    }
    const httpOptions = { agent: connectionAgent, timeout: 0 };
    const s3Params = {
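Moving `require('sproxydclient')` / `require('hdclient')` inside `parseLC`, and requiring the Vitastor backend only in its branch, means those heavyweight native-dependency modules load only when a location of that type is actually configured. A location entry that would take the new vitastor branch might look like this (field names per this diff; values illustrative):

const locationConfig = {
    'my-vitastor': {
        type: 'vitastor',
        details: {
            pool_id: 1,                     // vitastor data pool
            metadata_image: 's3-volume-meta', // or metadata_pool_id + metadata_inode_num
        },
    },
};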
@@ -5,6 +5,7 @@ const { parseTagFromQuery } = require('../../s3middleware/tagging');
const { externalBackendHealthCheckInterval } = require('../../constants');
const DataFileBackend = require('./file/DataFileInterface');
const { createLogger, checkExternalBackend } = require('./external/utils');
+const jsutil = require('../../jsutil');

class MultipleBackendGateway {
    constructor(clients, metadata, locStorageCheckFn) {
@@ -199,11 +200,12 @@ class MultipleBackendGateway {
    uploadPart(request, streamingV4Params, stream, size, location, key,
        uploadId, partNumber, bucketName, log, cb) {
        const client = this.clients[location];
+        const cbOnce = jsutil.once(cb);

        if (client.uploadPart) {
            return this.locStorageCheckFn(location, size, log, err => {
                if (err) {
-                    return cb(err);
+                    return cbOnce(err);
                }
                return client.uploadPart(request, streamingV4Params, stream,
                    size, key, uploadId, partNumber, bucketName, log,
@@ -217,14 +219,14 @@ class MultipleBackendGateway {
                            'metric following object PUT failure',
                            { error: error.message });
                    }
-                    return cb(err);
+                    return cbOnce(err);
                });
            }
-            return cb(null, partInfo);
+            return cbOnce(null, partInfo);
        });
    });
}
-        return cb();
+        return cbOnce();
    }

    listParts(key, uploadId, location, bucketName, partNumberMarker, maxParts,
@@ -8,6 +8,7 @@ const getMetaHeaders =
const { prepareStream } = require('../../../s3middleware/prepareStream');
const { createLogger, logHelper, removeQuotes, trimXMetaPrefix } =
    require('./utils');
+const jsutil = require('../../../jsutil');

const missingVerIdInternalError = errors.InternalError.customizeDescription(
    'Invalid state. Please ensure versioning is enabled ' +
@@ -317,9 +318,11 @@ class AwsClient {
    uploadPart(request, streamingV4Params, stream, size, key, uploadId,
        partNumber, bucketName, log, callback) {
        let hashedStream = stream;
+        const cbOnce = jsutil.once(callback);

        if (request) {
            const partStream = prepareStream(request, streamingV4Params,
-                this._vault, log, callback);
+                this._vault, log, cbOnce);
            hashedStream = new MD5Sum();
            partStream.pipe(hashedStream);
        }
@@ -333,7 +336,7 @@ class AwsClient {
        if (err) {
            logHelper(log, 'error', 'err from data backend ' +
                'on uploadPart', err, this._dataStoreName, this.clientType);
-            return callback(errors.ServiceUnavailable
+            return cbOnce(errors.ServiceUnavailable
                .customizeDescription('Error returned from ' +
                    `${this.type}: ${err.message}`),
            );
@@ -347,7 +350,7 @@ class AwsClient {
            dataStoreName: this._dataStoreName,
            dataStoreETag: noQuotesETag,
        };
-        return callback(null, dataRetrievalInfo);
+        return cbOnce(null, dataRetrievalInfo);
    });
}
@@ -1,6 +1,5 @@
-const url = require('url');
-
-const azure = require('azure-storage');
+const { BlobServiceClient, StorageSharedKeyCredential, AnonymousCredential } = require('@azure/storage-blob');
+const { ClientSecretCredential } = require('@azure/identity');
const errors = require('../../../errors').default;
const azureMpuUtils = require('../../../s3middleware/azureHelpers/mpuUtils');
const { validateAndFilterMpuParts } =
@@ -8,55 +7,103 @@ const { validateAndFilterMpuParts } =

const { createLogger, logHelper, translateAzureMetaHeaders } =
    require('./utils');
+const objectUtils = require('../../../s3middleware/objectUtils');

const constants = require('../../../constants');
const packageVersion = require('../../../../package.json').version;

-azure.Constants.USER_AGENT_PRODUCT_NAME = constants.productName;
-azure.Constants.USER_AGENT_PRODUCT_VERSION = packageVersion;

class AzureClient {
+    static addQueryParams(endpoint, token) {
+        const url = new URL(endpoint);
+        const query = token.startsWith('?') ? token.slice(1) : token;
+        if (!url.search) {
+            url.search = `?${query}`;
+        } else if (url.search === '?') {
+            url.search += query;
+        } else {
+            url.search += `&${query}`;
+        }
+        return url.toString();
+    }

    constructor(config) {
        this._azureStorageEndpoint = config.azureStorageEndpoint;
        this._azureStorageCredentials = config.azureStorageCredentials;
        this._azureContainerName = config.azureContainerName;
-        this._client = azure.createBlobService(
-            this._azureStorageCredentials.storageAccountName,
-            this._azureStorageCredentials.storageAccessKey,
-            this._azureStorageEndpoint);
-        this._client.enableGlobalHttpAgent = true;
+        const cred = (credentialsConfig => {
+            switch (credentialsConfig.authMethod) {
+            case 'client-secret':
+                return new ClientSecretCredential(
+                    credentialsConfig.tenantId,
+                    credentialsConfig.clientId,
+                    credentialsConfig.clientKey,
+                );
+            case 'shared-access-signature':
+                this._azureStorageEndpoint = AzureClient.addQueryParams(
+                    this._azureStorageEndpoint, credentialsConfig.sasToken);
+                return new AnonymousCredential();
+            case 'shared-key':
+            default:
+                return new StorageSharedKeyCredential(
+                    credentialsConfig.storageAccountName,
+                    credentialsConfig.storageAccessKey,
+                );
+            }
+        })(this._azureStorageCredentials);
+        const proxyOptions = (() => {
+            if (!config.proxy || !config.proxy.url) {
+                return undefined;
+            }
+            // NOTE: config.proxy.certs is not supported
+            const parsedUrl = new URL(config.proxy.url);
+            return {
+                host: parsedUrl.host,
+                port: parsedUrl.port || 80,
+                username: parsedUrl.username || undefined,
+                password: parsedUrl.password || undefined,
+            };
+        })();
+        this._client = new BlobServiceClient(this._azureStorageEndpoint, cred, {
+            keepAliveOptions: {
+                enable: false, // Enable use of global HTTP agent
+            },
+            proxyOptions,
+            userAgentOptions: {
+                userAgentPrefix: `${constants.productName}/${packageVersion} `,
+            },
+        }).getContainerClient(this._azureContainerName);
        this._dataStoreName = config.dataStoreName;
        this._bucketMatch = config.bucketMatch;
-        if (config.proxy && config.proxy.url) {
-            const parsedUrl = url.parse(config.proxy.url);
-            if (!parsedUrl.port) {
-                parsedUrl.port = 80;
-            }
-            const proxyParams = parsedUrl;
-            if (config.proxy.certs) {
-                Object.assign(proxyParams, config.proxy.certs);
-            }
-            this._client.setProxy(proxyParams);
-        }
    }

-    _errorWrapper(s3Method, azureMethod, args, log, cb) {
+    /**
+     * Run azure method call.
+     * @param {string} [s3Method] S3 method name
+     * @param {string} [azureMethod] Azure method name
+     * @param {ErrorWrapper~Command} [command] Actual command to run
+     * @param {RequestLogger} [log] Logger
+     * @param {ErrorWrapper~Cb} [cb] The final callback
+     * @returns {void}
+     *
+     * @callback ErrorWrapper~Command
+     * @param {azure.ContainerClient} [client] Azure client to use
+     * @returns {Promise<any>}
+     *
+     * @callback ErrorWrapper~Cb
+     * @param {azure.ArsenalError} [arsenalErr] Error returned by the command
+     * @param {any} [result] Result of Azure SDK command
+     * @returns {void}
+     */
+    _errorWrapper(s3Method, azureMethod, command, log, cb) {
        if (log) {
-            log.info(`calling azure ${azureMethod}`);
-        }
-        try {
-            this._client[azureMethod].apply(this._client, args);
-        } catch (err) {
-            const error = errors.ServiceUnavailable;
-            if (log) {
-                log.error('error thrown by Azure Storage Client Library',
-                    { error: err.message, stack: err.stack, s3Method,
-                        azureMethod, dataStoreName: this._dataStoreName });
-            }
-            cb(error.customizeDescription('Error from Azure ' +
-                `method: ${azureMethod} on ${s3Method} S3 call: ` +
-                `${err.message}`));
+            log.info(`calling azure ${azureMethod} in ${s3Method}`);
        }
+        command(this._client).then(
+            result => cb(null, result),
+            cb,
+        );
    }

    _createAzureKey(requestBucketName, requestObjectKey,
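The three `authMethod` branches in the constructor map onto credential configs shaped like these (field names from the diff; values illustrative):

const sharedKey = {
    authMethod: 'shared-key', // also the default when authMethod is unset
    storageAccountName: 'myaccount',
    storageAccessKey: 'bXlrZXk=',
};
const sharedAccessSignature = {
    authMethod: 'shared-access-signature',
    sasToken: '?sv=2021-08-06&sig=...', // appended onto the endpoint URL
};
const clientSecret = {
    authMethod: 'client-secret', // Azure AD service principal
    tenantId: '00000000-0000-0000-0000-000000000000',
    clientId: '11111111-1111-1111-1111-111111111111',
    clientKey: 'secret',
};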
@@ -119,6 +166,32 @@ class AzureClient {
        };
    }

+    /**
+     * Build Azure HTTP headers for content settings
+     * @param {object} [properties] The blob properties to set.
+     * @param {string} [properties.contentType] The MIME content type of the blob.
+     * The default type is application/octet-stream.
+     * @param {string} [properties.contentEncoding] The content encodings that have been applied
+     * to the blob.
+     * @param {string} [properties.contentLanguage] The natural languages used by this resource.
+     * @param {string} [properties.cacheControl] The blob's cache control.
+     * @param {string} [properties.contentDisposition] The blob's content disposition.
+     * @param {string} [properties.contentMD5] The blob's MD5 hash.
+     * @returns {BlobHTTPHeaders} The headers
+     */
+    _getAzureContentSettingsHeaders(properties) {
+        return {
+            blobContentMD5: properties.contentMD5
+                ? objectUtils.getMD5Buffer(properties.contentMD5)
+                : undefined,
+            blobContentType: properties.contentType || undefined,
+            blobCacheControl: properties.cacheControl || undefined,
+            blobContentDisposition: properties.contentDisposition || undefined,
+            blobContentEncoding: properties.contentEncoding || undefined,
+            blobContentLanguage: properties.blobContentLanguage || undefined,
+        };
+    }

    put(stream, size, keyContext, reqUids, callback, skey, metadata) {
        const log = createLogger(reqUids);
        // before blob is put, make sure there is no ongoing MPU with same key
@@ -134,50 +207,59 @@ class AzureClient {
        const options = {
            metadata: translateAzureMetaHeaders(keyContext.metaHeaders,
                keyContext.tagging),
-            contentSettings: {
-                contentType: keyContext.contentType || undefined,
-                cacheControl: keyContext.cacheControl || undefined,
-                contentDisposition: keyContext.contentDisposition ||
-                    undefined,
-                contentEncoding: keyContext.contentEncoding || undefined,
-            },
+            blobHTTPHeaders: this._getAzureContentSettingsHeaders(
+                keyContext || {}),
        };
        if (size === 0) {
-            return this._errorWrapper('put', 'createBlockBlobFromText',
-                [this._azureContainerName, azureKey, '', options,
-                    err => {
-                        if (err) {
-                            logHelper(log, 'error', 'err from Azure PUT data ' +
-                                'backend', err, this._dataStoreName);
-                            return callback(errors.ServiceUnavailable
-                                .customizeDescription('Error returned from ' +
-                                    `Azure: ${err.message}`));
-                        }
-                        return callback(null, azureKey);
-                    }], log, callback);
+            return this._errorWrapper('put', 'uploadData', async client => {
+                try {
+                    await client.getBlockBlobClient(azureKey).upload('', 0, options);
+                    return azureKey;
+                } catch (err) {
+                    logHelper(log, 'error', 'err from Azure PUT data backend',
+                        err, this._dataStoreName);
+                    throw errors.ServiceUnavailable.customizeDescription(
+                        `Error returned from Azure: ${err.message}`);
+                }
+            }, log, callback);
        }
-        return this._errorWrapper('put', 'createBlockBlobFromStream',
-            [this._azureContainerName, azureKey, stream, size, options,
-                err => {
-                    if (err) {
-                        logHelper(log, 'error', 'err from Azure PUT data ' +
-                            'backend', err, this._dataStoreName);
-                        return callback(errors.ServiceUnavailable
-                            .customizeDescription('Error returned from ' +
-                                `Azure: ${err.message}`));
-                    }
-                    return callback(null, azureKey);
-                }], log, callback);
+        return this._errorWrapper('put', 'createBlockBlobFromStream', async client => {
+            try {
+                await client.getBlockBlobClient(azureKey).upload(() => stream, size, options);
+                return azureKey;
+            } catch (err) {
+                logHelper(log, 'error', 'err from Azure PUT data backend',
+                    err, this._dataStoreName);
+                throw errors.ServiceUnavailable.customizeDescription(
+                    `Error returned from Azure: ${err.message}`);
+            }
+        }, log, callback);
    });
}

+    /**
+     * Build BlobRequestConditions from azureStreamingOptions
+     * @param {object} [objectGetInfoOptions] Azure streaming options
+     * @param {object} [objectGetInfoOptions.accessConditions] Access conditions
+     * @param {Date} [objectGetInfoOptions.accessConditions.DateUnModifiedSince] Filter objects not
+     * modified since that date.
+     * @returns {BlobRequestConditions} Request conditions
+     */
+    _getAzureConditions(objectGetInfoOptions) {
+        const accessConditions = objectGetInfoOptions.accessConditions || {};
+        return {
+            ifUnmodifiedSince: accessConditions.DateUnModifiedSince || undefined,
+        };
+    }

    head(objectGetInfo, reqUids, callback) {
        const log = createLogger(reqUids);
-        const { key, azureStreamingOptions } = objectGetInfo;
-        return this._errorWrapper('head', 'getBlobProperties',
-            [this._azureContainerName, key, azureStreamingOptions,
-                (err, data) => {
-                    if (err) {
+        const { key } = objectGetInfo;
+        return this._errorWrapper('head', 'getBlobProperties', async client => {
+            try {
+                const data = await client.getBlockBlobClient(key).getProperties();
+                return data;
+            } catch (err) {
                let logLevel;
                let retError;
                if (err.code === 'NotFound') {
@@ -185,42 +267,46 @@ class AzureClient {
                retError = errors.LocationNotFound;
            } else {
                logLevel = 'error';
-                retError = errors.ServiceUnavailable
-                    .customizeDescription(
+                retError = errors.ServiceUnavailable.customizeDescription(
                    `Error returned from Azure: ${err.message}`);
            }
            logHelper(log, logLevel, 'err from Azure HEAD data backend',
                err, this._dataStoreName);
-            return callback(retError);
-        }
-        return callback(null, data);
-    }], log, callback);
+            throw retError;
+        }
+    }, log, callback);
}

get(objectGetInfo, range, reqUids, callback) {
    const log = createLogger(reqUids);
    // for backwards compatibility
    const { key, response, azureStreamingOptions } = objectGetInfo;
    let streamingOptions;
+    let rangeStart = 0;
+    let rangeEnd = undefined;
    if (azureStreamingOptions) {
        // option coming from api.get()
        streamingOptions = azureStreamingOptions;
+        rangeStart = (typeof azureStreamingOptions.rangeStart === 'string')
+            ? parseInt(azureStreamingOptions.rangeStart, 10)
+            : azureStreamingOptions.rangeStart;
+        rangeEnd = (typeof azureStreamingOptions.rangeEnd === 'string')
+            ? parseInt(azureStreamingOptions.rangeEnd, 10)
+            : azureStreamingOptions.rangeEnd;
    } else if (range) {
        // option coming from multipleBackend.upload()
-        const rangeStart = (typeof range[0] === 'number') ? range[0].toString() : undefined;
-        const rangeEnd = range[1] ? range[1].toString() : undefined;
-        streamingOptions = { rangeStart, rangeEnd };
+        rangeStart = (typeof range[0] === 'number') ? range[0] : 0;
+        rangeEnd = range[1] || undefined;
    }
-    this._errorWrapper('get', 'getBlobToStream',
-        [this._azureContainerName, key, response, streamingOptions,
-            err => {
-                if (err) {
+    this._errorWrapper('get', 'getBlobToStream', async client => {
+        try {
+            const rsp = await client.getBlockBlobClient(key)
+                .download(rangeStart, rangeEnd - rangeStart + 1 || undefined);
+            rsp.readableStreamBody.pipe(response);
+            return response;
+        } catch (err) {
            logHelper(log, 'error', 'err from Azure GET data backend',
                err, this._dataStoreName);
-            return callback(errors.ServiceUnavailable);
+            throw errors.ServiceUnavailable;
        }
-        return callback(null, response);
-    }], log, callback);
+    }, log, callback);
}

delete(objectGetInfo, reqUids, callback) {
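Range conversion check for the `download()` call above: S3 ranges are inclusive `[start, end]`, while `BlockBlobClient.download(offset, count)` takes an offset plus a byte count, hence `rangeEnd - rangeStart + 1`.

const rangeStart = 0;
const rangeEnd = 499;                    // S3 'bytes=0-499'
const count = rangeEnd - rangeStart + 1; // 500 bytes
// With rangeEnd undefined, `undefined - 0 + 1` is NaN (falsy), so the
// `|| undefined` fallback downloads to the end of the blob.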
@@ -230,44 +316,46 @@ class AzureClient {
        objectGetInfo.key;
    let options;
    if (typeof objectGetInfo === 'object') {
-        options = objectGetInfo.options;
+        options = {
+            conditions: this._getAzureConditions(objectGetInfo.options || {}),
+        };
    }
-    return this._errorWrapper('delete', 'deleteBlobIfExists',
-        [this._azureContainerName, key, options,
-            err => {
-                if (err && err.statusCode === 412) {
-                    return callback(errors.PreconditionFailed);
-                }
-                if (err) {
-                    logHelper(log, 'error', 'error deleting object from ' +
-                        'Azure datastore', err, this._dataStoreName);
-                    return callback(errors.ServiceUnavailable
-                        .customizeDescription('Error returned from ' +
-                            `Azure: ${err.message}`));
-                }
-                return callback();
-            }], log, callback);
+    return this._errorWrapper('delete', 'deleteBlobIfExists', async client => {
+        try {
+            await client.getBlockBlobClient(key).deleteIfExists(options);
+        } catch (err) {
+            if (err.statusCode === 412) {
+                throw errors.PreconditionFailed;
+            }
            const log = createLogger(reqUids);
+            logHelper(log, 'error', 'error deleting object from Azure datastore',
+                err, this._dataStoreName);
+            throw errors.ServiceUnavailable.customizeDescription(
+                `Error returned from Azure: ${err.message}`);
+        }
+    }, log, callback);
}

healthcheck(location, callback, flightCheckOnStartUp) {
    const azureResp = {};
-    const healthCheckAction = flightCheckOnStartUp ?
-        'createContainerIfNotExists' : 'doesContainerExist';
-    this._errorWrapper('checkAzureHealth', healthCheckAction,
-        [this._azureContainerName, err => {
-            /* eslint-disable no-param-reassign */
-            if (err) {
-                azureResp[location] = { error: err.message,
-                    external: true };
-                return callback(null, azureResp);
-            }
+    this._errorWrapper('healthcheck', 'checkAzureHealth', async client => {
+        try {
+            if (flightCheckOnStartUp) {
+                await client.createIfNotExists();
+            } else {
+                await client.exists();
+            }
            azureResp[location] = {
-                message:
-                    'Congrats! You can access the Azure storage account',
+                message: 'Congrats! You can access the Azure storage account',
            };
-            return callback(null, azureResp);
-        }], null, callback);
+        } catch (err) {
+            azureResp[location] = {
+                error: err.message,
+                external: true,
+            };
+        }
+        return azureResp;
+    }, null, callback);
}

uploadPart(request, streamingV4Params, partStream, size, key, uploadId,
@@ -321,9 +409,7 @@ class AzureClient {
    completeMPU(jsonList, mdInfo, key, uploadId, bucket, metaHeaders,
        contentSettings, tagging, log, callback) {
        const azureKey = this._createAzureKey(bucket, key, this._bucketMatch);
-        const commitList = {
-            UncommittedBlocks: jsonList.uncommittedBlocks || [],
-        };
+        const commitList = jsonList.uncommittedBlocks || [];
        let filteredPartsObj;
        if (!jsonList.uncommittedBlocks) {
            const { storedParts, mpuOverviewKey, splitter } = mdInfo;
@@ -336,60 +422,56 @@ class AzureClient {
            // part.locations is always array of 1, which contains data info
            const subPartIds =
                azureMpuUtils.getSubPartIds(part.locations[0], uploadId);
-            commitList.UncommittedBlocks.push(...subPartIds);
+            commitList.push(...subPartIds);
        });
    }
    const options = {
-        contentSettings,
+        blobHTTPHeaders: this._getAzureContentSettingsHeaders(contentSettings || {}),
        metadata: translateAzureMetaHeaders(metaHeaders || {}, tagging),
    };
-    return this._errorWrapper('completeMPU', 'commitBlocks',
-        [this._azureContainerName, azureKey, commitList, options,
-            err => {
-                if (err) {
-                    logHelper(log, 'error', 'err completing MPU on Azure ' +
-                        'datastore', err, this._dataStoreName);
-                    return callback(errors.ServiceUnavailable
-                        .customizeDescription('Error returned from ' +
-                            `Azure: ${err.message}`));
-                }
-                const completeObjData = {
-                    key: azureKey,
-                    filteredPartsObj,
-                };
-                return callback(null, completeObjData);
-            }], log, callback);
+    return this._errorWrapper('completeMPU', 'commitBlocks', async client => {
+        try {
+            await client.getBlockBlobClient(azureKey).commitBlockList(commitList, options);
+            return {
+                key: azureKey,
+                filteredPartsObj,
+            };
+        } catch (err) {
+            logHelper(log, 'error', 'err completing MPU on Azure datastore',
+                err, this._dataStoreName);
+            throw errors.ServiceUnavailable.customizeDescription(
+                `Error returned from Azure: ${err.message}`);
+        }
+    }, log, callback);
}

objectPutTagging(key, bucket, objectMD, log, callback) {
    const azureKey = this._createAzureKey(bucket, key, this._bucketMatch);
    const azureMD = this._getMetaHeaders(objectMD);
    azureMD.tags = JSON.stringify(objectMD.tags);
-    this._errorWrapper('objectPutTagging', 'setBlobMetadata',
-        [this._azureContainerName, azureKey, azureMD,
-            err => {
-                if (err) {
-                    logHelper(log, 'error', 'err putting object tags to ' +
-                        'Azure backend', err, this._dataStoreName);
-                    return callback(errors.ServiceUnavailable);
-                }
-                return callback();
-            }], log, callback);
+    this._errorWrapper('objectPutTagging', 'setBlobMetadata', async client => {
+        try {
+            await client.getBlockBlobClient(azureKey).setMetadata(azureMD);
+        } catch (err) {
+            logHelper(log, 'error', 'err putting object tags to Azure backend',
+                err, this._dataStoreName);
+            throw errors.ServiceUnavailable;
+        }
+    }, log, callback);
}

objectDeleteTagging(key, bucketName, objectMD, log, callback) {
    const azureKey = this._createAzureKey(bucketName, key, this._bucketMatch);
    const azureMD = this._getMetaHeaders(objectMD);
-    this._errorWrapper('objectDeleteTagging', 'setBlobMetadata',
-        [this._azureContainerName, azureKey, azureMD,
-            err => {
-                if (err) {
-                    logHelper(log, 'error', 'err putting object tags to ' +
-                        'Azure backend', err, this._dataStoreName);
-                    return callback(errors.ServiceUnavailable);
-                }
-                return callback();
-            }], log, callback);
+    this._errorWrapper('objectDeleteTagging', 'setBlobMetadata', async client => {
+        try {
+            await client.getBlockBlobClient(azureKey).setMetadata(azureMD);
+        } catch (err) {
+            logHelper(log, 'error', 'err putting object tags to Azure backend',
+                err, this._dataStoreName);
+            throw errors.ServiceUnavailable;
+        }
+    }, log, callback);
}

copyObject(request, destLocationConstraintName, sourceKey,
@ -406,54 +488,50 @@ class AzureClient {
|
|||
|
||||
let options;
|
||||
if (storeMetadataParams.metaHeaders) {
|
||||
options = { metadata:
|
||||
translateAzureMetaHeaders(storeMetadataParams.metaHeaders) };
|
||||
options = {
|
||||
metadata: translateAzureMetaHeaders(storeMetadataParams.metaHeaders),
|
||||
};
|
||||
}
|
||||
|
||||
this._errorWrapper('copyObject', 'startCopyBlob',
|
||||
[`${this._azureStorageEndpoint}` +
|
||||
`${sourceContainerName}/${sourceKey}`,
|
||||
this._azureContainerName, destAzureKey, options,
|
||||
(err, res) => {
|
||||
if (err) {
|
||||
if (err.code === 'CannotVerifyCopySource') {
|
||||
logHelper(log, 'error', 'Unable to access ' +
|
||||
`${sourceContainerName} Azure Container`, err,
|
||||
this._dataStoreName);
|
||||
return callback(errors.AccessDenied
|
||||
.customizeDescription('Error: Unable to access ' +
|
||||
`${sourceContainerName} Azure Container`),
|
||||
// TODO: should we use syncCopyBlob() instead? or use poller.pollUntilDone() to wait until complete?
|
||||
this._errorWrapper('copyObject', 'startCopyBlob', async client => {
|
||||
let res;
|
||||
try {
|
||||
const poller = await client.getBlockBlobClient(destAzureKey).beginCopyFromURL(
|
||||
`${this._azureStorageEndpoint}${sourceContainerName}/${sourceKey}`,
|
||||
options,
|
||||
);
|
||||
|
||||
res = poller.getOperationState().result;
|
||||
if (res.copyProgress !== 'pending') {
|
||||
return destAzureKey;
|
||||
}
|
||||
logHelper(log, 'error', 'error from data backend on ' +
|
||||
'copyObject', err, this._dataStoreName);
|
||||
return callback(errors.ServiceUnavailable
|
||||
.customizeDescription('Error returned from ' +
|
||||
`AWS: ${err.message}`),
|
||||
);
|
||||
}
|
||||
if (res.copy.status === 'pending') {
|
||||
logHelper(log, 'error', 'Azure copy status is pending',
|
||||
} catch (err) {
|
||||
if (err.code === 'CannotVerifyCopySource') { // TOOD: may use a constant (or type) from SDK ??
|
||||
logHelper(log, 'error',
|
||||
`Unable to access ${sourceContainerName} Azure Container`,
|
||||
err, this._dataStoreName);
|
||||
const copyId = res.copy.id;
|
||||
this._client.abortCopyBlob(this._azureContainerName,
|
||||
destAzureKey, copyId, err => {
|
||||
if (err) {
|
||||
logHelper(log, 'error', 'error from data backend ' +
|
||||
'on abortCopyBlob', err, this._dataStoreName);
|
||||
return callback(errors.ServiceUnavailable
|
||||
.customizeDescription('Error returned from ' +
|
||||
`AWS on abortCopyBlob: ${err.message}`),
|
||||
);
|
||||
throw errors.AccessDenied.customizeDescription(
|
||||
`Error: Unable to access ${sourceContainerName} Azure Container`);
|
||||
}
|
||||
return callback(errors.InvalidObjectState
|
||||
.customizeDescription('Error: Azure copy status was ' +
|
||||
'pending. It has been aborted successfully'),
|
||||
);
|
||||
});
|
||||
logHelper(log, 'error', 'error from data backend on copyObject',
|
||||
err, this._dataStoreName);
|
||||
throw errors.ServiceUnavailable.customizeDescription(
|
||||
`Error returned from AWS: ${err.message}`);
|
||||
}
|
||||
return callback(null, destAzureKey);
|
||||
}], log, callback);
|
||||
|
||||
logHelper(log, 'error', 'Azure copy status is pending', {}, this._dataStoreName);
|
||||
try {
|
||||
await client.getBlockBlobClient(destAzureKey).abortCopyFromURL(res.copyId);
|
||||
} catch (err) {
|
||||
logHelper(log, 'error', 'error from data backend on abortCopyBlob',
|
||||
err, this._dataStoreName);
|
||||
throw errors.ServiceUnavailable.customizeDescription(
|
||||
`Error returned from AWS on abortCopyBlob: ${err.message}`);
|
||||
}
|
||||
throw errors.InvalidObjectState.customizeDescription(
|
||||
'Error: Azure copy status was pending. It has been aborted successfully');
|
||||
}, log, callback);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
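The TODO in this hunk asks whether the copy should block until completion rather than sampling getOperationState() once and aborting still-pending copies. For reference, a minimal sketch of the pollUntilDone() route with the same v12 @azure/storage-blob client shape used above (copyAndWait and its error handling are illustrative, not part of the diff):

// Sketch: let the SDK poll the server-side copy to a terminal state.
async function copyAndWait(containerClient, destKey, sourceUrl, options) {
    const poller = await containerClient.getBlockBlobClient(destKey)
        .beginCopyFromURL(sourceUrl, options);
    const res = await poller.pollUntilDone();
    // res.copyStatus is 'success', 'aborted' or 'failed' once the poller resolves
    if (res.copyStatus !== 'success') {
        throw new Error(`copy finished with status ${res.copyStatus}`);
    }
    return destKey;
}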
@ -0,0 +1,696 @@
// Zenko CloudServer Vitastor data storage backend adapter
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 (see README.md for details)

const stream = require('stream');

const vitastor = require('vitastor');

const VOLUME_MAGIC = 'VstS3Vol';
const OBJECT_MAGIC = 'VstS3Obj';
const FLAG_DELETED = 2n;

type Volume = {
    id: number,
    partial_sectors: {
        [key: string]: {
            buffer: Buffer,
            refs: number,
        },
    },
    header: {
        location: string,
        bucket: string,
        max_size: number,
        create_ts: number,
        used_ts: number,
        size: number,
        objects: number,
        removed_objects: number,
        object_bytes: number,
        removed_bytes: number,
    },
};

type ObjectHeader = {
    size: number,
    key: string,
    part_num?: number,
};

class VitastorBackend
{
    locationName: string;
    config: {
        pool_id: number,
        metadata_image: string,
        metadata_pool_id: number,
        metadata_inode_num: number,
        size_buckets: number[],
        size_bucket_mul: number,
        id_batch_size: number,
        sector_size: number,
        write_chunk_size: number,
        read_chunk_size: number,
        pack_objects: boolean,
        // and also other parameters for vitastor itself
    };
    next_id: number;
    alloc_id: number;
    opened: boolean;
    on_open: ((...args: any[]) => void)[] | null;
    open_error: Error | null;
    cli: any;
    kv: any;
    volumes: {
        [bucket: string]: {
            [max_size: string]: Volume,
        },
    };
    volumes_by_id: {
        [id: string]: Volume,
    };
    volume_delete_stats: {
        [id: string]: {
            count: number,
            bytes: number,
        },
    };

    constructor(locationName, config)
    {
        this.locationName = locationName;
        this.config = config;
        // validate config
        this.config.pool_id = Number(this.config.pool_id) || 0;
        if (!this.config.pool_id)
            throw new Error('pool_id is required for Vitastor');
        if (!this.config.metadata_image && (!this.config.metadata_pool_id || !this.config.metadata_inode_num))
            throw new Error('metadata_image or metadata_inode is required for Vitastor');
        if (!this.config.size_buckets || !this.config.size_buckets.length)
            this.config.size_buckets = [ 32*1024, 128*1024, 512*1024, 2*1024*1024, 8*1024*1024 ];
        this.config.size_bucket_mul = Number(this.config.size_bucket_mul) || 2;
        this.config.id_batch_size = Number(this.config.id_batch_size) || 100;
        this.config.sector_size = Number(this.config.sector_size) || 0;
        if (this.config.sector_size < 4096)
            this.config.sector_size = 4096;
        this.config.write_chunk_size = Number(this.config.write_chunk_size) || 0;
        if (this.config.write_chunk_size < this.config.sector_size)
            this.config.write_chunk_size = 4*1024*1024; // 4 MB
        this.config.read_chunk_size = Number(this.config.read_chunk_size) || 0;
        if (this.config.read_chunk_size < this.config.sector_size)
            this.config.read_chunk_size = 4*1024*1024; // 4 MB
        this.config.pack_objects = !!this.config.pack_objects;
        // state
        this.next_id = 1;
        this.alloc_id = 0;
        this.opened = false;
        this.on_open = null;
        this.open_error = null;
        this.cli = new vitastor.Client(config);
        this.kv = new vitastor.KV(this.cli);
        // we group objects into volumes by bucket and size
        this.volumes = {};
        this.volumes_by_id = {};
        this.volume_delete_stats = {};
    }
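
    // An illustrative location config for this backend, using only the fields
    // validated above (values are examples, not defaults):
    //
    //   {
    //       pool_id: 3,
    //       metadata_image: 's3-volume-meta',
    //       size_buckets: [ 32*1024, 128*1024, 512*1024 ],
    //       size_bucket_mul: 2,
    //       id_batch_size: 100,
    //       sector_size: 4096,
    //       pack_objects: false,
    //   }
    //
    // Any kv_* keys are forwarded to kv.open() and the remaining keys go to
    // the vitastor.Client itself.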

    async _makeVolumeId()
    {
        if (this.next_id <= this.alloc_id)
        {
            return this.next_id++;
        }
        const id_key = 'id'+this.config.pool_id;
        const [ err, prev ] = await new Promise<[ any, string ]>(ok => this.kv.get(id_key, (err, value) => ok([ err, value ])));
        if (err && err != vitastor.ENOENT)
        {
            throw new Error(err);
        }
        const new_id = (parseInt(prev) || 0) + 1;
        this.next_id = new_id;
        this.alloc_id = this.next_id + this.config.id_batch_size - 1;
        await new Promise((ok, no) => this.kv.set(id_key, this.alloc_id, err => (err ? no(new Error(err)) : ok(null)), cas_old => cas_old === prev));
        return this.next_id++;
    }
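
    // Worked example with the default id_batch_size = 100: if the KV key
    // 'id<pool_id>' holds '200', then new_id = 201 is returned, next_id
    // becomes 202 and alloc_id becomes 300, so ids up to 300 are handed out
    // locally without further KV round-trips. The CAS on kv.set (cas_old must
    // still equal '200') makes a concurrent allocator fail loudly instead of
    // both claiming the same batch.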

    async _getVolume(bucketName, size)
    {
        if (!this.opened)
        {
            if (this.on_open)
            {
                await new Promise(ok => this.on_open!.push(ok));
            }
            else
            {
                this.on_open = [];
                if (this.config.metadata_image)
                {
                    const img = new vitastor.Image(this.cli, this.config.metadata_image);
                    const info = await new Promise<{ pool_id: number, inode_num: number }>(ok => img.get_info(ok));
                    this.config.metadata_pool_id = info.pool_id;
                    this.config.metadata_inode_num = info.inode_num;
                }
                const kv_config = {};
                for (const key in this.config)
                {
                    if (key.substr(0, 3) === 'kv_')
                        kv_config[key] = this.config[key];
                }
                this.open_error = await new Promise(ok => this.kv.open(
                    this.config.metadata_pool_id, this.config.metadata_inode_num,
                    kv_config, err => ok(err ? new Error(err) : null)
                ));
                this.opened = true;
                this.on_open.map(cb => setImmediate(cb));
                this.on_open = null;
            }
        }
        if (this.open_error)
        {
            throw this.open_error;
        }
        let i;
        for (i = 0; i < this.config.size_buckets.length && size >= this.config.size_buckets[i]; i++) {}
        let s;
        if (i < this.config.size_buckets.length)
            s = this.config.size_buckets[i];
        else if (this.config.size_bucket_mul > 1)
        {
            // start from the largest configured bucket, then scale up
            s = this.config.size_buckets[this.config.size_buckets.length-1];
            while (size >= s)
                s = Math.floor(this.config.size_bucket_mul * s);
        }
        if (!this.volumes[bucketName])
        {
            this.volumes[bucketName] = {};
        }
        if (this.volumes[bucketName][s])
        {
            return this.volumes[bucketName][s];
        }
        const new_id = await this._makeVolumeId();
        const new_vol = this.volumes[bucketName][s] = {
            id: new_id,
            // FIXME: partial_sectors should be written with CAS because otherwise we may lose quick deletes
            partial_sectors: {},
            header: {
                location: this.locationName,
                bucket: bucketName,
                max_size: s,
                create_ts: Date.now(),
                used_ts: Date.now(),
                size: this.config.sector_size, // initial position is right after header
                objects: 0,
                removed_objects: 0,
                object_bytes: 0,
                removed_bytes: 0,
            },
        };
        this.volumes_by_id[new_id] = new_vol;
        const header_text = JSON.stringify(this.volumes[bucketName][s].header);
        const buf = Buffer.alloc(this.config.sector_size);
        buf.write(VOLUME_MAGIC + header_text, 0);
        await new Promise((ok, no) => this.cli.write(
            this.config.pool_id, new_id, 0, buf, err => (err ? no(new Error(err)) : ok(null))
        ));
        await new Promise((ok, no) => this.kv.set(
            'vol_'+this.config.pool_id+'_'+new_id, header_text, err => (err ? no(new Error(err)) : ok(null)), cas_old => !cas_old
        ));
        return new_vol;
    }
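
    // To summarize the layout created above: sector 0 of each volume inode
    // holds 'VstS3Vol' followed by the JSON volume header, and the same
    // header is mirrored in the KV store under 'vol_<pool_id>_<volume_id>'
    // (e.g. 'vol_3_201'), created with a CAS that requires the key to be
    // absent. Object records are then appended starting at offset sector_size.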

    toObjectGetInfo(objectKey, bucketName, storageLocation)
    {
        return null;
    }

    _bufferStart(vol, cur_pos, cur_size, cur_chunks, sector_refs)
    {
        if ((cur_pos % this.config.sector_size) ||
            Math.floor((cur_pos + cur_size) / this.config.sector_size) == Math.floor(cur_pos / this.config.sector_size))
        {
            const sect_pos = Math.floor(cur_pos / this.config.sector_size) * this.config.sector_size;
            const sect = vol.partial_sectors[sect_pos]
                ? vol.partial_sectors[sect_pos].buffer
                : Buffer.alloc(this.config.sector_size);
            if (this.config.pack_objects)
            {
                // Save only if <pack_objects>
                if (!vol.partial_sectors[sect_pos])
                    vol.partial_sectors[sect_pos] = { buffer: sect, refs: 0 };
                vol.partial_sectors[sect_pos].refs++;
                sector_refs.push(sect_pos);
            }
            let off = cur_pos % this.config.sector_size;
            let i = 0;
            for (; i < cur_chunks.length; i++)
            {
                let copy_len = this.config.sector_size - off;
                copy_len = copy_len > cur_chunks[i].length ? cur_chunks[i].length : copy_len;
                cur_chunks[i].copy(sect, off, 0, copy_len);
                off += copy_len;
                if (copy_len < cur_chunks[i].length)
                {
                    cur_chunks[i] = cur_chunks[i].slice(copy_len);
                    cur_size -= copy_len;
                    break;
                }
                else
                    cur_size -= cur_chunks[i].length;
            }
            cur_chunks.splice(0, i, sect);
            cur_size += this.config.sector_size;
            cur_pos = sect_pos;
        }
        return [ cur_pos, cur_size ];
    }

    _bufferEnd(vol, cur_pos, cur_size, cur_chunks, sector_refs, write_all)
    {
        const write_pos = cur_pos;
        const write_chunks = cur_chunks;
        let write_size = cur_size;
        cur_chunks = [];
        cur_pos += cur_size;
        cur_size = 0;
        let remain = (cur_pos % this.config.sector_size);
        if (remain > 0)
        {
            cur_pos -= remain;
            let last_sect = null;
            if (write_all)
            {
                last_sect = vol.partial_sectors[cur_pos]
                    ? vol.partial_sectors[cur_pos].buffer
                    : Buffer.alloc(this.config.sector_size);
                if (this.config.pack_objects)
                {
                    // Save only if <pack_objects>
                    if (!vol.partial_sectors[cur_pos])
                        vol.partial_sectors[cur_pos] = { buffer: last_sect, refs: 0 };
                    vol.partial_sectors[cur_pos].refs++;
                    sector_refs.push(cur_pos);
                }
            }
            write_size -= remain;
            if (write_size < 0)
                write_size = 0;
            for (let i = write_chunks.length-1; i >= 0 && remain > 0; i--)
            {
                if (write_chunks[i].length <= remain)
                {
                    remain -= write_chunks[i].length;
                    if (write_all)
                        write_chunks[i].copy(last_sect, remain);
                    else
                        cur_chunks.unshift(write_chunks[i]);
                    write_chunks.pop();
                }
                else
                {
                    if (write_all)
                        write_chunks[i].copy(last_sect, 0, write_chunks[i].length - remain);
                    else
                        cur_chunks.unshift(write_chunks[i].slice(write_chunks[i].length - remain));
                    write_chunks[i] = write_chunks[i].slice(0, write_chunks[i].length - remain);
                    remain = 0;
                    i++;
                }
            }
            if (write_all)
            {
                write_chunks.push(last_sect);
                write_size += this.config.sector_size;
            }
        }
        for (const chunk of cur_chunks)
        {
            cur_size += chunk.length;
        }
        return [ write_pos, write_chunks, write_size, cur_pos, cur_size, cur_chunks ];
    }
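
    // Worked example for the two helpers above with sector_size = 4096:
    // a write starting at cur_pos = 10000 is mid-sector, so _bufferStart
    // rewinds to sect_pos = 8192, copies the queued chunks into that sector's
    // buffered bytes and restarts the write aligned at 8192. _bufferEnd
    // mirrors this at the tail: the bytes of the final incomplete sector are
    // either padded out into a full sector (write_all = true, i.e. the last
    // flush) or carried over as cur_chunks for the next write batch.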

    /**
     * reqUids: string, // request-ids for log, usually joined by ':'
     * keyContext: {
     *   // a lot of shit, basically all metadata
     *   bucketName,
     *   objectKey,
     *   owner?,
     *   namespace?,
     *   partNumber?,
     *   uploadId?,
     *   metaHeaders?,
     *   isDeleteMarker?,
     *   tagging?,
     *   contentType?,
     *   cacheControl?,
     *   contentDisposition?,
     *   contentEncoding?,
     * },
     * callback: (error, objectGetInfo: any) => void,
     */
    put(stream, size, keyContext, reqUids, callback)
    {
        callback = once(callback);
        this._getVolume(keyContext.bucketName, size)
            .then(vol => this._put(vol, stream, size, keyContext, reqUids, callback))
            .catch(callback);
    }

    _put(vol, stream, size, keyContext, reqUids, callback)
    {
        const object_header: ObjectHeader = {
            size,
            key: keyContext.objectKey,
        };
        if (keyContext.partNumber)
        {
            object_header.part_num = keyContext.partNumber;
        }
        // header is: <8 bytes magic> <8 bytes flags> <8 bytes json length> <json>
        const hdr_begin_buf = Buffer.alloc(24);
        const hdr_json_buf = Buffer.from(JSON.stringify(object_header), 'utf-8');
        hdr_begin_buf.write(OBJECT_MAGIC);
        hdr_begin_buf.writeBigInt64LE(BigInt(hdr_json_buf.length), 16);
        const object_header_buf = Buffer.concat([ hdr_begin_buf, hdr_json_buf ]);
        const object_pos = vol.header.size;
        const object_get_info = { volume: vol.id, offset: object_pos, hdrlen: object_header_buf.length, size };
        let cur_pos = object_pos;
        let cur_chunks = [ object_header_buf ];
        let cur_size = object_header_buf.length;
        let err: Error|null = null;
        let waiting = 1; // 1 for end or error, 1 for each write request
        vol.header.size += object_header_buf.length + size;
        if (!this.config.pack_objects && (vol.header.size % this.config.sector_size))
        {
            vol.header.size += this.config.sector_size - (vol.header.size % this.config.sector_size);
        }
        const writeChunk = (last) =>
        {
            const sector_refs = [];
            // Handle partial beginning
            [ cur_pos, cur_size ] = this._bufferStart(vol, cur_pos, cur_size, cur_chunks, sector_refs);
            // Handle partial end
            let write_pos, write_chunks, write_size;
            [ write_pos, write_chunks, write_size, cur_pos, cur_size, cur_chunks ] = this._bufferEnd(vol, cur_pos, cur_size, cur_chunks, sector_refs, last);
            waiting++;
            // FIXME: pool_id: maybe it should be stored in volume metadata to allow to migrate volumes?
            this.cli.write(this.config.pool_id, vol.id, write_pos, write_chunks, (res) =>
            {
                for (const sect of sector_refs)
                {
                    vol.partial_sectors[sect].refs--;
                    if (!vol.partial_sectors[sect].refs &&
                        vol.header.size >= sect+this.config.sector_size)
                    {
                        // Forget partial data when it's not needed anymore
                        delete(vol.partial_sectors[sect]);
                    }
                }
                waiting--;
                if (res)
                {
                    err = new Error(res);
                    waiting--;
                }
                if (!waiting)
                {
                    callback(err, err ? null : object_get_info);
                }
            });
        };
        // Stream data
        stream.on('error', (e) =>
        {
            err = e;
            waiting--;
            if (!waiting)
            {
                callback(err, null);
            }
        });
        stream.on('end', () =>
        {
            if (err)
            {
                return;
            }
            waiting--;
            if (cur_size)
            {
                // write last chunk
                writeChunk(true);
            }
            if (!waiting)
            {
                callback(null, object_get_info);
            }
        });
        stream.on('data', (chunk) =>
        {
            if (err)
            {
                return;
            }
            cur_chunks.push(chunk);
            cur_size += chunk.length;
            if (cur_size >= this.config.write_chunk_size)
            {
                // got a complete chunk, write it out
                writeChunk(false);
            }
        });
    }
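
    // On-disk record layout written by _put, as built above:
    //   <8 bytes 'VstS3Obj'> <8 bytes flags> <8 bytes JSON length LE> <JSON header> <object data>
    // For example, key 'a' with size 5 gives the JSON {"size":5,"key":"a"}
    // (20 bytes), so the record occupies 24 + 20 + 5 = 49 bytes; unless
    // pack_objects is set, the volume write position is then rounded up to
    // the next sector_size boundary.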

    /**
     * objectGetInfo: {
     *   key: { volume, offset, hdrlen, size }, // from put
     *   size,
     *   start,
     *   dataStoreName,
     *   dataStoreETag,
     *   range,
     *   response: ServerResponse,
     * },
     * range?: [ start, end ], // like in HTTP - first byte index, last byte index
     * callback: (error, readStream) => void,
     */
    get(objectGetInfo, range, reqUids, callback)
    {
        if (!(objectGetInfo instanceof Object) || !objectGetInfo.key ||
            !(objectGetInfo.key instanceof Object) || !objectGetInfo.key.volume ||
            !objectGetInfo.key.offset || !objectGetInfo.key.hdrlen || !objectGetInfo.key.size)
        {
            throw new Error('objectGetInfo must be { key: { volume, offset, hdrlen, size } }, but is '+JSON.stringify(objectGetInfo));
        }
        const [ start, end ] = range || [];
        if (start < 0 || end < 0 || end != null && start != null && end < start || start >= objectGetInfo.key.size)
        {
            throw new Error('Invalid range: '+start+'-'+end);
        }
        let offset = objectGetInfo.key.offset + objectGetInfo.key.hdrlen + (start || 0);
        let len = objectGetInfo.key.size - (start || 0);
        if (end)
        {
            const len2 = end - (start || 0) + 1;
            if (len2 < len)
                len = len2;
        }
        callback(null, new VitastorReadStream(this.cli, objectGetInfo.key.volume, offset, len, this.config));
    }
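
    // Range example for the code above: for an object stored with
    // { offset: 4096, hdrlen: 49, size: 100 } and an HTTP range [10, 19],
    // the read starts at 4096 + 49 + 10 = 4155 and len becomes
    // min(100 - 10, 19 - 10 + 1) = 10 bytes.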

    /**
     * objectGetInfo: {
     *   key: { volume, offset, hdrlen, size }, // from put
     *   size,
     *   start,
     *   dataStoreName,
     *   dataStoreETag,
     *   range,
     *   response: ServerResponse,
     * },
     * callback: (error) => void,
     */
    delete(objectGetInfo, reqUids, callback)
    {
        callback = once(callback);
        this._delete(objectGetInfo, reqUids)
            .then(callback)
            .catch(callback);
    }

    async _delete(objectGetInfo, reqUids)
    {
        if (!(objectGetInfo instanceof Object) || !objectGetInfo.key ||
            !(objectGetInfo.key instanceof Object) || !objectGetInfo.key.volume ||
            !objectGetInfo.key.offset || !objectGetInfo.key.hdrlen || !objectGetInfo.key.size)
        {
            throw new Error('objectGetInfo must be { key: { volume, offset, hdrlen, size } }, but is '+JSON.stringify(objectGetInfo));
        }
        const in_sect_pos = (objectGetInfo.key.offset % this.config.sector_size);
        const sect_pos = objectGetInfo.key.offset - in_sect_pos;
        const vol = this.volumes_by_id[objectGetInfo.key.volume];
        if (vol && vol.partial_sectors[sect_pos])
        {
            // The sector may still be written to in corner cases
            const sect = vol.partial_sectors[sect_pos];
            const flags = sect.buffer.readBigInt64LE(in_sect_pos + 8);
            if (!(flags & FLAG_DELETED))
            {
                const del_stat = this.volume_delete_stats[vol.id] = (this.volume_delete_stats[vol.id] || { count: 0, bytes: 0 });
                del_stat.count++;
                del_stat.bytes += objectGetInfo.key.size;
                sect.buffer.writeBigInt64LE(flags | FLAG_DELETED, in_sect_pos + 8);
                sect.refs++;
                const err = await new Promise<any>(ok => this.cli.write(this.config.pool_id, objectGetInfo.key.volume, sect_pos, sect.buffer, ok));
                sect.refs--;
                if (err)
                {
                    sect.buffer.writeBigInt64LE(0n, in_sect_pos + 8);
                    throw new Error(err);
                }
            }
        }
        else
        {
            // RMW with CAS
            const [ err, buf, version ] = await new Promise<[ any, Buffer, bigint ]>(ok => this.cli.read(
                this.config.pool_id, objectGetInfo.key.volume, sect_pos, this.config.sector_size,
                (err, buf, version) => ok([ err, buf, version ])
            ));
            if (err)
            {
                throw new Error(err);
            }
            // FIXME What if JSON crosses sector boundary? Prevent it if we want to pack objects
            const magic = buf.slice(in_sect_pos, in_sect_pos+8).toString();
            const flags = buf.readBigInt64LE(in_sect_pos+8);
            const json_len = Number(buf.readBigInt64LE(in_sect_pos+16));
            let json_hdr;
            if (in_sect_pos+24+json_len <= buf.length)
            {
                try
                {
                    json_hdr = JSON.parse(buf.slice(in_sect_pos+24, in_sect_pos+24+json_len).toString());
                }
                catch (e)
                {
                }
            }
            if (magic !== OBJECT_MAGIC || !json_hdr || json_hdr.size !== objectGetInfo.key.size)
            {
                throw new Error(
                    'header of object with size '+objectGetInfo.key.size+
                    ' bytes not found in volume '+objectGetInfo.key.volume+' at '+objectGetInfo.key.offset
                );
            }
            else if (!(flags & FLAG_DELETED))
            {
                buf.writeBigInt64LE(flags | FLAG_DELETED, in_sect_pos + 8);
                const err = await new Promise<any>(ok => this.cli.write(this.config.pool_id, objectGetInfo.key.volume, sect_pos, buf, { version: version+1n }, ok));
                if (err == vitastor.EINTR)
                {
                    // Retry
                    await this._delete(objectGetInfo, reqUids);
                }
                else if (err)
                {
                    throw new Error(err);
                }
                else
                {
                    // FIXME: Write deletion statistics to volumes
                    // FIXME: Implement defragmentation
                    const del_stat = this.volume_delete_stats[objectGetInfo.key.volume] = (this.volume_delete_stats[objectGetInfo.key.volume] || { count: 0, bytes: 0 });
                    del_stat.count++;
                    del_stat.bytes += objectGetInfo.key.size;
                }
            }
        }
    }
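
    // Note on the delete path above: object data is never rewritten in place;
    // deletion only sets FLAG_DELETED in the record's flags field (offset +8
    // within the record), using CAS on the sector version (vitastor.EINTR
    // triggers a retry through the recursive _delete call). Actual space
    // reclamation is left to the FIXME'd defragmentation.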

    /**
     * config: full zenko server config,
     * callback: (error, stats) => void, // stats is the returned statistics in arbitrary format
     */
    getDiskUsage(config, reqUids, callback)
    {
        // FIXME: Iterate all volumes and return their sizes and deletion statistics, or maybe just sizes
        callback(null, {});
    }
}

class VitastorReadStream extends stream.Readable
{
    constructor(cli, volume_id, offset, len, config, options = undefined)
    {
        super(options);
        this.cli = cli;
        this.volume_id = volume_id;
        this.offset = offset;
        this.end = offset + len;
        this.pos = offset;
        this.config = config;
        this._reading = false;
    }

    _read(n)
    {
        if (this._reading)
        {
            return;
        }
        // FIXME: Validate object header
        const chunk_size = n && this.config.read_chunk_size < n ? n : this.config.read_chunk_size;
        const read_offset = this.pos;
        const round_offset = read_offset - (read_offset % this.config.sector_size);
        let read_end = this.end <= read_offset+chunk_size ? this.end : read_offset+chunk_size;
        const round_end = (read_end % this.config.sector_size)
            ? read_end + this.config.sector_size - (read_end % this.config.sector_size)
            : read_end;
        if (round_end <= this.end)
            read_end = round_end;
        this.pos = read_end;
        if (read_end <= read_offset)
        {
            // EOF
            this.push(null);
            return;
        }
        this._reading = true;
        this.cli.read(this.config.pool_id, this.volume_id, round_offset, round_end-round_offset, (err, buf, version) =>
        {
            this._reading = false;
            if (err)
            {
                this.destroy(new Error(err));
                return;
            }
            if (read_offset != round_offset || round_end != read_end)
            {
                buf = buf.subarray(read_offset-round_offset, buf.length-(round_end-read_end));
            }
            if (this.push(buf))
            {
                this._read(n);
            }
        });
    }
}

function once(callback)
{
    let called = false;
    return function()
    {
        if (!called)
        {
            called = true;
            callback.apply(null, arguments);
        }
    };
}

module.exports = VitastorBackend;
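To make the backend's contract concrete, a minimal driving sketch under assumed values (the require path, location name and config values are illustrative; the put/get signatures are the ones defined above):

// Sketch: how CloudServer-style code would drive this backend directly.
const VitastorBackend = require('./VitastorBackend');
const fs = require('fs');

const backend = new VitastorBackend('vitastor-loc-1', {
    pool_id: 3,
    metadata_image: 's3-volume-meta',
});
const keyContext = { bucketName: 'test-bucket', objectKey: 'hello.txt' };
const data = fs.createReadStream('/tmp/hello.txt');
const size = fs.statSync('/tmp/hello.txt').size;
backend.put(data, size, keyContext, 'req:1', (err, objectGetInfo) => {
    if (err) {
        throw err;
    }
    // objectGetInfo is { volume, offset, hdrlen, size } as returned by _put
    backend.get({ key: objectGetInfo }, null, 'req:2',
        (err, readStream) => readStream.pipe(process.stdout));
});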
@ -177,6 +177,42 @@ class MetadataWrapper {
        });
    }

    updateBucketCapabilities(bucketName, bucketMD, capabilityName, capacityField, capability, log, cb) {
        log.debug('updating bucket capabilities in metadata');
        // When concurrent update is not supported, we update the whole bucket metadata
        if (!this.client.putBucketAttributesCapabilities) {
            return this.updateBucket(bucketName, bucketMD, log, cb);
        }
        return this.client.putBucketAttributesCapabilities(bucketName, capabilityName, capacityField, capability,
            log, err => {
                if (err) {
                    log.debug('error from metadata', { implName: this.implName,
                        error: err });
                    return cb(err);
                }
                log.trace('bucket capabilities updated in metadata');
                return cb(err);
            });
    }

    deleteBucketCapabilities(bucketName, bucketMD, capabilityName, capacityField, log, cb) {
        log.debug('deleting bucket capabilities in metadata');
        // When concurrent update is not supported, we update the whole bucket metadata
        if (!this.client.deleteBucketAttributesCapability) {
            return this.updateBucket(bucketName, bucketMD, log, cb);
        }
        return this.client.deleteBucketAttributesCapability(bucketName, capabilityName, capacityField,
            log, err => {
                if (err) {
                    log.debug('error from metadata', { implName: this.implName,
                        error: err });
                    return cb(err);
                }
                log.trace('bucket capabilities deleted in metadata');
                return cb(err);
            });
    }

    getBucket(bucketName, log, cb) {
        log.debug('getting bucket from metadata');
        this.client.getBucketAttributes(bucketName, log, (err, data) => {

@ -190,6 +226,19 @@ class MetadataWrapper {
        });
    }

    getBucketQuota(bucketName, log, cb) {
        log.debug('getting bucket quota from metadata');
        this.client.getBucketAttributes(bucketName, log, (err, data) => {
            if (err) {
                log.debug('error from metadata', { implName: this.implName,
                    error: err });
                return cb(err);
            }
            const bucketInfo = BucketInfo.fromObj(data);
            return cb(err, { quota: bucketInfo.getQuota() });
        });
    }

    deleteBucket(bucketName, log, cb) {
        log.debug('deleting bucket from metadata');
        this.client.deleteBucket(bucketName, log, err => {

@ -275,7 +324,7 @@ class MetadataWrapper {
        });
    }

    deleteObjectMD(bucketName, objName, params, log, cb) {
    deleteObjectMD(bucketName, objName, params, log, cb, originOp = 's3:ObjectRemoved:Delete') {
        log.debug('deleting object from metadata');
        this.client.deleteObject(bucketName, objName, params, log, err => {
            if (err) {

@ -285,7 +334,7 @@ class MetadataWrapper {
            }
            log.debug('object deleted from metadata');
            return cb(err);
        });
        }, originOp);
    }

    listObject(bucketName, listingParams, log, cb) {

@ -499,6 +548,139 @@ class MetadataWrapper {
            return cb();
        });
    }

    /**
     * Put bucket indexes
     *
     * indexSpec format:
     * [
     *   { key: [ { key: "", order: 1 } ... ], name: <id 1>, ... , <backend options> },
     *   ...
     *   { key: [ { key: "", order: 1 } ... ], name: <id n>, ... },
     * ]
     *
     * @param {String} bucketName bucket name
     * @param {Array<Object>} indexSpecs index specification
     * @param {Object} log logger
     * @param {Function} cb callback
     * @return {undefined}
     */
    putBucketIndexes(bucketName, indexSpecs, log, cb) {
        log.debug('put bucket indexes');

        if (typeof this.client.putBucketIndexes !== 'function') {
            log.error('error from metadata', {
                method: 'putBucketIndexes',
                error: errors.NotImplemented,
                implName: this.implName,
            });
            return cb(errors.NotImplemented);
        }

        return this.client.putBucketIndexes(bucketName, indexSpecs, log, err => {
            if (err) {
                log.debug('error from metadata', {
                    method: 'putBucketIndexes',
                    error: err,
                    implName: this.implName,
                });
                return cb(err);
            }
            return cb(null);
        });
    }

    /**
     * Delete bucket indexes
     *
     * indexSpec format:
     * [
     *   { key: [ { key: "", order: 1 } ... ], name: <id 1>, ... , <backend options> },
     *   ...
     *   { key: [ { key: "", order: 1 } ... ], name: <id n>, ... },
     * ]
     *
     * @param {String} bucketName bucket name
     * @param {Array<Object>} indexSpecs index specification
     * @param {Object} log logger
     * @param {Function} cb callback
     * @return {undefined}
     */
    deleteBucketIndexes(bucketName, indexSpecs, log, cb) {
        log.debug('delete bucket indexes');

        if (typeof this.client.deleteBucketIndexes !== 'function') {
            log.error('error from metadata', {
                method: 'deleteBucketIndexes',
                error: errors.NotImplemented,
                implName: this.implName,
            });
            return cb(errors.NotImplemented);
        }

        return this.client.deleteBucketIndexes(bucketName, indexSpecs, log, err => {
            if (err) {
                log.error('error from metadata', {
                    method: 'deleteBucketIndexes',
                    error: err,
                    implName: this.implName,
                });
                return cb(err);
            }
            return cb(null);
        });
    }

    getBucketIndexes(bucketName, log, cb) {
        log.debug('get bucket indexes');

        if (typeof this.client.getBucketIndexes !== 'function') {
            log.debug('error from metadata', {
                method: 'getBucketIndexes',
                error: errors.NotImplemented,
                implName: this.implName,
            });
            return cb(errors.NotImplemented);
        }

        return this.client.getBucketIndexes(bucketName, log, (err, res) => {
            if (err) {
                log.debug('error from metadata', {
                    method: 'getBucketIndexes',
                    error: err,
                    implName: this.implName,
                });
                return cb(err);
            }
            return cb(null, res);
        });
    }

    getIndexingJobs(log, cb) {
        if (typeof this.client.getIndexingJobs !== 'function') {
            log.debug('error from metadata', {
                method: 'getIndexingJobs',
                error: errors.NotImplemented,
                implName: this.implName,
            });
            return cb(errors.NotImplemented);
        }

        return this.client.getIndexingJobs(log, (err, res) => {
            if (err) {
                log.debug('error from metadata', {
                    method: 'getIndexingJobs',
                    error: err,
                    implName: this.implName,
                });
                return cb(err);
            }
            return cb(null, res);
        });
    }
}

module.exports = MetadataWrapper;
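For illustration, a call to this API with an invented bucket and index name; note the doc comments above write the per-index field as `key`, while the mongoclient conversion helpers shown further down consume `keys`, which is the shape this sketch uses:

// Illustrative only: create a compound index on two metadata fields.
metadata.putBucketIndexes('my-bucket', [
    {
        name: 'by-tag-and-date',
        keys: [
            { key: 'value.tags.project', order: 1 },
            { key: 'value.last-modified', order: -1 },
        ],
    },
], log, err => {
    if (err) {
        log.error('putBucketIndexes failed', { error: err });
    }
});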
@ -108,9 +108,26 @@ class ListRecordStream extends stream.Readable {
            if (value && value.tags) {
                value.tags = unescape(value.tags);
            }
            entry = {
                type: 'put', // updates overwrite the whole metadata,
            // updates overwrite the whole metadata,
            // so they are considered as puts
            let type = 'put';
            // When the object metadata contains the "deleted"
            // flag, it means that the operation is the update
            // we perform before the deletion of an object. We
            // perform the update to keep all the metadata in the
            // oplog. This update is what will be used by backbeat
            // as the delete operation, so we set the type of operation
            // for this event to a delete.
            // Backbeat still receives the actual delete operations
            // but they are ignored as they don't contain any metadata.
            // The delete operations are kept in case we want to listen
            // to delete events coming from special collections other
            // than "bucket" collections.
            if (value && value.deleted) {
                type = 'delete';
            }
            entry = {
                type,
                key: itemObj.o2._id,
                // updated value may be either stored directly in 'o'
                // attribute or in '$set' attribute (supposedly when

File diff suppressed because it is too large
@ -85,7 +85,8 @@ class MongoReadStream extends Readable {
        Object.assign(query, searchOptions);
    }

    this._cursor = c.find(query).sort({
    const projection = { 'value.location': 0 };
    this._cursor = c.find(query, { projection }).sort({
        _id: options.reverse ? -1 : 1,
    });
    if (options.limit && options.limit !== -1) {

@ -101,15 +102,10 @@ class MongoReadStream extends Readable {
        return;
    }

    this._cursor.next((err, doc) => {
    this._cursor.next().then(doc => {
        if (this._destroyed) {
            return;
        }
        if (err) {
            this.emit('error', err);
            return;
        }

        let key = undefined;
        let value = undefined;

@ -133,6 +129,12 @@ class MongoReadStream extends Readable {
            value,
        });
    }
    }).catch(err => {
        if (this._destroyed) {
            return;
        }
        this.emit('error', err);
        return;
    });
}

@ -142,7 +144,7 @@ class MongoReadStream extends Readable {
    }
    this._destroyed = true;

    this._cursor.close(err => {
    this._cursor.close().catch(err => {
        if (err) {
            this.emit('error', err);
            return;
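These hunks track the MongoDB Node driver's move (v4+) from callback-style cursors to promise-only APIs. The general rewrite pattern, sketched on a generic cursor (use and handle are illustrative placeholders):

// Before (driver v3): callback style
// cursor.next((err, doc) => { if (err) { handle(err); } else { use(doc); } });

// After (driver v4+): next()/close() return promises only
cursor.next()
    .then(doc => use(doc))
    .catch(err => handle(err));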
@ -185,6 +185,48 @@ function formatVersionKey(key, versionId, vFormat) {
    return formatVersionKeyV0(key, versionId);
}

function indexFormatMongoArrayToObject(mongoIndexArray) {
    const indexObj = [];

    for (const idx of mongoIndexArray) {
        const keys = [];
        let entries = [];

        if (idx.key instanceof Map) {
            entries = idx.key.entries();
        } else {
            entries = Object.entries(idx.key);
        }

        for (const k of entries) {
            keys.push({ key: k[0], order: k[1] });
        }

        indexObj.push({ name: idx.name, keys });
    }

    return indexObj;
}

function indexFormatObjectToMongoArray(indexObj) {
    const mongoIndexArray = [];

    for (const idx of indexObj) {
        const key = new Map();

        for (const k of idx.keys) {
            key.set(k.key, k.order);
        }

        // copy all fields except keys from idx
        // eslint-disable-next-line
        const { keys: _, ...toCopy } = idx;
        mongoIndexArray.push(Object.assign(toCopy, { name: idx.name, key }));
    }

    return mongoIndexArray;
}

module.exports = {
    credPrefix,

@ -195,4 +237,6 @@ module.exports = {
    translateConditions,
    formatMasterKey,
    formatVersionKey,
    indexFormatMongoArrayToObject,
    indexFormatObjectToMongoArray,
};
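The two helpers convert between MongoDB's `{ name, key: Map }` index form and the plain `{ name, keys: [...] }` form used by the metadata API above. A round-trip looks like this (values illustrative):

// Illustrative round-trip between the two formats:
const spec = [ { name: 'by-date', keys: [ { key: 'value.last-modified', order: 1 } ] } ];
const mongoForm = indexFormatObjectToMongoArray(spec);
// -> [ { name: 'by-date', key: Map { 'value.last-modified' => 1 } } ]
const backAgain = indexFormatMongoArrayToObject(mongoForm);
// -> [ { name: 'by-date', keys: [ { key: 'value.last-modified', order: 1 } ] } ]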
@ -29,5 +29,4 @@ server.start(() => {
    logger.info('Metadata Proxy Server successfully started. ' +
        `Using the ${metadataWrapper.implName} backend`);
});

```
@ -10,21 +10,21 @@ function trySetDirSyncFlag(path) {

    const GETFLAGS = 2148034049;
    const SETFLAGS = 1074292226;
    const FS_DIRSYNC_FL = 65536;
    const FS_DIRSYNC_FL = 65536n;
    const buffer = Buffer.alloc(8, 0);
    const pathFD = fs.openSync(path, 'r');
    const status = ioctl(pathFD, GETFLAGS, buffer);
    assert.strictEqual(status, 0);
    const currentFlags = buffer.readUIntLE(0, 8);
    const currentFlags = buffer.readBigInt64LE(0);
    const flags = currentFlags | FS_DIRSYNC_FL;
    buffer.writeUIntLE(flags, 0, 8);
    buffer.writeBigInt64LE(flags, 0);
    const status2 = ioctl(pathFD, SETFLAGS, buffer);
    assert.strictEqual(status2, 0);
    fs.closeSync(pathFD);
    const pathFD2 = fs.openSync(path, 'r');
    const confirmBuffer = Buffer.alloc(8, 0);
    ioctl(pathFD2, GETFLAGS, confirmBuffer);
    assert.strictEqual(confirmBuffer.readUIntLE(0, 8),
    assert.strictEqual(confirmBuffer.readBigInt64LE(0),
        currentFlags | FS_DIRSYNC_FL, 'FS_DIRSYNC_FL not set');
    fs.closeSync(pathFD2);
}
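The switch to BigInt here matters because Node's non-BigInt buffer readers only support up to 48 bits, so `readUIntLE(0, 8)` cannot read a full 64-bit flag word; `readBigInt64LE` can. The flag itself is unchanged:

// 65536n === 1n << 16n, i.e. FS_DIRSYNC_FL sets bit 16 of the inode flags;
// both operands of | must now be BigInt:
const flags = currentFlags | 65536n;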
@ -120,8 +120,8 @@ export function generateVersionId(info: string, replicationGroupId: string): str
    lastSeq = lastTimestamp === ts ? lastSeq + 1 : 0;
    lastTimestamp = ts;

    // if S3_VERSION_ID_ENCODING_TYPE is "hex", info is used. By default, it is not used.
    if (process.env.S3_VERSION_ID_ENCODING_TYPE === 'hex') {
    // if S3_VERSION_ID_ENCODING_TYPE is "hex", info is used.
    if (process.env.S3_VERSION_ID_ENCODING_TYPE === 'hex' || !process.env.S3_VERSION_ID_ENCODING_TYPE) {
        // info field stays as is
    } else {
        info = ''; // eslint-disable-line
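The behavioural change in this hunk: previously `info` was kept only when S3_VERSION_ID_ENCODING_TYPE was explicitly set to 'hex'; after the change it is also kept when the variable is unset, so keeping `info` becomes the default and only an explicit non-'hex' value clears it:

// S3_VERSION_ID_ENCODING_TYPE   before         after
// (unset)                       info cleared   info kept
// 'hex'                         info kept      info kept
// anything else                 info cleared   info cleared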
91 package.json

@ -3,54 +3,54 @@
    "engines": {
        "node": ">=16"
    },
    "version": "7.70.31",
    "version": "8.1.134",
    "description": "Common utilities for the S3 project components",
    "main": "build/index.js",
    "repository": {
        "type": "git",
        "url": "git+https://github.com/scality/Arsenal.git"
    },
    "author": "Giorgio Regni",
    "author": "Scality Inc.",
    "license": "Apache-2.0",
    "bugs": {
        "url": "https://github.com/scality/Arsenal/issues"
    },
    "homepage": "https://github.com/scality/Arsenal#readme",
    "dependencies": {
        "@azure/identity": "^3.1.1",
        "@azure/storage-blob": "^12.12.0",
        "@js-sdsl/ordered-set": "^4.4.2",
        "@types/async": "^3.2.12",
        "@types/utf8": "^3.0.1",
        "JSONStream": "^1.0.0",
        "@swc/cli": "^0.4.0",
        "@swc/core": "^1.7.4",
        "agentkeepalive": "^4.1.3",
        "ajv": "6.12.2",
        "async": "~2.1.5",
        "ajv": "^6.12.3",
        "async": "^2.6.4",
        "aws-sdk": "^2.1005.0",
        "azure-storage": "~2.10.7",
        "backo": "^1.1.0",
        "base-x": "3.0.8",
        "base62": "2.0.1",
        "bson": "4.0.0",
        "debug": "~2.6.9",
        "base-x": "^3.0.8",
        "base62": "^2.0.1",
        "bson": "^4.0.0",
        "debug": "^4.1.0",
        "diskusage": "^1.1.1",
        "fcntl": "github:scality/node-fcntl#0.2.2",
        "hdclient": "scality/hdclient#1.1.0",
        "fcntl": "git+https://git.yourcmc.ru/vitalif/zenko-fcntl.git",
        "httpagent": "git+https://git.yourcmc.ru/vitalif/zenko-httpagent.git#development/1.0",
        "https-proxy-agent": "^2.2.0",
        "ioredis": "^4.28.5",
        "ipaddr.js": "1.9.1",
        "ipaddr.js": "^1.9.1",
        "joi": "^17.6.0",
        "level": "~5.0.1",
        "level-sublevel": "~6.6.5",
        "mongodb": "^3.0.1",
        "node-forge": "^0.7.1",
        "prom-client": "14.2.0",
        "simple-glob": "^0.2",
        "socket.io": "~4.6.1",
        "socket.io-client": "~4.6.1",
        "sproxydclient": "github:scality/sproxydclient#8.0.4",
        "utf8": "2.1.2",
        "JSONStream": "^1.0.0",
        "level": "^5.0.1",
        "level-sublevel": "^6.6.5",
        "mongodb": "^5.2.0",
        "node-forge": "^1.3.0",
        "prom-client": "^14.2.0",
        "simple-glob": "^0.2.0",
        "socket.io": "^4.6.1",
        "socket.io-client": "^4.6.1",
        "utf8": "^3.0.0",
        "uuid": "^3.0.1",
        "werelogs": "scality/werelogs#8.1.4",
        "xml2js": "~0.4.23"
        "werelogs": "git+https://git.yourcmc.ru/vitalif/zenko-werelogs.git#development/8.1",
        "xml2js": "^0.4.23"
    },
    "optionalDependencies": {
        "ioctl": "^2.0.2"

@ -59,22 +59,24 @@
        "@babel/preset-env": "^7.16.11",
        "@babel/preset-typescript": "^7.16.7",
        "@sinonjs/fake-timers": "^6.0.1",
        "@types/async": "^3.2.12",
        "@types/utf8": "^3.0.1",
        "@types/ioredis": "^4.28.10",
        "@types/jest": "^27.4.1",
        "@types/node": "^17.0.21",
        "@types/node": "^18.19.41",
        "@types/xml2js": "^0.4.11",
        "eslint": "^8.12.0",
        "eslint-config-airbnb": "6.2.0",
        "eslint-config-scality": "scality/Guidelines#7.10.2",
        "eslint": "^8.14.0",
        "eslint-config-airbnb-base": "^15.0.0",
        "eslint-config-scality": "git+https://git.yourcmc.ru/vitalif/zenko-eslint-config-scality.git",
        "eslint-plugin-react": "^4.3.0",
        "jest": "^27.5.1",
        "mocha": "8.0.1",
        "mongodb-memory-server": "^6.0.2",
        "mongodb-memory-server": "^8.12.2",
        "nyc": "^15.1.0",
        "sinon": "^9.0.2",
        "temp": "0.9.1",
        "temp": "^0.9.1",
        "ts-jest": "^27.1.3",
        "ts-node": "^10.6.0",
        "typescript": "^4.6.2"
        "typescript": "^4.9.5"
    },
    "scripts": {
        "lint": "eslint $(git ls-files '*.js')",

@ -82,18 +84,28 @@
        "lint_yml": "yamllint $(git ls-files '*.yml')",
        "test": "jest tests/unit",
        "build": "tsc",
        "prepare": "yarn build",
        "prepack": "tsc",
        "postinstall": "[ -d build ] || swc -d build --copy-files package.json index.ts lib",
        "ft_test": "jest tests/functional --testTimeout=120000 --forceExit",
        "coverage": "nyc --clean jest tests --coverage --testTimeout=120000 --forceExit",
        "build_doc": "cd documentation/listingAlgos/pics; dot -Tsvg delimiterStateChart.dot > delimiterStateChart.svg; dot -Tsvg delimiterMasterV0StateChart.dot > delimiterMasterV0StateChart.svg; dot -Tsvg delimiterVersionsStateChart.dot > delimiterVersionsStateChart.svg"
    },
    "private": true,
    "jest": {
        "maxWorkers": 1,
        "coverageReporters": [
            "json"
        ],
        "collectCoverageFrom": [
            "lib/**/*.{js,ts}",
            "index.js"
        ],
        "preset": "ts-jest",
        "testEnvironment": "node",
        "transform": {
            "^.\\.ts?$": "ts-jest"
        },
        "transformIgnorePatterns": [],
        "globals": {
            "test-jest": {
                "diagnostics": {

@ -101,5 +113,12 @@
                }
            }
        }
    },
    "nyc": {
        "tempDirectory": "coverage",
        "reporter": [
            "lcov",
            "text"
        ]
    }
}
@ -0,0 +1,501 @@
const async = require('async');
const assert = require('assert');
const sinon = require('sinon');
const werelogs = require('werelogs');
const { MongoMemoryReplSet } = require('mongodb-memory-server');
const { errors, versioning } = require('../../../../index');
const logger = new werelogs.Logger('MongoClientInterface', 'debug', 'debug');
const BucketInfo = require('../../../../lib/models/BucketInfo').default;
const MetadataWrapper =
    require('../../../../lib/storage/metadata/MetadataWrapper');
const genVID = require('../../../../lib/versioning/VersionID').generateVersionId;
const { BucketVersioningKeyFormat } = versioning.VersioningConstants;

const IMPL_NAME = 'mongodb';
const DB_NAME = 'metadata';
const BUCKET_NAME = 'test-bucket';
const replicationGroupId = 'RG001';

const mongoserver = new MongoMemoryReplSet({
    debug: false,
    instanceOpts: [
        { port: 27018 },
    ],
    replSet: {
        name: 'rs0',
        count: 1,
        DB_NAME,
        storageEngine: 'ephemeralForTest',
    },
});

let uidCounter = 0;
function generateVersionId() {
    return genVID(`${process.pid}.${uidCounter++}`,
        replicationGroupId);
}

const variations = [
    { it: '(v0)', vFormat: BucketVersioningKeyFormat.v0 },
    { it: '(v1)', vFormat: BucketVersioningKeyFormat.v1 },
];
describe('MongoClientInterface::metadata.deleteObjectMD', () => {
    let metadata;
    let collection;

    function getObjectCount(cb) {
        collection.countDocuments()
            .then(count => cb(null, count))
            .catch(err => cb(err));
    }

    function getObject(key, cb) {
        collection.findOne({
            _id: key,
        }, {}).then(doc => {
            if (!doc) {
                return cb(errors.NoSuchKey);
            }
            return cb(null, doc.value);
        }).catch(err => cb(err));
    }
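
    // NB: the '\x7fM' prefix passed to getObject() further down is the v1
    // master-key prefix (DbPrefixes.Master in Arsenal's versioning
    // constants), so getObject('\x7fMtest-object') fetches the master record
    // of 'test-object'.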

    beforeAll(done => {
        mongoserver.start().then(() => {
            mongoserver.waitUntilRunning().then(() => {
                const opts = {
                    mongodb: {
                        replicaSetHosts: 'localhost:27018',
                        writeConcern: 'majority',
                        replicaSet: 'rs0',
                        readPreference: 'primary',
                        database: DB_NAME,
                    },
                };
                metadata = new MetadataWrapper(IMPL_NAME, opts, null, logger);
                metadata.setup(done);
            });
        });
    });

    afterAll(done => {
        async.series([
            next => metadata.close(next),
            next => mongoserver.stop()
                .then(() => next())
                .catch(next),
        ], done);
    });

    variations.forEach(variation => {
        const itOnlyInV1 = variation.vFormat === 'v1' ? it : it.skip;
        describe(`vFormat : ${variation.vFormat}`, () => {
            beforeEach(done => {
                const bucketMD = BucketInfo.fromObj({
                    _name: BUCKET_NAME,
                    _owner: 'testowner',
                    _ownerDisplayName: 'testdisplayname',
                    _creationDate: new Date().toJSON(),
                    _acl: {
                        Canned: 'private',
                        FULL_CONTROL: [],
                        WRITE: [],
                        WRITE_ACP: [],
                        READ: [],
                        READ_ACP: [],
                    },
                    _mdBucketModelVersion: 10,
                    _transient: false,
                    _deleted: false,
                    _serverSideEncryption: null,
                    _versioningConfiguration: null,
                    _locationConstraint: 'us-east-1',
                    _readLocationConstraint: null,
                    _cors: null,
                    _replicationConfiguration: null,
                    _lifecycleConfiguration: null,
                    _uid: '',
                    _isNFS: null,
                    ingestion: null,
                });
                async.series([
                    next => {
                        metadata.client.defaultBucketKeyFormat = variation.vFormat;
                        return next();
                    },
                    next => metadata.createBucket(BUCKET_NAME, bucketMD, logger, err => {
                        if (err) {
                            return next(err);
                        }
                        collection = metadata.client.getCollection(BUCKET_NAME);
                        return next();
                    }),
                ], done);
            });

            afterEach(done => {
                metadata.deleteBucket(BUCKET_NAME, logger, done);
            });

            it(`Should delete non versioned object ${variation.vFormat}`, done => {
                const params = {
                    objName: 'non-deleted-object',
                    objVal: {
                        key: 'non-deleted-object',
                        versionId: 'null',
                    },
                };
                const versionParams = {
                    versioning: false,
                    versionId: null,
                    repairMaster: null,
                };
                return async.series([
                    next => {
                        // we put the master version of object
                        metadata.putObjectMD(BUCKET_NAME, params.objName, params.objVal,
                            versionParams, logger, next);
                    },
                    next => {
                        // we put the master version of a second object
                        params.objName = 'object-to-deleted';
                        params.objVal.key = 'object-to-deleted';
                        metadata.putObjectMD(BUCKET_NAME, params.objName, params.objVal,
                            versionParams, logger, next);
                    },
                    next => {
                        // We delete the first object
                        metadata.deleteObjectMD(BUCKET_NAME, params.objName, null, logger, next);
                    },
                    next => {
                        // Object must be removed
                        metadata.getObjectMD(BUCKET_NAME, params.objName, null, logger, err => {
                            assert.deepStrictEqual(err, errors.NoSuchKey);
                            return next();
                        });
                    },
                    next => {
                        // only 1 object remaining in db
                        getObjectCount((err, count) => {
                            assert.deepStrictEqual(err, null);
                            assert.strictEqual(count, 1);
                            return next();
                        });
                    },
                ], done);
            });

            it(`Should not throw error when object non existent ${variation.vFormat}`, done => {
                const objName = 'non-existent-object';
                metadata.deleteObjectMD(BUCKET_NAME, objName, null, logger, err => {
                    assert.deepStrictEqual(err, null);
                    return done();
                });
            });

            it(`Should not throw error when bucket non existent ${variation.vFormat}`, done => {
                const objName = 'non-existent-object';
                metadata.deleteObjectMD(BUCKET_NAME, objName, null, logger, err => {
                    assert.deepStrictEqual(err, null);
                    return done();
                });
            });

            it(`Master should not be updated when non latest version is deleted ${variation.vFormat}`, done => {
                let versionId1 = null;
                const params = {
                    objName: 'test-object',
                    objVal: {
                        key: 'test-object',
                        versionId: 'null',
                    },
                    vFormat: 'v0',
                };
                const versionParams = {
                    versioning: true,
                    versionId: null,
                    repairMaster: null,
                };
                return async.series([
                    next => {
                        // we start by creating a new version and master
                        versionId1 = generateVersionId(this.replicationGroupId);
                        params.versionId = versionId1;
                        params.objVal.versionId = versionId1;
                        versionParams.versionId = versionId1;
                        metadata.putObjectMD(BUCKET_NAME, params.objName, params.objVal,
                            versionParams, logger, next);
                    },
                    next => {
                        // we create a second version of the same object (master is updated)
                        params.objVal.versionId = 'version2';
                        versionParams.versionId = null;
                        metadata.putObjectMD(BUCKET_NAME, params.objName, params.objVal,
                            versionParams, logger, next);
                    },
                    next => {
                        // we delete the first version
                        metadata.deleteObjectMD(BUCKET_NAME, params.objName, { versionId: versionId1 },
                            logger, next);
                    },
                    next => {
                        // the first version should no longer be available
                        metadata.getObjectMD(BUCKET_NAME, params.objName, { versionId: versionId1 }, logger, err => {
                            assert.deepStrictEqual(err, errors.NoSuchKey);
                            return next();
                        });
                    },
                    next => {
                        // master must be containing second version metadata
                        metadata.getObjectMD(BUCKET_NAME, params.objName, null, logger, (err, data) => {
                            assert.deepStrictEqual(err, null);
                            assert.notStrictEqual(data.versionId, versionId1);
                            return next();
                        });
                    },
                    next => {
                        // master and one version remaining in db
                        getObjectCount((err, count) => {
                            assert.deepStrictEqual(err, null);
                            assert.strictEqual(count, 2);
                            return next();
                        });
                    },
                ], done);
            });

            it(`Master should be updated when last version is deleted ${variation.vFormat}`, done => {
                let versionId1;
                let versionId2;

                const params = {
                    objName: 'test-object',
                    objVal: {
                        key: 'test-object',
                        versionId: 'null',
                        isLast: false,
                    },
                };
                const versionParams = {
                    versioning: true,
                    versionId: null,
                    repairMaster: null,
                };
                return async.series([
                    next => {
                        // we start by creating a new version and master
                        versionId1 = generateVersionId(this.replicationGroupId);
                        params.versionId = versionId1;
                        params.objVal.versionId = versionId1;
                        versionParams.versionId = versionId1;
                        metadata.putObjectMD(BUCKET_NAME, params.objName, params.objVal,
                            versionParams, logger, next);
                    },
                    next => {
                        // we create a second version of the same object (master is updated)
                        // params.objVal.versionId = 'version2';
                        // versionParams.versionId = null;
                        versionId2 = generateVersionId(this.replicationGroupId);
                        params.versionId = versionId2;
                        params.objVal.versionId = versionId2;
                        versionParams.versionId = versionId2;
                        metadata.putObjectMD(BUCKET_NAME, params.objName, params.objVal,
                            versionParams, logger, next);
                    },
                    next => {
                        // deleting latest version
                        metadata.deleteObjectMD(BUCKET_NAME, params.objName, { versionId: versionId2 },
                            logger, next);
                    },
                    next => {
                        // latest version must be removed
                        metadata.getObjectMD(BUCKET_NAME, params.objName, { versionId: versionId2 }, logger, err => {
                            assert.deepStrictEqual(err, errors.NoSuchKey);
                            return next();
                        });
                    },
                    next => {
                        // master must be updated to contain first version data
                        metadata.getObjectMD(BUCKET_NAME, params.objName, null, logger, (err, data) => {
                            assert.deepStrictEqual(err, null);
                            assert.strictEqual(data.versionId, versionId1);
                            return next();
                        });
                    },
                    next => {
                        // one master and version in the db
                        getObjectCount((err, count) => {
                            assert.deepStrictEqual(err, null);
                            assert.strictEqual(count, 2);
                            return next();
                        });
                    },
                ], done);
            });

            it(`Should fail when version id non existent ${variation.vFormat}`, done => {
                const versionId = generateVersionId(this.replicationGroupId);
                const objName = 'test-object';
                metadata.deleteObjectMD(BUCKET_NAME, objName, { versionId }, logger, err => {
                    assert.deepStrictEqual(err, errors.NoSuchKey);
                    return done();
                });
            });

            itOnlyInV1(`Should create master when delete marker removed ${variation.vFormat}`, done => {
                const objVal = {
                    key: 'test-object',
                    isDeleteMarker: false,
                };
                const params = {
                    versioning: true,
                    versionId: null,
                    repairMaster: null,
                };
                let firstVersionVersionId;
                let deleteMarkerVersionId;
                async.series([
                    // We first create a new version and master
                    next => metadata.putObjectMD(BUCKET_NAME, 'test-object', objVal, params, logger, (err, res) => {
                        if (err) {
                            return next(err);
                        }
                        firstVersionVersionId = JSON.parse(res).versionId;
                        return next();
                    }),
                    // putting a delete marker as last version
                    next => {
                        objVal.isDeleteMarker = true;
                        params.versionId = null;
                        return metadata.putObjectMD(BUCKET_NAME, 'test-object', objVal, params, logger, (err, res) => {
                            if (err) {
                                return next(err);
                            }
                            deleteMarkerVersionId = JSON.parse(res).versionId;
                            return next();
                        });
                    },
                    next => {
                        // using fake clock to override the setTimeout used by the repair
                        const clock = sinon.useFakeTimers();
                        return metadata.deleteObjectMD(BUCKET_NAME, 'test-object', { versionId: deleteMarkerVersionId },
                            logger, () => {
                                // running the repair callback
                                clock.runAll();
                                clock.restore();
                                return next();
                            });
                    },
                    // waiting for the repair callback to finish
                    next => setTimeout(next, 100),
                    // master should be created
                    next => {
                        getObject('\x7fMtest-object', (err, object) => {
                            assert.deepStrictEqual(err, null);
                            assert.strictEqual(object.key, 'test-object');
                            assert.strictEqual(object.versionId, firstVersionVersionId);
                            assert.strictEqual(object.isDeleteMarker, false);
                            return next();
                        });
                    },
                ], done);
            });

            itOnlyInV1(`Should delete master when delete marker becomes last version ${variation.vFormat}`, done => {
                const objVal = {
                    key: 'test-object',
                    isDeleteMarker: false,
                };
                const params = {
                    versioning: true,
                    versionId: null,
                    repairMaster: null,
                };
                let versionId;
                async.series([
                    // We first create a new version and master
                    next => metadata.putObjectMD(BUCKET_NAME, 'test-object', objVal, params, logger, next),
                    // putting a delete marker as last version
                    next => {
                        objVal.isDeleteMarker = true;
                        params.versionId = null;
                        return metadata.putObjectMD(BUCKET_NAME, 'test-object', objVal, params, logger, next);
                    },
                    // putting new version on top of delete marker
                    next => {
                        objVal.isDeleteMarker = false;
                        return metadata.putObjectMD(BUCKET_NAME, 'test-object', objVal, params, logger, (err, res) => {
                            if (err) {
                                return next(err);
                            }
                            versionId = JSON.parse(res).versionId;
                            return next();
                        });
                    },
                    next => {
                        // using fake clock to override the setTimeout used by the repair
                        const clock = sinon.useFakeTimers();
                        return metadata.deleteObjectMD(BUCKET_NAME, 'test-object', { versionId },
                            logger, () => {
                                // running the repair callback
                                clock.runAll();
                                clock.restore();
                                return next();
                            });
                    },
                    // waiting for the repair callback to finish
                    next => setTimeout(next, 100),
                    // master must be deleted
                    next => {
                        getObject('\x7fMtest-object', err => {
                            assert.deepStrictEqual(err, errors.NoSuchKey);
                            return next();
                        });
                    },
                ], done);
            });

            it('should delete the object directly if params.doesNotNeedOpogUpdate is true', done => {
                const objName = 'object-to-delete';
                const objVal = {
                    key: 'object-to-delete',
                    versionId: 'null',
                };
                const versionParams = {
                    versioning: false,
                    versionId: null,
                    repairMaster: null,
                };
                async.series([
                    next => {
                        metadata.putObjectMD(BUCKET_NAME, objName, objVal, versionParams, logger, next);
                    },
                    next => {
                        metadata.deleteObjectMD(BUCKET_NAME, objName, { doesNotNeedOpogUpdate: true }, logger, next);
                    },
                    next => {
                        metadata.getObjectMD(BUCKET_NAME, objName, null, logger, err => {
                            assert.deepStrictEqual(err, errors.NoSuchKey);
                            return next();
                        });
                    },
                    next => {
                        getObjectCount((err, count) => {
                            assert.deepStrictEqual(err, null);
                            assert.strictEqual(count, 0);
                            return next();
                        });
                    },
                ], done);
            });

            it('should throw an error if params.doesNotNeedOpogUpdate is true and object does not exist', done => {
                const objName = 'non-existent-object';
                metadata.deleteObjectMD(BUCKET_NAME, objName, { doesNotNeedOpogUpdate: true }, logger, err => {
                    assert.deepStrictEqual(err, errors.InternalError);
                    return done();
                });
            });
        });
    });
});

@@ -0,0 +1,303 @@
const async = require('async');
const assert = require('assert');
const werelogs = require('werelogs');
const { MongoMemoryReplSet } = require('mongodb-memory-server');
const { errors, versioning } = require('../../../../index');
const logger = new werelogs.Logger('MongoClientInterface', 'debug', 'debug');
const BucketInfo = require('../../../../lib/models/BucketInfo').default;
const MetadataWrapper =
    require('../../../../lib/storage/metadata/MetadataWrapper');
const genVID = versioning.VersionID.generateVersionId;
const { BucketVersioningKeyFormat } = versioning.VersioningConstants;
const { formatMasterKey } = require('../../../../lib/storage/metadata/mongoclient/utils');

const IMPL_NAME = 'mongodb';
const DB_NAME = 'metadata';
const BUCKET_NAME = 'test-bucket';
const replicationGroupId = 'RG001';

const mongoserver = new MongoMemoryReplSet({
    debug: false,
    instanceOpts: [
        { port: 27019 },
    ],
    replSet: {
        name: 'rs0',
        count: 1,
        DB_NAME,
        storageEngine: 'ephemeralForTest',
    },
});
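
// NOTE: an in-memory *replica set* (rather than a standalone mongod) is
// started here because the metadata wrapper below connects with the
// 'majority' write concern, which requires replica-set semantics.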

let uidCounter = 0;
function generateVersionId() {
    return genVID(`${process.pid}.${uidCounter++}`,
        replicationGroupId);
}
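
// uidCounter guarantees that every generated version ID is unique within this
// test run: the process pid and an incrementing counter feed the generator.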

const variations = [
    { it: '(v0)', vFormat: BucketVersioningKeyFormat.v0 },
    { it: '(v1)', vFormat: BucketVersioningKeyFormat.v1 },
];
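
// The same tests run against both bucket key formats: in v0 the master
// document is stored under the bare object key, while in v1 it lives under a
// '\x7fM'-prefixed key (hence the '\x7fM...' lookups in the v1-only tests).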

describe('MongoClientInterface::metadata.getObjectMD', () => {
    let metadata;
    let collection;
    let versionId1;
    let versionId2;

    let params = {
        objName: 'pfx1-test-object',
        objVal: {
            key: 'pfx1-test-object',
            versionId: 'null',
        },
    };

    function updateMasterObject(objName, versionId, objVal, vFormat, cb) {
        const mKey = formatMasterKey(objName, vFormat);
        collection.updateOne(
            {
                _id: mKey,
                $or: [{
                    'value.versionId': {
                        $exists: false,
                    },
                },
                {
                    'value.versionId': {
                        $gt: versionId,
                    },
                },
                ],
            },
            {
                $set: { _id: mKey, value: objVal },
            },
            { upsert: true }).then(() => cb(null)).catch(err => cb(err));
    }
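
    // updateMasterObject writes the master document straight into the bucket
    // collection, upserting only when no master exists yet or when the stored
    // versionId is greater than the supplied one; the tests use it to plant a
    // PHD (placeholder) master that getObjectMD must then resolve to the
    // latest actual version.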

    /**
     * Sets the "deleted" property to true
     * @param {string} key object name
     * @param {Function} cb callback
     * @return {undefined}
     */
    function flagObjectForDeletion(key, cb) {
        collection.updateMany(
            { 'value.key': key },
            { $set: { 'value.deleted': true } },
            { upsert: false }).then(() => cb()).catch(err => cb(err));
    }
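
    // A document whose value.deleted flag is set is considered to be in the
    // process of being removed, so reads are expected to skip it (see the
    // 'tagged for deletion' test below, which expects NoSuchKey).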

    beforeAll(done => {
        mongoserver.start().then(() => {
            mongoserver.waitUntilRunning().then(() => {
                const opts = {
                    mongodb: {
                        replicaSetHosts: 'localhost:27019',
                        writeConcern: 'majority',
                        replicaSet: 'rs0',
                        readPreference: 'primary',
                        database: DB_NAME,
                    },
                };
                metadata = new MetadataWrapper(IMPL_NAME, opts, null, logger);
                metadata.setup(done);
            });
        });
    });

    afterAll(done => {
        async.series([
            next => metadata.close(next),
            next => mongoserver.stop()
                .then(() => next())
                .catch(next),
        ], done);
    });

    variations.forEach(variation => {
        const itOnlyInV1 = variation.vFormat === 'v1' ? it : it.skip;
        describe(`vFormat : ${variation.vFormat}`, () => {
            beforeEach(done => {
                const bucketMD = BucketInfo.fromObj({
                    _name: BUCKET_NAME,
                    _owner: 'testowner',
                    _ownerDisplayName: 'testdisplayname',
                    _creationDate: new Date().toJSON(),
                    _acl: {
                        Canned: 'private',
                        FULL_CONTROL: [],
                        WRITE: [],
                        WRITE_ACP: [],
                        READ: [],
                        READ_ACP: [],
                    },
                    _mdBucketModelVersion: 10,
                    _transient: false,
                    _deleted: false,
                    _serverSideEncryption: null,
                    _versioningConfiguration: null,
                    _locationConstraint: 'us-east-1',
                    _readLocationConstraint: null,
                    _cors: null,
                    _replicationConfiguration: null,
                    _lifecycleConfiguration: null,
                    _uid: '',
                    _isNFS: null,
                    ingestion: null,
                });
                const versionParams = {
                    versioning: true,
                    versionId: null,
                    repairMaster: null,
                };
                async.series([
                    next => {
                        metadata.client.defaultBucketKeyFormat = variation.vFormat;
                        return next();
                    },
                    next => metadata.createBucket(BUCKET_NAME, bucketMD, logger, err => {
                        if (err) {
                            return next(err);
                        }
                        collection = metadata.client.getCollection(BUCKET_NAME);
                        return next();
                    }),
                    next => {
                        metadata.putObjectMD(BUCKET_NAME, params.objName, params.objVal,
                            versionParams, logger, (err, res) => {
                                if (err) {
                                    return next(err);
                                }
                                versionId1 = JSON.parse(res).versionId;
                                return next(null);
                            });
                    },
                    next => {
                        metadata.putObjectMD(BUCKET_NAME, params.objName, params.objVal,
                            versionParams, logger, (err, res) => {
                                if (err) {
                                    return next(err);
                                }
                                versionId2 = JSON.parse(res).versionId;
                                return next(null);
                            });
                    },
                ], done);
            });

            afterEach(done => {
                // reset params
                params = {
                    objName: 'pfx1-test-object',
                    objVal: {
                        key: 'pfx1-test-object',
                        versionId: 'null',
                    },
                };
                metadata.deleteBucket(BUCKET_NAME, logger, done);
            });

            it(`Should return latest version of object ${variation.it}`, done =>
                metadata.getObjectMD(BUCKET_NAME, params.objName, null, logger, (err, object) => {
                    assert.deepStrictEqual(err, null);
                    assert.strictEqual(object.key, params.objName);
                    assert.strictEqual(object.versionId, versionId2);
                    return done();
                }));

            it(`Should return the specified version of object ${variation.it}`, done =>
                metadata.getObjectMD(BUCKET_NAME, params.objName, { versionId: versionId1 }, logger, (err, object) => {
                    assert.deepStrictEqual(err, null);
                    assert.strictEqual(object.key, params.objName);
                    assert.strictEqual(object.versionId, versionId1);
                    return done();
                }));

            it(`Should throw error when version non existent ${variation.it}`, done => {
                const versionId = '1234567890';
                return metadata.getObjectMD(BUCKET_NAME, params.objName, { versionId }, logger, (err, object) => {
                    assert.deepStrictEqual(object, undefined);
                    assert.deepStrictEqual(err, errors.NoSuchKey);
                    return done();
                });
            });

            it(`Should throw error when object non existent ${variation.it}`, done => {
                const objName = 'non-existent-object';
                return metadata.getObjectMD(BUCKET_NAME, objName, null, logger, err => {
                    assert.deepStrictEqual(err, errors.NoSuchKey);
                    return done();
                });
            });

            it(`Should throw error when bucket non existent ${variation.it}`, done => {
                const bucketName = 'non-existent-bucket';
                return metadata.getObjectMD(bucketName, params.objName, null, logger, (err, object) => {
                    assert.deepStrictEqual(object, undefined);
                    assert.deepStrictEqual(err, errors.NoSuchKey);
                    return done();
                });
            });

            it(`Should return latest version when master is PHD ${variation.it}`, done => {
                async.series([
                    next => {
                        const objectName = variation.vFormat === 'v0' ? 'pfx1-test-object' : '\x7fMpfx1-test-object';
                        // adding isPHD flag to master
                        const phdVersionId = generateVersionId();
                        params.objVal.versionId = phdVersionId;
                        params.objVal.isPHD = true;
                        updateMasterObject(objectName, phdVersionId, params.objVal,
                            variation.vFormat, next);
                    },
                    // Should return latest object version
                    next => metadata.getObjectMD(BUCKET_NAME, params.objName, null, logger, (err, object) => {
                        assert.deepStrictEqual(err, null);
                        assert.strictEqual(object.key, params.objName);
                        assert.strictEqual(object.versionId, versionId2);
                        delete params.objVal.isPHD;
                        return next();
                    }),
                ], done);
            });

            it('Should fail to get an object tagged for deletion', done => {
                async.series([
                    next => flagObjectForDeletion(params.objName, next),
                    next => metadata.getObjectMD(BUCKET_NAME, params.objName, null, logger, (err, object) => {
                        assert.deepStrictEqual(object, undefined);
                        assert.deepStrictEqual(err, errors.NoSuchKey);
                        return next();
                    }),
                ], done);
            });

            itOnlyInV1(`Should return last version when master deleted ${variation.vFormat}`, done => {
                const versioningParams = {
                    versioning: true,
                    versionId: null,
                    repairMaster: null,
                };
                async.series([
                    // putting a delete marker as last version
                    next => {
                        params.versionId = null;
                        params.objVal.isDeleteMarker = true;
                        return metadata.putObjectMD(BUCKET_NAME, params.objName, params.objVal, versioningParams,
                            logger, next);
                    },
                    next => metadata.getObjectMD(BUCKET_NAME, params.objName, null, logger, (err, object) => {
                        assert.deepStrictEqual(err, null);
                        assert.strictEqual(object.key, params.objName);
                        assert.strictEqual(object.isDeleteMarker, true);
                        params.objVal.isDeleteMarker = null;
                        return next();
                    }),
                ], done);
            });
        });
    });
});

@@ -0,0 +1,331 @@
const async = require('async');
const assert = require('assert');
const werelogs = require('werelogs');
const { MongoMemoryReplSet } = require('mongodb-memory-server');
const { versioning } = require('../../../../index');
const logger = new werelogs.Logger('MongoClientInterface', 'debug', 'debug');
const BucketInfo = require('../../../../lib/models/BucketInfo').default;
const MetadataWrapper =
    require('../../../../lib/storage/metadata/MetadataWrapper');
const genVID = versioning.VersionID.generateVersionId;
const { BucketVersioningKeyFormat } = versioning.VersioningConstants;
const { formatMasterKey, formatVersionKey } = require('../../../../lib/storage/metadata/mongoclient/utils');

const IMPL_NAME = 'mongodb';
const DB_NAME = 'metadata';
const BUCKET_NAME = 'test-bucket-batching';
const replicationGroupId = 'RG001';
const N = 10;

const mongoserver = new MongoMemoryReplSet({
    debug: false,
    instanceOpts: [
        { port: 27019 },
    ],
    replSet: {
        name: 'rs0',
        count: 1,
        DB_NAME,
        storageEngine: 'ephemeralForTest',
    },
});

let uidCounter = 0;
function generateVersionId() {
    return genVID(`${process.pid}.${uidCounter++}`,
        replicationGroupId);
}

const variations = [
    { it: '(v0)', vFormat: BucketVersioningKeyFormat.v0, versioning: false },
    { it: '(v0)', vFormat: BucketVersioningKeyFormat.v0, versioning: true },
    { it: '(v1)', vFormat: BucketVersioningKeyFormat.v1, versioning: false },
    { it: '(v1)', vFormat: BucketVersioningKeyFormat.v1, versioning: true },
];

describe('MongoClientInterface::metadata.getObjectsMD', () => {
    let metadata;
    let collection;
    let versionId2;

    const params = {
        key: 'pfx1-test-object',
        objVal: {
            key: 'pfx1-test-object',
            versionId: 'null',
        },
    };

    function updateMasterObject(objName, versionId, objVal, vFormat, cb) {
        const mKey = formatMasterKey(objName, vFormat);
        collection.updateOne(
            {
                _id: mKey,
                $or: [{
                    'value.versionId': {
                        $exists: false,
                    },
                },
                {
                    'value.versionId': {
                        $gt: versionId,
                    },
                },
                ],
            },
            {
                $set: { _id: mKey, value: objVal },
            },
            { upsert: true }).then(() => cb(null)).catch(err => cb(err));
    }

    /**
     * Sets the "deleted" property to true
     * @param {string} key object name
     * @param {Function} cb callback
     * @return {undefined}
     */
    function flagObjectForDeletion(key, cb) {
        collection.updateMany(
            { 'value.key': key },
            { $set: { 'value.deleted': true } },
            { upsert: false }).then(() => cb()).catch(err => cb(err));
    }

    beforeAll(done => {
        mongoserver.start().then(() => {
            mongoserver.waitUntilRunning().then(() => {
                const opts = {
                    mongodb: {
                        replicaSetHosts: 'localhost:27019',
                        writeConcern: 'majority',
                        replicaSet: 'rs0',
                        readPreference: 'primary',
                        database: DB_NAME,
                    },
                };
                metadata = new MetadataWrapper(IMPL_NAME, opts, null, logger);
                metadata.setup(done);
            });
        });
    });

    afterAll(done => {
        async.series([
            next => metadata.close(next),
            next => mongoserver.stop()
                .then(() => next())
                .catch(next),
        ], done);
    });

    variations.forEach(variation => {
        const itOnlyInV1 = variation.vFormat === 'v1' && variation.versioning ? it : it.skip;
        describe(`vFormat : ${variation.vFormat}, versioning: ${variation.versioning}`, () => {
            let paramsArr = [];

            beforeEach(done => {
                // reset params
                paramsArr = Array.from({ length: N }, (_, i) => ({
                    key: `pfx1-test-object${i + 1}`,
                    objVal: {
                        key: `pfx1-test-object${i + 1}`,
                        versionId: 'null',
                    },
                }));
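                // N distinct keys; when versioning is enabled, each entry's
                // versionId is filled in below once the corresponding put
                // returns.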
                const bucketMD = BucketInfo.fromObj({
                    _name: BUCKET_NAME,
                    _owner: 'testowner',
                    _ownerDisplayName: 'testdisplayname',
                    _creationDate: new Date().toJSON(),
                    _acl: {
                        Canned: 'private',
                        FULL_CONTROL: [],
                        WRITE: [],
                        WRITE_ACP: [],
                        READ: [],
                        READ_ACP: [],
                    },
                    _mdBucketModelVersion: 10,
                    _transient: false,
                    _deleted: false,
                    _serverSideEncryption: null,
                    _versioningConfiguration: null,
                    _locationConstraint: 'us-east-1',
                    _readLocationConstraint: null,
                    _cors: null,
                    _replicationConfiguration: null,
                    _lifecycleConfiguration: null,
                    _uid: '',
                    _isNFS: null,
                    ingestion: null,
                });
                const versionParams = {
                    versioning: variation.versioning,
                    versionId: null,
                    repairMaster: null,
                };
                async.series([
                    next => {
                        metadata.client.defaultBucketKeyFormat = variation.vFormat;
                        return next();
                    },
                    next => metadata.createBucket(BUCKET_NAME, bucketMD, logger, err => {
                        if (err) {
                            return next(err);
                        }
                        collection = metadata.client.getCollection(BUCKET_NAME);
                        return next();
                    }),
                    next => {
                        async.eachSeries(paramsArr, (params, eachCb) => {
                            metadata.putObjectMD(BUCKET_NAME, params.key, params.objVal,
                                versionParams, logger, (err, res) => {
                                    if (err) {
                                        return eachCb(err);
                                    }
                                    if (variation.versioning) {
                                        // eslint-disable-next-line no-param-reassign
                                        params.versionId = JSON.parse(res).versionId;
                                    }
                                    return eachCb(null);
                                });
                        }, next);
                    },
                    next => {
                        metadata.putObjectMD(BUCKET_NAME, paramsArr[N - 1].key, paramsArr[N - 1].objVal,
                            versionParams, logger, (err, res) => {
                                if (err) {
                                    return next(err);
                                }
                                if (variation.versioning) {
                                    versionId2 = JSON.parse(res).versionId;
                                } else {
                                    versionId2 = 'null';
                                }
                                return next(null);
                            });
                    },
                ], done);
            });

            afterEach(done => {
                metadata.deleteBucket(BUCKET_NAME, logger, done);
            });

            it(`should get ${N} object${variation.versioning ? '' : ' master'} versions using batching`, done => {
                const request = paramsArr.map(({ key, objVal }) => ({
                    key,
                    params: {
                        versionId: variation.versioning ? objVal.versionId : null,
                    },
                }));
                metadata.getObjectsMD(BUCKET_NAME, request, logger, (err, objects) => {
                    assert.strictEqual(err, null);
                    assert.strictEqual(objects.length, N);
                    objects.forEach((obj, i) => {
                        assert.strictEqual(obj.doc.key, paramsArr[i].key);
                        if (variation.versioning) {
                            assert.strictEqual(obj.doc.versionId, paramsArr[i].objVal.versionId);
                        }
                    });
                    return done();
                });
            });

            it('should not throw an error if object or version does not exist and return a null doc', done => {
                const request = [{
                    key: 'nonexistent',
                    params: {
                        versionId: variation.versioning ? 'nonexistent' : null,
                    },
                }];
                metadata.getObjectsMD(BUCKET_NAME, request, logger, (err, objects) => {
                    assert.strictEqual(err, null);
                    assert.strictEqual(objects.length, 1);
                    assert.strictEqual(objects[0].doc, null);
                    done();
                });
            });

            it(`should return latest version when master is PHD ${variation.it}`, done => {
                if (!variation.versioning) {
                    return done();
                }
                const request = paramsArr.map(({ key, objVal }) => ({
                    key,
                    params: {
                        versionId: variation.versioning ? objVal.versionId : null,
                    },
                }));
                return async.series([
                    next => {
                        let objectName = null;
                        if (variation.versioning) {
                            objectName =
                                formatVersionKey(paramsArr[N - 1].key, paramsArr[N - 1].versionId, variation.vFormat);
                        } else {
                            objectName = formatMasterKey(paramsArr[N - 1].key, variation.vFormat);
                        }
                        // adding isPHD flag to master
                        const phdVersionId = generateVersionId();
                        paramsArr[N - 1].objVal.versionId = phdVersionId;
                        paramsArr[N - 1].objVal.isPHD = true;
                        updateMasterObject(objectName, phdVersionId, paramsArr[N - 1].objVal,
                            variation.vFormat, next);
                    },
                    // Should return latest object version
                    next => metadata.getObjectsMD(BUCKET_NAME, request, logger, (err, objects) => {
                        assert.deepStrictEqual(err, null);
                        objects.forEach((obj, i) => {
                            assert.strictEqual(obj.doc.key, paramsArr[i].objVal.key);
                            if (variation.versioning && i === N - 1) {
                                assert.strictEqual(obj.doc.versionId, versionId2);
                            } else {
                                assert.strictEqual(obj.doc.versionId, paramsArr[i].objVal.versionId);
                            }
                        });
                        delete paramsArr[N - 1].objVal.isPHD;
                        return next();
                    }),
                ], done);
            });

            it('should fail to get an object tagged for deletion', done => {
                const key = paramsArr[0].key;
                flagObjectForDeletion(key, err => {
                    assert.ifError(err);
                    metadata.getObjectsMD(BUCKET_NAME, [{ key }], logger, (err, object) => {
                        assert.strictEqual(err, null);
                        assert.strictEqual(object[0].doc, null);
                        done();
                    });
                });
            });

            itOnlyInV1(`Should return last version when master deleted ${variation.vFormat}`, done => {
                const versioningParams = {
                    versioning: true,
                    versionId: null,
                    repairMaster: null,
                };
                async.series([
                    // putting a delete marker as last version
                    next => {
                        paramsArr[0].versionId = null;
                        paramsArr[0].objVal.isDeleteMarker = true;
                        return metadata.putObjectMD(BUCKET_NAME, paramsArr[0].key, paramsArr[0].objVal,
                            versioningParams, logger, next);
                    },
                    next => metadata.getObjectsMD(BUCKET_NAME, [{ key: paramsArr[0].key }], logger, (err, objects) => {
                        assert.strictEqual(err, null);
                        assert.strictEqual(objects[0].doc.key, paramsArr[0].key);
                        assert.strictEqual(objects[0].doc.isDeleteMarker, true);
                        paramsArr[0].objVal.isDeleteMarker = null;
                        return next();
                    }),
                ], done);
            });
        });
    });
});

@@ -0,0 +1,744 @@
const async = require('async');
const assert = require('assert');
const werelogs = require('werelogs');
const { MongoMemoryReplSet } = require('mongodb-memory-server');
const logger = new werelogs.Logger('MongoClientInterface', 'debug', 'debug');
const MetadataWrapper =
    require('../../../../../lib/storage/metadata/MetadataWrapper');
const { versioning } = require('../../../../../index');
const { BucketVersioningKeyFormat } = versioning.VersioningConstants;
const { assertContents, flagObjectForDeletion, makeBucketMD, putBulkObjectVersions } = require('./utils');

const IMPL_NAME = 'mongodb';
const DB_NAME = 'metadata';
const BUCKET_NAME = 'test-lifecycle-list-current-bucket';

const mongoserver = new MongoMemoryReplSet({
    debug: false,
    instanceOpts: [
        { port: 27020 },
    ],
    replSet: {
        name: 'rs0',
        count: 1,
        DB_NAME,
        storageEngine: 'ephemeralForTest',
    },
});

describe('MongoClientInterface::metadata.listLifecycleObject::current', () => {
    let metadata;
    let collection;
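    // expectedVersionIds maps each object key to the versionId of its current
    // (latest) version, as reported by putBulkObjectVersions in beforeEach.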
    const expectedVersionIds = {};
    const location1 = 'loc1';
    const location2 = 'loc2';

    beforeAll(done => {
        mongoserver.start().then(() => {
            mongoserver.waitUntilRunning().then(() => {
                const opts = {
                    mongodb: {
                        replicaSetHosts: 'localhost:27020',
                        writeConcern: 'majority',
                        replicaSet: 'rs0',
                        readPreference: 'primary',
                        database: DB_NAME,
                    },
                };
                metadata = new MetadataWrapper(IMPL_NAME, opts, null, logger);
                metadata.setup(done);
            });
        });
    });

    afterAll(done => {
        async.series([
            next => metadata.close(next),
            next => mongoserver.stop()
                .then(() => next())
                .catch(next),
        ], done);
    });

    [BucketVersioningKeyFormat.v0, BucketVersioningKeyFormat.v1].forEach(v => {
        describe(`bucket format version: ${v}`, () => {
            beforeEach(done => {
                const bucketMD = makeBucketMD(BUCKET_NAME);
                const versionParams = {
                    versioning: true,
                    versionId: null,
                    repairMaster: null,
                };
                metadata.client.defaultBucketKeyFormat = v;
                async.series([
                    next => metadata.createBucket(BUCKET_NAME, bucketMD, logger, err => {
                        if (err) {
                            return next(err);
                        }

                        collection = metadata.client.getCollection(BUCKET_NAME);
                        return next();
                    }),
                    next => {
                        const objName = 'pfx1-test-object';
                        const objVal = {
                            key: 'pfx1-test-object',
                            versionId: 'null',
                            dataStoreName: location1,
                        };
                        const nbVersions = 5;

                        const timestamp = 0;
                        putBulkObjectVersions(metadata, BUCKET_NAME, objName, objVal, versionParams,
                            nbVersions, timestamp, logger, (err, data) => {
                                expectedVersionIds[objName] = data.lastVersionId;
                                return next(err);
                            });
                        /* eslint-disable max-len */
                        // The following versions are created:
                        // { "_id" : "Mpfx1-test-object", "value" : { "key" : "pfx1-test-object", "versionId" : "vid4", "last-modified" : "1970-01-01T00:00:00.005Z" } }
                        // { "_id" : "Vpfx1-test-object{sep}id4", "value" : { "key" : "pfx1-test-object", "versionId" : "vid4", "last-modified" : "1970-01-01T00:00:00.005Z" } }
                        // { "_id" : "Vpfx1-test-object{sep}id3", "value" : { "key" : "pfx1-test-object", "versionId" : "vid3", "last-modified" : "1970-01-01T00:00:00.004Z" } }
                        // { "_id" : "Vpfx1-test-object{sep}id2", "value" : { "key" : "pfx1-test-object", "versionId" : "vid2", "last-modified" : "1970-01-01T00:00:00.003Z" } }
                        // { "_id" : "Vpfx1-test-object{sep}id1", "value" : { "key" : "pfx1-test-object", "versionId" : "vid1", "last-modified" : "1970-01-01T00:00:00.002Z" } }
                        // { "_id" : "Vpfx1-test-object{sep}id0", "value" : { "key" : "pfx1-test-object", "versionId" : "vid0", "last-modified" : "1970-01-01T00:00:00.001Z" } }
                        /* eslint-enable max-len */
                    },
                    next => {
                        const objName = 'pfx2-test-object';
                        const objVal = {
                            key: 'pfx2-test-object',
                            versionId: 'null',
                            dataStoreName: location2,
                        };
                        const nbVersions = 5;
                        const timestamp = 2000;
                        putBulkObjectVersions(metadata, BUCKET_NAME, objName, objVal, versionParams,
                            nbVersions, timestamp, logger, (err, data) => {
                                expectedVersionIds[objName] = data.lastVersionId;
                                return next(err);
                            });
                        /* eslint-disable max-len */
                        // The following versions are created:
                        // { "_id" : "Mpfx2-test-object", "value" : { "key" : "pfx2-test-object", "versionId" : "vid4", "last-modified" : "1970-01-01T00:00:02.005Z" } }
                        // { "_id" : "Vpfx2-test-object{sep}id4", "value" : { "key" : "pfx2-test-object", "versionId" : "vid4", "last-modified" : "1970-01-01T00:00:02.005Z" } }
                        // { "_id" : "Vpfx2-test-object{sep}id3", "value" : { "key" : "pfx2-test-object", "versionId" : "vid3", "last-modified" : "1970-01-01T00:00:02.004Z" } }
                        // { "_id" : "Vpfx2-test-object{sep}id2", "value" : { "key" : "pfx2-test-object", "versionId" : "vid2", "last-modified" : "1970-01-01T00:00:02.003Z" } }
                        // { "_id" : "Vpfx2-test-object{sep}id1", "value" : { "key" : "pfx2-test-object", "versionId" : "vid1", "last-modified" : "1970-01-01T00:00:02.002Z" } }
// { "_id" : "Vpfx1-test-object{sep}id0", "value" : { "key" : "pfx2-test-object", "versionId" : "vid0", "last-modified" : "1970-01-01T00:00:02.001Z" } }
                        /* eslint-enable max-len */
                    },
                    next => {
                        const objName = 'pfx3-test-object';
                        const objVal = {
                            key: 'pfx3-test-object',
                            versionId: 'null',
                            dataStoreName: location1,
                        };
                        const nbVersions = 5;
                        const timestamp = 1000;
                        putBulkObjectVersions(metadata, BUCKET_NAME, objName, objVal, versionParams,
                            nbVersions, timestamp, logger, (err, data) => {
                                expectedVersionIds[objName] = data.lastVersionId;
                                return next(err);
                            });
                        /* eslint-disable max-len */
                        // The following versions are created:
                        // { "_id" : "Mpfx3-test-object", "value" : { "key" : "pfx3-test-object", "versionId" : "vid4", "last-modified" : "1970-01-01T00:00:01.005Z" } }
                        // { "_id" : "Vpfx3-test-object{sep}id4", "value" : { "key" : "pfx3-test-object", "versionId" : "vid4", "last-modified" : "1970-01-01T00:00:01.005Z" } }
                        // { "_id" : "Vpfx3-test-object{sep}id3", "value" : { "key" : "pfx3-test-object", "versionId" : "vid3", "last-modified" : "1970-01-01T00:00:01.004Z" } }
                        // { "_id" : "Vpfx3-test-object{sep}id2", "value" : { "key" : "pfx3-test-object", "versionId" : "vid2", "last-modified" : "1970-01-01T00:00:01.003Z" } }
                        // { "_id" : "Vpfx3-test-object{sep}id1", "value" : { "key" : "pfx3-test-object", "versionId" : "vid1", "last-modified" : "1970-01-01T00:00:01.002Z" } }
                        // { "_id" : "Vpfx3-test-object{sep}id0", "value" : { "key" : "pfx3-test-object", "versionId" : "vid0", "last-modified" : "1970-01-01T00:00:01.001Z" } }
                        /* eslint-enable max-len */
                    },
                ], done);
            });
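
            // Dataset recap: the current versions are pfx1-test-object at
            // 00:00:00.005Z (loc1), pfx3-test-object at 00:00:01.005Z (loc1)
            // and pfx2-test-object at 00:00:02.005Z (loc2); the listing tests
            // below filter on those dates, locations and prefixes.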

            afterEach(done => {
                metadata.deleteBucket(BUCKET_NAME, logger, done);
            });

            it('Should list current versions of objects', done => {
                const params = {
                    listingType: 'DelimiterCurrent',
                };
                return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
                    assert.ifError(err);
                    assert.strictEqual(data.IsTruncated, false);
                    assert.strictEqual(data.Contents.length, 3);
                    const expected = [
                        {
                            key: 'pfx1-test-object',
                            LastModified: '1970-01-01T00:00:00.005Z',
                            dataStoreName: location1,
                            VersionId: expectedVersionIds['pfx1-test-object'],
                        },
                        {
                            key: 'pfx2-test-object',
                            LastModified: '1970-01-01T00:00:02.005Z',
                            dataStoreName: location2,
                            VersionId: expectedVersionIds['pfx2-test-object'],
                        },
                        {
                            key: 'pfx3-test-object',
                            LastModified: '1970-01-01T00:00:01.005Z',
                            dataStoreName: location1,
                            VersionId: expectedVersionIds['pfx3-test-object'],
                        },
                    ];
                    assertContents(data.Contents, expected);

                    return done();
                });
            });

            it('Should list current versions of objects excluding keys stored in location2', done => {
                const params = {
                    listingType: 'DelimiterCurrent',
                    excludedDataStoreName: location2,
                };
                return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
                    assert.ifError(err);
                    assert.strictEqual(data.IsTruncated, false);
                    assert.strictEqual(data.Contents.length, 2);
                    const expected = [
                        {
                            key: 'pfx1-test-object',
                            LastModified: '1970-01-01T00:00:00.005Z',
                            dataStoreName: location1,
                            VersionId: expectedVersionIds['pfx1-test-object'],
                        },
                        {
                            key: 'pfx3-test-object',
                            LastModified: '1970-01-01T00:00:01.005Z',
                            dataStoreName: location1,
                            VersionId: expectedVersionIds['pfx3-test-object'],
                        },
                    ];
                    assertContents(data.Contents, expected);

                    return done();
                });
            });

            it('Should list current versions of objects excluding keys stored in location1', done => {
                const params = {
                    listingType: 'DelimiterCurrent',
                    excludedDataStoreName: location1,
                };
                return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
                    assert.ifError(err);
                    assert.strictEqual(data.IsTruncated, false);
                    assert.strictEqual(data.Contents.length, 1);
                    const expected = [
                        {
                            key: 'pfx2-test-object',
                            LastModified: '1970-01-01T00:00:02.005Z',
                            dataStoreName: location2,
                            VersionId: expectedVersionIds['pfx2-test-object'],
                        },
                    ];
                    assertContents(data.Contents, expected);

                    return done();
                });
            });

            it('Should list current versions of objects with prefix and excluding keys stored in location2', done => {
                const params = {
                    listingType: 'DelimiterCurrent',
                    excludedDataStoreName: location2,
                    prefix: 'pfx3',
                };
                return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
                    assert.ifError(err);
                    assert.strictEqual(data.IsTruncated, false);
                    assert.strictEqual(data.Contents.length, 1);
                    const expected = [
                        {
                            key: 'pfx3-test-object',
                            LastModified: '1970-01-01T00:00:01.005Z',
                            dataStoreName: location1,
                            VersionId: expectedVersionIds['pfx3-test-object'],
                        },
                    ];
                    assertContents(data.Contents, expected);

                    return done();
                });
            });

            it('Should return truncated list of current versions excluding keys stored in location2', done => {
                const params = {
                    listingType: 'DelimiterCurrent',
                    excludedDataStoreName: location2,
                    maxKeys: 1,
                };
                return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
                    assert.ifError(err);
                    assert.strictEqual(data.IsTruncated, true);
                    assert.strictEqual(data.Contents.length, 1);
                    assert.strictEqual(data.NextMarker, 'pfx1-test-object');
                    const expected = [
                        {
                            key: 'pfx1-test-object',
                            LastModified: '1970-01-01T00:00:00.005Z',
                            dataStoreName: location1,
                            VersionId: expectedVersionIds['pfx1-test-object'],
                        },
                    ];
                    assertContents(data.Contents, expected);

                    params.marker = 'pfx1-test-object';

                    return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
                        assert.ifError(err);
                        assert.strictEqual(data.IsTruncated, false);
                        assert.strictEqual(data.Contents.length, 1);
                        const expected = [
                            {
                                key: 'pfx3-test-object',
                                LastModified: '1970-01-01T00:00:01.005Z',
                                dataStoreName: location1,
                                VersionId: expectedVersionIds['pfx3-test-object'],
                            },
                        ];
                        assertContents(data.Contents, expected);

                        return done();
                    });
                });
            });

            it('Should return empty list when beforeDate is before the objects creation date', done => {
                const params = {
                    listingType: 'DelimiterCurrent',
                    beforeDate: '1970-01-01T00:00:00.000Z',
                };
                return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
                    assert.ifError(err);
                    assert.strictEqual(data.IsTruncated, false);
                    assert.strictEqual(data.Contents.length, 0);

                    return done();
                });
            });

            it('Should return the current version modified before 1970-01-01T00:00:00.010Z', done => {
                const params = {
                    listingType: 'DelimiterCurrent',
                    beforeDate: '1970-01-01T00:00:00.010Z',
                };
                return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
                    assert.ifError(err);
                    assert.strictEqual(data.IsTruncated, false);
                    assert.strictEqual(data.Contents.length, 1);
                    const expected = [
                        {
                            key: 'pfx1-test-object',
                            LastModified: '1970-01-01T00:00:00.005Z',
                            dataStoreName: location1,
                            VersionId: expectedVersionIds['pfx1-test-object'],
                        },
                    ];
                    assertContents(data.Contents, expected);

                    return done();
                });
            });

            it('Should return the current versions modified before 1970-01-01T00:00:01.010Z', done => {
                const params = {
                    listingType: 'DelimiterCurrent',
                    beforeDate: '1970-01-01T00:00:01.010Z',
                };
                return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
                    assert.ifError(err);
                    assert.strictEqual(data.IsTruncated, false);
                    assert.strictEqual(data.Contents.length, 2);
                    const expected = [
                        {
                            key: 'pfx1-test-object',
                            LastModified: '1970-01-01T00:00:00.005Z',
                            dataStoreName: location1,
                            VersionId: expectedVersionIds['pfx1-test-object'],
                        },
                        {
                            key: 'pfx3-test-object',
                            LastModified: '1970-01-01T00:00:01.005Z',
                            dataStoreName: location1,
                            VersionId: expectedVersionIds['pfx3-test-object'],
                        },
                    ];
                    assertContents(data.Contents, expected);

                    return done();
                });
            });

            it('Should return the current versions modified before 1970-01-01T00:00:02.010Z', done => {
                const params = {
                    listingType: 'DelimiterCurrent',
                    beforeDate: '1970-01-01T00:00:02.010Z',
                };
                return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
                    assert.ifError(err);
                    assert.strictEqual(data.IsTruncated, false);
                    assert.strictEqual(data.Contents.length, 3);
                    const expected = [
                        {
                            key: 'pfx1-test-object',
                            LastModified: '1970-01-01T00:00:00.005Z',
                            dataStoreName: location1,
                            VersionId: expectedVersionIds['pfx1-test-object'],
                        },
                        {
                            key: 'pfx2-test-object',
                            LastModified: '1970-01-01T00:00:02.005Z',
                            dataStoreName: location2,
                            VersionId: expectedVersionIds['pfx2-test-object'],
                        },
                        {
                            key: 'pfx3-test-object',
                            LastModified: '1970-01-01T00:00:01.005Z',
                            dataStoreName: location1,
                            VersionId: expectedVersionIds['pfx3-test-object'],
                        },
                    ];
                    assertContents(data.Contents, expected);

                    return done();
                });
            });

            it('Should truncate the list of current versions modified before 1970-01-01T00:00:01.010Z', done => {
                const params = {
                    listingType: 'DelimiterCurrent',
                    beforeDate: '1970-01-01T00:00:01.010Z',
                    maxKeys: 1,
                };
                return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
                    assert.ifError(err);
                    assert.strictEqual(data.IsTruncated, true);
                    assert.strictEqual(data.Contents.length, 1);
                    assert.strictEqual(data.NextMarker, 'pfx1-test-object');
                    const expected = [
                        {
                            key: 'pfx1-test-object',
                            LastModified: '1970-01-01T00:00:00.005Z',
                            dataStoreName: location1,
                            VersionId: expectedVersionIds['pfx1-test-object'],
                        },
                    ];
                    assertContents(data.Contents, expected);

                    params.marker = 'pfx1-test-object';

                    return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
                        assert.ifError(err);
                        assert.strictEqual(data.IsTruncated, false);
                        assert.strictEqual(data.Contents.length, 1);
                        const expected = [
                            {
                                key: 'pfx3-test-object',
                                LastModified: '1970-01-01T00:00:01.005Z',
                                dataStoreName: location1,
                                VersionId: expectedVersionIds['pfx3-test-object'],
                            },
                        ];
                        assertContents(data.Contents, expected);

                        return done();
                    });
                });
            });

            it('Should truncate list of current versions of objects', done => {
                const params = {
                    listingType: 'DelimiterCurrent',
                    maxKeys: 2,
                };
                return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
                    assert.ifError(err);
                    assert.strictEqual(data.IsTruncated, true);
                    assert.strictEqual(data.NextMarker, 'pfx2-test-object');
                    assert.strictEqual(data.Contents.length, 2);
                    const expected = [
                        {
                            key: 'pfx1-test-object',
                            LastModified: '1970-01-01T00:00:00.005Z',
                            dataStoreName: location1,
                            VersionId: expectedVersionIds['pfx1-test-object'],
                        },
                        {
                            key: 'pfx2-test-object',
                            LastModified: '1970-01-01T00:00:02.005Z',
                            dataStoreName: location2,
                            VersionId: expectedVersionIds['pfx2-test-object'],
                        },
                    ];
                    assertContents(data.Contents, expected);

                    return done();
                });
            });

            it('Should list the following current versions of objects', done => {
                const params = {
                    listingType: 'DelimiterCurrent',
                    marker: 'pfx2-test-object',
                };
                return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
                    assert.ifError(err);
                    assert.strictEqual(data.IsTruncated, false);
                    assert.strictEqual(data.Contents.length, 1);
                    const expected = [
                        {
                            key: 'pfx3-test-object',
                            LastModified: '1970-01-01T00:00:01.005Z',
                            dataStoreName: location1,
                            VersionId: expectedVersionIds['pfx3-test-object'],
                        },
                    ];
                    assertContents(data.Contents, expected);

                    return done();
                });
            });

            it('Should list current versions that start with prefix', done => {
                const params = {
                    listingType: 'DelimiterCurrent',
                    prefix: 'pfx2',
                };
                return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
                    assert.ifError(err);
                    assert.strictEqual(data.IsTruncated, false);
                    assert.strictEqual(data.Contents.length, 1);
                    const expected = [
                        {
                            key: 'pfx2-test-object',
                            LastModified: '1970-01-01T00:00:02.005Z',
                            dataStoreName: location2,
                            VersionId: expectedVersionIds['pfx2-test-object'],
                        },
                    ];
                    assertContents(data.Contents, expected);

                    return done();
                });
            });

            it('Should return the list of current versions modified before 1970-01-01T00:00:01.010Z with prefix pfx1',
                done => {
                    const params = {
                        listingType: 'DelimiterCurrent',
                        beforeDate: '1970-01-01T00:00:01.010Z',
                        maxKeys: 1,
                        prefix: 'pfx1',
                    };
                    return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
                        assert.ifError(err);
                        assert.strictEqual(data.IsTruncated, false);
                        assert.strictEqual(data.Contents.length, 1);
                        const expected = [
                            {
                                key: 'pfx1-test-object',
                                LastModified: '1970-01-01T00:00:00.005Z',
                                dataStoreName: location1,
                                VersionId: expectedVersionIds['pfx1-test-object'],
                            },
                        ];
                        assertContents(data.Contents, expected);

                        return done();
                    });
                });

            it('Should not list deleted version', done => {
                const objVal = {
                    'key': 'pfx4-test-object',
                    'last-modified': new Date(0).toISOString(),
                };
                const versionParams = {
                    versioning: true,
                };
                const params = {
                    listingType: 'DelimiterCurrent',
                };
                let deletedVersionId;

                async.series([
                    next => metadata.putObjectMD(BUCKET_NAME, objVal.key, objVal, versionParams,
                        logger, (err, res) => {
                            if (err) {
                                return next(err);
                            }
                            deletedVersionId = JSON.parse(res).versionId;
                            return next(null);
                        }),
                    next => metadata.deleteObjectMD(BUCKET_NAME, objVal.key,
                        { versionId: deletedVersionId }, logger, next),
                    next => metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
                        assert.ifError(err);
                        assert.strictEqual(data.Contents.length, 3);
                        const expected = [
                            {
                                key: 'pfx1-test-object',
                                LastModified: '1970-01-01T00:00:00.005Z',
                                dataStoreName: location1,
                                VersionId: expectedVersionIds['pfx1-test-object'],
                            },
                            {
                                key: 'pfx2-test-object',
                                LastModified: '1970-01-01T00:00:02.005Z',
                                dataStoreName: location2,
                                VersionId: expectedVersionIds['pfx2-test-object'],
                            },
                            {
                                key: 'pfx3-test-object',
                                LastModified: '1970-01-01T00:00:01.005Z',
                                dataStoreName: location1,
                                VersionId: expectedVersionIds['pfx3-test-object'],
                            },
                        ];
                        assertContents(data.Contents, expected);
                        return next();
                    }),
                ], done);
            });

            it('Should not list object with delete marker', done => {
                const objVal = {
                    'key': 'pfx4-test-object',
                    'last-modified': new Date(0).toISOString(),
                };

                const dmObjVal = { ...objVal, isDeleteMarker: true };
                const versionParams = {
                    versioning: true,
                };
                const params = {
                    listingType: 'DelimiterCurrent',
                };

                async.series([
                    next => metadata.putObjectMD(BUCKET_NAME, objVal.key, objVal, versionParams, logger, next),
                    next => metadata.putObjectMD(BUCKET_NAME, objVal.key, dmObjVal, versionParams, logger, next),
                    next => metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
                        assert.ifError(err);
                        assert.strictEqual(data.Contents.length, 3);
                        const expected = [
                            {
                                key: 'pfx1-test-object',
                                LastModified: '1970-01-01T00:00:00.005Z',
                                dataStoreName: location1,
                                VersionId: expectedVersionIds['pfx1-test-object'],
                            },
                            {
                                key: 'pfx2-test-object',
                                LastModified: '1970-01-01T00:00:02.005Z',
                                dataStoreName: location2,
                                VersionId: expectedVersionIds['pfx2-test-object'],
                            },
                            {
                                key: 'pfx3-test-object',
                                LastModified: '1970-01-01T00:00:01.005Z',
                                dataStoreName: location1,
                                VersionId: expectedVersionIds['pfx3-test-object'],
                            },
                        ];
                        assertContents(data.Contents, expected);
                        return next();
                    }),
                ], done);
            });

            it('Should not list phd master key when listing current versions', done => {
                const objVal = {
                    'key': 'pfx4-test-object',
                    'versionId': 'null',
                    'last-modified': new Date(0).toISOString(),
                };
                const versionParams = {
                    versioning: true,
                };
                const params = {
                    listingType: 'DelimiterCurrent',
                    prefix: 'pfx4',
                };
                let versionId;
                let lastVersionId;
                async.series([
                    next => metadata.putObjectMD(BUCKET_NAME, 'pfx4-test-object', objVal, versionParams,
                        logger, (err, res) => {
                            if (err) {
                                return next(err);
                            }
                            versionId = JSON.parse(res).versionId;
                            return next(null);
                        }),
                    next => metadata.putObjectMD(BUCKET_NAME, 'pfx4-test-object', objVal, versionParams,
                        logger, (err, res) => {
                            if (err) {
                                return next(err);
                            }
                            lastVersionId = JSON.parse(res).versionId;
                            return next(null);
                        }),
                    next => metadata.deleteObjectMD(BUCKET_NAME, 'pfx4-test-object', { versionId: lastVersionId },
                        logger, next),
                    next => metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
                        assert.ifError(err);
                        assert.strictEqual(data.Contents[0].value.VersionId, versionId);
                        return next();
                    }),
                ], done);
            });

            it('Should not list the current version tagged for deletion', done => {
                const objVal = {
                    'key': 'pfx4-test-object',
                    'last-modified': new Date(0).toISOString(),
                };
                const versionParams = {
                    versioning: true,
                };
                const params = {
                    listingType: 'DelimiterCurrent',
                };
                async.series([
                    next => metadata.putObjectMD(BUCKET_NAME, objVal.key, objVal, versionParams,
                        logger, next),
                    next => flagObjectForDeletion(collection, objVal.key, next),
                    next => metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
                        assert.ifError(err);
                        assert.strictEqual(data.Contents.length, 3);
                        const expected = [
                            {
                                key: 'pfx1-test-object',
                                LastModified: '1970-01-01T00:00:00.005Z',
                                dataStoreName: location1,
                                VersionId: expectedVersionIds['pfx1-test-object'],
                            },
                            {
                                key: 'pfx2-test-object',
                                LastModified: '1970-01-01T00:00:02.005Z',
                                dataStoreName: location2,
                                VersionId: expectedVersionIds['pfx2-test-object'],
                            },
                            {
                                key: 'pfx3-test-object',
                                LastModified: '1970-01-01T00:00:01.005Z',
                                dataStoreName: location1,
                                VersionId: expectedVersionIds['pfx3-test-object'],
                            },
                        ];
                        assertContents(data.Contents, expected);
                        return next();
                    }),
                ], done);
            });
        });
    });
});

File diff suppressed because it is too large

@@ -0,0 +1,215 @@
const async = require('async');
const assert = require('assert');
const werelogs = require('werelogs');
const { MongoMemoryReplSet } = require('mongodb-memory-server');
const logger = new werelogs.Logger('MongoClientInterface', 'debug', 'debug');
const MetadataWrapper =
    require('../../../../../lib/storage/metadata/MetadataWrapper');
const { versioning } = require('../../../../../index');
const { BucketVersioningKeyFormat } = versioning.VersioningConstants;
const { makeBucketMD } = require('./utils');

const IMPL_NAME = 'mongodb';
const DB_NAME = 'metadata';

const BUCKET_NAME = 'test-lifecycle-list-bucket-null';

const mongoserver = new MongoMemoryReplSet({
    debug: false,
    instanceOpts: [
        { port: 27020 },
    ],
    replSet: {
        name: 'rs0',
        count: 1,
        DB_NAME,
        storageEngine: 'ephemeralForTest',
    },
});

describe('MongoClientInterface::metadata.listLifecycleObject::nullVersion', () => {
    let metadata;

    beforeAll(done => {
        mongoserver.start().then(() => {
            mongoserver.waitUntilRunning().then(() => {
                const opts = {
                    mongodb: {
                        replicaSetHosts: 'localhost:27020',
                        writeConcern: 'majority',
                        replicaSet: 'rs0',
                        readPreference: 'primary',
                        database: DB_NAME,
                    },
                };
                metadata = new MetadataWrapper(IMPL_NAME, opts, null, logger);
                metadata.setup(done);
            });
        });
    });

    afterAll(done => {
        async.series([
            next => metadata.close(next),
            next => mongoserver.stop()
                .then(() => next())
                .catch(next),
        ], done);
    });

    [BucketVersioningKeyFormat.v0, BucketVersioningKeyFormat.v1].forEach(v => {
        describe(`bucket format version: ${v}`, () => {
            beforeEach(done => {
                const bucketMD = makeBucketMD(BUCKET_NAME);
                const versionParams = {
                    versioning: true,
                    versionId: null,
                    repairMaster: null,
                };
                metadata.client.defaultBucketKeyFormat = v;
                async.series([
                    next => metadata.createBucket(BUCKET_NAME, bucketMD, logger, next),
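                    // key0: its only version is a null version, so the null
                    // version is also the current (master) version.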
                    next => {
                        const objName = 'key0';
                        const timestamp = 0;

                        const lastModified = new Date(timestamp).toISOString();
                        const objVal = {
                            'key': objName,
                            'versionId': 'null',
                            'isNull': true,
                            'last-modified': lastModified,
                        };
                        return metadata.putObjectMD(BUCKET_NAME, objName, objVal, versionParams, logger, next);
                    },
                    next => {
                        const objName = 'key1';
                        const timestamp = 0;

                        const lastModified = new Date(timestamp).toISOString();
                        const objVal = {
                            'key': objName,
                            'versionId': 'null',
                            'isNull': true,
                            'last-modified': lastModified,
                        };
                        return metadata.putObjectMD(BUCKET_NAME, objName, objVal, versionParams, logger, next);
                    },
                    next => {
                        const objName = 'key1';
                        const timestamp = 0;

                        const lastModified = new Date(timestamp).toISOString();
                        const objVal = {
                            'key': objName,
                            'last-modified': lastModified,
                        };
                        return metadata.putObjectMD(BUCKET_NAME, objName, objVal, versionParams, logger, next);
                    },
                    // key2 simulates a scenario where:
                    // 1) bucket is versioned
                    // 2) put object key2
                    // 3) bucket versioning gets suspended
                    // 4) put object key2
                    // result:
                    // {
                    //     "_id" : "Mkey2",
                    //     "value" : {
                    //         "key" : "key2",
                    //         "isNull" : true,
                    //         "versionId" : "<VersionId2>",
                    //         "last-modified" : "2023-07-11T14:16:00.151Z",
                    //     }
                    // },
                    // {
                    //     "_id" : "Vkey2\u0000<VersionId1>",
                    //     "value" : {
                    //         "key" : "key2",
                    //         "versionId" : "<VersionId1>",
                    //         "tags" : {
                    //         },
                    //         "last-modified" : "2023-07-11T14:15:36.713Z",
                    //     }
                    // },
                    next => {
                        const objName = 'key2';
                        const timestamp = 0;

                        const lastModified = new Date(timestamp).toISOString();
                        const objVal = {
                            'key': objName,
                            'last-modified': lastModified,
                        };
                        return metadata.putObjectMD(BUCKET_NAME, objName, objVal, versionParams, logger, next);
                    },
                    next => {
                        const objName = 'key2';
                        const timestamp = 0;
                        const params = {
                            versionId: '',
                        };

                        const lastModified = new Date(timestamp).toISOString();
                        const objVal = {
                            'key': objName,
                            'last-modified': lastModified,
                            'isNull': true,
                        };
                        return metadata.putObjectMD(BUCKET_NAME, objName, objVal, params, logger, next);
                    },
                ], done);
            });

            afterEach(done => metadata.deleteBucket(BUCKET_NAME, logger, done));

            it('Should list the null current version and set IsNull to true', done => {
                const params = {
                    listingType: 'DelimiterCurrent',
                };
                return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
                    assert.ifError(err);
                    assert.strictEqual(data.IsTruncated, false);
                    assert.strictEqual(data.Contents.length, 3);

                    // check that key0 has a null current version
                    const firstKey = data.Contents[0];
                    assert.strictEqual(firstKey.key, 'key0');
                    assert.strictEqual(firstKey.value.IsNull, true);

                    // check that key1 has no null current version
                    const secondKey = data.Contents[1];
                    assert.strictEqual(secondKey.key, 'key1');
                    assert(!secondKey.value.IsNull);

                    // check that key2 has a null current version
                    const thirdKey = data.Contents[2];
                    assert.strictEqual(thirdKey.key, 'key2');
                    assert.strictEqual(thirdKey.value.IsNull, true);
                    return done();
                });
            });

            it('Should list the null non-current version and set IsNull to true', done => {
                const params = {
                    listingType: 'DelimiterNonCurrent',
                };
                return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
                    assert.deepStrictEqual(err, null);
                    assert.strictEqual(data.IsTruncated, false);
                    assert.strictEqual(data.Contents.length, 2);

                    // check that key1 has a null non-current version
                    const firstKey = data.Contents[0];
                    assert.strictEqual(firstKey.key, 'key1');
                    assert.strictEqual(firstKey.value.IsNull, true);

                    // check that key2 has no null non-current version
                    const secondKey = data.Contents[1];
                    assert.strictEqual(secondKey.key, 'key2');
                    assert(!secondKey.value.IsNull);
                    return done();
                });
            });
        });
    });
});

@@ -0,0 +1,455 @@
const async = require('async');
const assert = require('assert');
const werelogs = require('werelogs');
const { MongoMemoryReplSet } = require('mongodb-memory-server');
const logger = new werelogs.Logger('MongoClientInterface', 'debug', 'debug');
const MetadataWrapper =
    require('../../../../../lib/storage/metadata/MetadataWrapper');
const { versioning } = require('../../../../../index');
const { BucketVersioningKeyFormat } = versioning.VersioningConstants;
const { makeBucketMD, putBulkObjectVersions } = require('./utils');

const IMPL_NAME = 'mongodb';
const DB_NAME = 'metadata';
const BUCKET_NAME = 'test-lifecycle-list-orphan-bucket';

const mongoserver = new MongoMemoryReplSet({
    debug: false,
    instanceOpts: [
        { port: 27020 },
    ],
    replSet: {
        name: 'rs0',
        count: 1,
        DB_NAME,
        storageEngine: 'ephemeralForTest',
    },
});

describe('MongoClientInterface::metadata.listLifecycleObject::orphan', () => {
    let metadata;

    beforeAll(done => {
        mongoserver.start().then(() => {
            mongoserver.waitUntilRunning().then(() => {
                const opts = {
                    mongodb: {
                        replicaSetHosts: 'localhost:27020',
                        writeConcern: 'majority',
                        replicaSet: 'rs0',
                        readPreference: 'primary',
                        database: DB_NAME,
                    },
                };
                metadata = new MetadataWrapper(IMPL_NAME, opts, null, logger);
                metadata.setup(done);
            });
        });
    });

    afterAll(done => {
        async.series([
            next => metadata.close(next),
            next => mongoserver.stop()
                .then(() => next())
                .catch(next),
        ], done);
    });

    [BucketVersioningKeyFormat.v0, BucketVersioningKeyFormat.v1].forEach(v => {
        describe(`bucket format version: ${v}`, () => {
            beforeEach(done => {
                const bucketMD = makeBucketMD(BUCKET_NAME);
                const versionParams = {
                    versioning: true,
                    versionId: null,
                    repairMaster: null,
                };
                metadata.client.defaultBucketKeyFormat = v;
                async.series([
                    next => metadata.createBucket(BUCKET_NAME, bucketMD, logger, next),
|
||||
next => {
|
||||
const keyName = 'pfx0-test-object';
|
||||
|
||||
const objVal = {
|
||||
'key': keyName,
|
||||
'isDeleteMarker': true,
|
||||
'last-modified': new Date(0).toISOString(), // 1970-01-01T00:00:00.000Z
|
||||
};
|
||||
const params = {
|
||||
versioning: true,
|
||||
};
|
||||
return metadata.putObjectMD(BUCKET_NAME, keyName, objVal, params, logger, next);
|
||||
},
|
||||
next => {
|
||||
const params = {
|
||||
objName: 'pfx1-test-object',
|
||||
objVal: {
|
||||
key: 'pfx1-test-object',
|
||||
versionId: 'null',
|
||||
},
|
||||
nbVersions: 1,
|
||||
};
|
||||
const timestamp = 0;
|
||||
putBulkObjectVersions(metadata, BUCKET_NAME, params.objName, params.objVal, versionParams,
|
||||
params.nbVersions, timestamp, logger, next);
|
||||
},
|
||||
next => {
|
||||
const params = {
|
||||
objName: 'pfx2-test-object',
|
||||
objVal: {
|
||||
key: 'pfx2-test-object',
|
||||
versionId: 'null',
|
||||
},
|
||||
nbVersions: 1,
|
||||
};
|
||||
const timestamp = 0;
|
||||
putBulkObjectVersions(metadata, BUCKET_NAME, params.objName, params.objVal, versionParams,
|
||||
params.nbVersions, timestamp, logger, next);
|
||||
},
|
||||
next => {
|
||||
const keyName = 'pfx2-test-object';
|
||||
|
||||
const objVal = {
|
||||
'key': keyName,
|
||||
'isDeleteMarker': true,
|
||||
'last-modified': new Date(2).toISOString(), // 1970-01-01T00:00:00.002Z
|
||||
};
|
||||
const params = {
|
||||
versioning: true,
|
||||
};
|
||||
return metadata.putObjectMD(BUCKET_NAME, keyName, objVal, params, logger, next);
|
||||
},
|
||||
next => {
|
||||
const keyName = 'pfx3-test-object';
|
||||
|
||||
const objVal = {
|
||||
'key': keyName,
|
||||
'isDeleteMarker': true,
|
||||
'last-modified': new Date(0).toISOString(), // 1970-01-01T00:00:00.000Z
|
||||
};
|
||||
const params = {
|
||||
versioning: true,
|
||||
};
|
||||
return metadata.putObjectMD(BUCKET_NAME, keyName, objVal, params, logger, next);
|
||||
},
|
||||
next => {
|
||||
const keyName = 'pfx4-test-object';
|
||||
|
||||
const objVal = {
|
||||
'key': keyName,
|
||||
'isDeleteMarker': true,
|
||||
'last-modified': new Date(5).toISOString(), // 1970-01-01T00:00:00.005Z
|
||||
};
|
||||
const params = {
|
||||
versioning: true,
|
||||
};
|
||||
return metadata.putObjectMD(BUCKET_NAME, keyName, objVal, params, logger, next);
|
||||
},
|
||||
next => {
|
||||
const keyName = 'pfx4-test-object2';
|
||||
|
||||
const objVal = {
|
||||
'key': keyName,
|
||||
'isDeleteMarker': true,
|
||||
'last-modified': new Date(6).toISOString(), // 1970-01-01T00:00:00.006Z
|
||||
};
|
||||
const params = {
|
||||
versioning: true,
|
||||
};
|
||||
return metadata.putObjectMD(BUCKET_NAME, keyName, objVal, params, logger, next);
|
||||
},
|
||||
], done);
|
||||
});
|
||||
/* eslint-disable max-len */
|
||||
// { "_id" : "Mpfx1-test-object", "value" : { "key" : "pfx1-test-object", "versionId" : "v1", "last-modified" : "1970-01-01T00:00:00.001Z" } }
|
||||
// { "_id" : "Vpfx0-test-object{sep}v0", "value" : { "key" : "pfx0-test-object", "isDeleteMarker" : true, "last-modified" : "1970-01-01T00:00:00.000Z", "versionId" : "v0" } }
|
||||
// { "_id" : "Vpfx1-test-object{sep}v1", "value" : { "key" : "pfx1-test-object", "versionId" : "v1", "last-modified" : "1970-01-01T00:00:00.001Z" } }
|
||||
// { "_id" : "Vpfx2-test-object{sep}v3", "value" : { "key" : "pfx2-test-object", "isDeleteMarker" : true, "last-modified" : "1970-01-01T00:00:00.002Z", "versionId" : "v3" } }
|
||||
// { "_id" : "Vpfx2-test-object{sep}v2", "value" : { "key" : "pfx2-test-object", "versionId" : "v2", "last-modified" : "1970-01-01T00:00:00.001Z" } }
|
||||
// { "_id" : "Vpfx3-test-object{sep}v4", "value" : { "key" : "pfx3-test-object", "isDeleteMarker" : true, "last-modified" : "1970-01-01T00:00:00.000Z", "versionId" : "v4" } }
|
||||
// { "_id" : "Vpfx4-test-object{sep}v5", "value" : { "key" : "pfx4-test-object", "isDeleteMarker" : true, "last-modified" : "1970-01-01T00:00:00.005Z", "versionId" : "v5" } }
|
||||
// { "_id" : "Vpfx4-test-object2{sep}v6", "value" : { "key" : "pfx4-test-object", "isDeleteMarker" : true, "last-modified" : "1970-01-01T00:00:00.006Z", "versionId" : "v6" } }
|
||||
/* eslint-enable max-len */
|
||||
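            // An orphan delete marker is a delete marker that is the only
            // remaining version of its key: pfx0, pfx3, pfx4 and pfx4-...2
            // above. pfx1 only has a regular version, and pfx2's marker is
            // not an orphan because version v2 of pfx2-test-object exists.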

            afterEach(done => {
                metadata.deleteBucket(BUCKET_NAME, logger, done);
            });

            it('Should list orphan delete markers', done => {
                const params = {
                    listingType: 'DelimiterOrphanDeleteMarker',
                };
                return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
                    assert.deepStrictEqual(err, null);
                    assert.strictEqual(data.IsTruncated, false);
                    assert(!data.NextMarker);
                    assert.strictEqual(data.Contents.length, 4);
                    assert.strictEqual(data.Contents[0].key, 'pfx0-test-object');
                    assert.strictEqual(data.Contents[1].key, 'pfx3-test-object');
                    assert.strictEqual(data.Contents[2].key, 'pfx4-test-object');
                    assert.strictEqual(data.Contents[3].key, 'pfx4-test-object2');
                    return done();
                });
            });

            it('Should return empty list when beforeDate is not after the oldest last-modified', done => {
                const params = {
                    listingType: 'DelimiterOrphanDeleteMarker',
                    beforeDate: '1970-01-01T00:00:00.000Z',
                };
                return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
                    assert.deepStrictEqual(err, null);
                    assert.strictEqual(data.IsTruncated, false);
                    assert(!data.NextMarker);
                    assert.strictEqual(data.Contents.length, 0);

                    return done();
                });
            });

            it('Should list orphan delete markers older than 1970-01-01T00:00:00.003Z', done => {
                const params = {
                    listingType: 'DelimiterOrphanDeleteMarker',
                    beforeDate: '1970-01-01T00:00:00.003Z',
                };
                return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
                    assert.deepStrictEqual(err, null);
                    assert.strictEqual(data.IsTruncated, false);
                    assert(!data.NextMarker);
                    assert.strictEqual(data.Contents.length, 2);
                    assert.strictEqual(data.Contents[0].key, 'pfx0-test-object');
                    assert.strictEqual(data.Contents[1].key, 'pfx3-test-object');

                    return done();
                });
            });

            it('Should return the first part of the orphan delete markers listing', done => {
                const params = {
                    listingType: 'DelimiterOrphanDeleteMarker',
                    maxKeys: 1,
                };
                return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
                    assert.deepStrictEqual(err, null);
                    assert.strictEqual(data.IsTruncated, true);
                    assert.strictEqual(data.NextMarker, 'pfx0-test-object');
                    assert.strictEqual(data.Contents.length, 1);
                    assert.strictEqual(data.Contents[0].key, 'pfx0-test-object');

                    return done();
                });
            });

            it('Should return the second part of the orphan delete markers listing', done => {
                const params = {
                    listingType: 'DelimiterOrphanDeleteMarker',
                    marker: 'pfx0-test-object',
                    maxKeys: 1,
                };
                return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
                    assert.deepStrictEqual(err, null);
                    assert.strictEqual(data.IsTruncated, true);
                    assert.strictEqual(data.NextMarker, 'pfx3-test-object');
                    assert.strictEqual(data.Contents.length, 1);
                    assert.strictEqual(data.Contents[0].key, 'pfx3-test-object');

                    return done();
                });
            });

            it('Should return the third part of the orphan delete markers listing', done => {
                const params = {
                    listingType: 'DelimiterOrphanDeleteMarker',
                    marker: 'pfx3-test-object',
                    maxKeys: 1,
                };
                return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
                    assert.deepStrictEqual(err, null);
                    assert.strictEqual(data.IsTruncated, true);
                    assert.strictEqual(data.NextMarker, 'pfx4-test-object');
                    assert.strictEqual(data.Contents.length, 1);
                    assert.strictEqual(data.Contents[0].key, 'pfx4-test-object');

                    return done();
                });
            });

            it('Should return the fourth part of the orphan delete markers listing', done => {
                const params = {
                    listingType: 'DelimiterOrphanDeleteMarker',
                    marker: 'pfx4-test-object',
                    maxKeys: 1,
                };
                return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
                    assert.deepStrictEqual(err, null);
                    assert.strictEqual(data.IsTruncated, false);
                    assert(!data.NextMarker);
                    assert.strictEqual(data.Contents.length, 1);
                    assert.strictEqual(data.Contents[0].key, 'pfx4-test-object2');

                    return done();
                });
            });

            it('Should list the first two orphan delete markers', done => {
                const params = {
                    listingType: 'DelimiterOrphanDeleteMarker',
                    maxKeys: 2,
                };
                return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
                    assert.deepStrictEqual(err, null);
                    assert.strictEqual(data.IsTruncated, true);
                    assert.strictEqual(data.Contents.length, 2);
                    assert.strictEqual(data.NextMarker, 'pfx3-test-object');
                    assert.strictEqual(data.Contents[0].key, 'pfx0-test-object');
                    assert.strictEqual(data.Contents[1].key, 'pfx3-test-object');

                    return done();
                });
            });

            it('Should list the first four orphan delete markers', done => {
                const params = {
                    listingType: 'DelimiterOrphanDeleteMarker',
                    maxKeys: 4,
                };
                return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
                    assert.deepStrictEqual(err, null);
                    assert.strictEqual(data.IsTruncated, false);
                    assert(!data.NextMarker);
                    assert.strictEqual(data.Contents.length, 4);
                    assert.strictEqual(data.Contents[0].key, 'pfx0-test-object');
                    assert.strictEqual(data.Contents[1].key, 'pfx3-test-object');
                    assert.strictEqual(data.Contents[2].key, 'pfx4-test-object');
                    assert.strictEqual(data.Contents[3].key, 'pfx4-test-object2');

                    return done();
                });
            });

            it('Should return an empty list if no orphan delete marker starts with prefix pfx2', done => {
                const params = {
                    listingType: 'DelimiterOrphanDeleteMarker',
                    prefix: 'pfx2',
                };
                return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
                    assert.deepStrictEqual(err, null);
                    assert.strictEqual(data.IsTruncated, false);
                    assert(!data.NextMarker);
                    assert.strictEqual(data.Contents.length, 0);

                    return done();
                });
            });

            it('Should list orphan delete markers that start with prefix pfx4', done => {
                const params = {
                    listingType: 'DelimiterOrphanDeleteMarker',
                    prefix: 'pfx4',
                };
                return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
                    assert.deepStrictEqual(err, null);
                    assert.strictEqual(data.IsTruncated, false);
                    assert(!data.NextMarker);
                    assert.strictEqual(data.Contents.length, 2);
                    assert.strictEqual(data.Contents[0].key, 'pfx4-test-object');
                    assert.strictEqual(data.Contents[1].key, 'pfx4-test-object2');

                    return done();
                });
            });

            it('Should return the first orphan delete marker version that starts with prefix', done => {
                const params = {
                    listingType: 'DelimiterOrphanDeleteMarker',
                    prefix: 'pfx4',
                    maxKeys: 1,
                };
                return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
                    assert.deepStrictEqual(err, null);
                    assert.strictEqual(data.IsTruncated, true);
                    assert.strictEqual(data.Contents.length, 1);
                    assert.strictEqual(data.NextMarker, 'pfx4-test-object');
                    assert.strictEqual(data.Contents[0].key, 'pfx4-test-object');

                    return done();
                });
            });

            it('Should return the following orphan delete marker version that starts with prefix', done => {
                const params = {
                    listingType: 'DelimiterOrphanDeleteMarker',
                    marker: 'pfx4-test-object',
                    prefix: 'pfx4',
                    maxKeys: 1,
                };
                return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
                    assert.deepStrictEqual(err, null);
                    assert.strictEqual(data.IsTruncated, false);
                    assert(!data.NextMarker);
                    assert.strictEqual(data.Contents.length, 1);
                    assert.strictEqual(data.Contents[0].key, 'pfx4-test-object2');

                    return done();
                });
            });

            it('Should return the truncated list of orphan delete markers older than 1970-01-01T00:00:00.006Z',
                done => {
                    const params = {
                        listingType: 'DelimiterOrphanDeleteMarker',
                        maxKeys: 2,
                        beforeDate: '1970-01-01T00:00:00.006Z',
                    };
                    return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
                        assert.deepStrictEqual(err, null);
                        assert.strictEqual(data.IsTruncated, true);
                        assert.strictEqual(data.Contents.length, 2);
                        assert.strictEqual(data.Contents[0].key, 'pfx0-test-object');
                        assert.strictEqual(data.Contents[1].key, 'pfx3-test-object');
                        assert.strictEqual(data.NextMarker, 'pfx3-test-object');

                        return done();
                    });
                });

            it('Should return the following list of orphan delete markers older than 1970-01-01T00:00:00.006Z',
                done => {
                    const params = {
                        listingType: 'DelimiterOrphanDeleteMarker',
                        maxKeys: 2,
                        beforeDate: '1970-01-01T00:00:00.006Z',
                        marker: 'pfx3-test-object',
                    };
                    return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
                        assert.deepStrictEqual(err, null);
                        assert.strictEqual(data.IsTruncated, false);
                        assert.strictEqual(data.Contents.length, 1);
                        assert.strictEqual(data.Contents[0].key, 'pfx4-test-object');

                        return done();
                    });
                });

            it('Should return the truncated list of orphan delete markers older than 1970-01-01T00:00:00.001Z',
                done => {
                    const params = {
                        listingType: 'DelimiterOrphanDeleteMarker',
                        maxKeys: 2,
                        beforeDate: '1970-01-01T00:00:00.001Z',
                    };
                    return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
                        assert.deepStrictEqual(err, null);
                        assert.strictEqual(data.IsTruncated, true);
                        assert.strictEqual(data.Contents.length, 2);
                        assert.strictEqual(data.Contents[0].key, 'pfx0-test-object');
                        assert.strictEqual(data.Contents[1].key, 'pfx3-test-object');
                        assert.strictEqual(data.NextMarker, 'pfx3-test-object');

                        return done();
                    });
                });
        });
    });
});

@@ -0,0 +1,104 @@
const async = require('async');
const BucketInfo = require('../../../../../lib/models/BucketInfo').default;
const assert = require('assert');

/**
 * Puts multiple versions of an object
 * @param {Object} metadata - metadata client
 * @param {String} bucketName - bucket name
 * @param {String} objName - object key
 * @param {Object} objVal - object metadata
 * @param {Object} params - versioning parameters
 * @param {number} versionNb - number of versions to put
 * @param {number} timestamp - used for last-modified
 * @param {Object} logger - a Logger instance
 * @param {Function} cb - callback
 * @returns {undefined}
 */
function putBulkObjectVersions(metadata, bucketName, objName, objVal, params, versionNb, timestamp, logger, cb) {
    let count = 0;
    const versionIds = [];
    return async.whilst(
        () => count < versionNb,
        cbIterator => {
            count++;
            const lastModified = new Date(timestamp + count).toISOString();
            // note: mutates objVal so that each put carries the new last-modified
            const finalObjectVal = Object.assign(objVal, { 'last-modified': lastModified });
            return metadata.putObjectMD(bucketName, objName, finalObjectVal, params, logger, (err, data) => {
                if (err) {
                    return cbIterator(err);
                }
                versionIds.push(JSON.parse(data).versionId);
                return cbIterator(null, versionIds);
            });
        }, (err, expectedVersionIds) => {
            if (err) {
                return cb(err);
            }
            // The last version is removed since it represents the current version.
            const lastVersionId = expectedVersionIds.pop();
            // The array is reversed to be aligned with the version order (latest to oldest).
            expectedVersionIds.reverse();
            return cb(null, { lastVersionId, expectedVersionIds });
        });
}
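
// For reference, a minimal usage sketch of the helper above, mirroring how the
// listing tests call it ('some-bucket' / 'some-key' are hypothetical names;
// `metadata` is an already set-up MetadataWrapper, `logger` a werelogs Logger):
//
//     putBulkObjectVersions(metadata, 'some-bucket', 'some-key', { key: 'some-key' },
//         { versioning: true, versionId: null, repairMaster: null },
//         3, 0, logger, (err, res) => {
//             // res.lastVersionId: id of the current (latest) version
//             // res.expectedVersionIds: non-current version ids, latest first
//         });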

function makeBucketMD(bucketName) {
    return BucketInfo.fromObj({
        _name: bucketName,
        _owner: 'testowner',
        _ownerDisplayName: 'testdisplayname',
        _creationDate: new Date().toJSON(),
        _acl: {
            Canned: 'private',
            FULL_CONTROL: [],
            WRITE: [],
            WRITE_ACP: [],
            READ: [],
            READ_ACP: [],
        },
        _mdBucketModelVersion: 10,
        _transient: false,
        _deleted: false,
        _serverSideEncryption: null,
        _versioningConfiguration: null,
        _locationConstraint: 'us-east-1',
        _readLocationConstraint: null,
        _cors: null,
        _replicationConfiguration: null,
        _lifecycleConfiguration: null,
        _uid: '',
        _isNFS: null,
        ingestion: null,
    });
}

function assertContents(contents, expected) {
    assert.strictEqual(contents.length, expected.length);
    contents.forEach((c, i) => {
        assert.strictEqual(c.key, expected[i].key);
        assert.strictEqual(c.value.LastModified, expected[i].LastModified);
        assert.strictEqual(c.value.staleDate, expected[i].staleDate);
        assert.strictEqual(c.value.dataStoreName, expected[i].dataStoreName);
        if (expected[i].VersionId) {
            assert.strictEqual(c.value.VersionId, expected[i].VersionId);
        }
    });
}

/**
 * Sets the "deleted" property to true
 * @param {Object} collection - collection to be updated
 * @param {string} key - object name
 * @param {Function} cb - callback
 * @returns {undefined}
 */
function flagObjectForDeletion(collection, key, cb) {
    collection.updateMany(
        { 'value.key': key },
        { $set: { 'value.deleted': true } },
        { upsert: false })
        .then(() => cb())
        .catch(err => cb(err));
}

module.exports = {
    putBulkObjectVersions,
    makeBucketMD,
    assertContents,
    flagObjectForDeletion,
};

@@ -0,0 +1,572 @@
const async = require('async');
const assert = require('assert');
const werelogs = require('werelogs');
const { MongoMemoryReplSet } = require('mongodb-memory-server');
const logger = new werelogs.Logger('MongoClientInterface', 'debug', 'debug');
const BucketInfo = require('../../../../lib/models/BucketInfo').default;
const MetadataWrapper =
    require('../../../../lib/storage/metadata/MetadataWrapper');
const { versioning } = require('../../../../index');
const { BucketVersioningKeyFormat } = versioning.VersioningConstants;
const sinon = require('sinon');
const MongoReadStream = require('../../../../lib/storage/metadata/mongoclient/readStream');

const IMPL_NAME = 'mongodb';
const DB_NAME = 'metadata';
const BUCKET_NAME = 'test-bucket';

const mongoserver = new MongoMemoryReplSet({
    debug: false,
    instanceOpts: [
        { port: 27020 },
    ],
    replSet: {
        name: 'rs0',
        count: 1,
        DB_NAME,
        storageEngine: 'ephemeralForTest',
    },
});

const variations = [
    { it: '(v0)', vFormat: BucketVersioningKeyFormat.v0 },
    { it: '(v1)', vFormat: BucketVersioningKeyFormat.v1 },
];

describe('MongoClientInterface::metadata.listObject', () => {
    let metadata;
    let collection;

    /**
     * Puts multiple versions of an object
     * @param {String} bucketName bucket name
     * @param {String} objName object key
     * @param {Object} objVal object metadata
     * @param {Object} params versioning parameters
     * @param {number} versionNb number of versions to put
     * @param {Function} cb callback
     * @returns {undefined}
     */
    function putBulkObjectVersions(bucketName, objName, objVal, params, versionNb, cb) {
        let count = 0;
        async.whilst(
            () => count < versionNb,
            cbIterator => {
                count++;
                // eslint-disable-next-line
                return metadata.putObjectMD(bucketName, objName, objVal, params,
                    logger, cbIterator);
            }, cb);
    }

    /**
     * Sets the "deleted" property to true
     * @param {string} key object name
     * @param {Function} cb callback
     * @returns {undefined}
     */
    function flagObjectForDeletion(key, cb) {
        collection.updateMany(
            { 'value.key': key },
            { $set: { 'value.deleted': true } },
            { upsert: false }).then(() => cb()).catch(err => cb(err));
    }

    function customListingParser(entries) {
        return entries.map(entry => JSON.parse(entry.value));
    }
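
    // The parser above replaces the default listing parser wired into the
    // metadata backend: each raw entry (presumably, from the way the parser is
    // used) carries the serialized object metadata as a JSON string in
    // `entry.value`, and the parser returns the plain metadata objects, e.g.:
    //     in:  { value: '{"key":"pfx1-test-object","versionId":"null"}' }
    //     out: { key: 'pfx1-test-object', versionId: 'null' }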

    beforeAll(done => {
        mongoserver.start().then(() => {
            mongoserver.waitUntilRunning().then(() => {
                const opts = {
                    mongodb: {
                        replicaSetHosts: 'localhost:27020',
                        writeConcern: 'majority',
                        replicaSet: 'rs0',
                        readPreference: 'primary',
                        database: DB_NAME,
                    },
                };
                metadata = new MetadataWrapper(IMPL_NAME, opts, null, logger);
                metadata.setup(done);
            });
        });
    });

    afterAll(done => {
        async.series([
            next => metadata.close(next),
            next => mongoserver.stop()
                .then(() => next())
                .catch(next),
        ], done);
    });

    variations.forEach(variation => {
        describe(`vFormat : ${variation.vFormat}`, () => {
            beforeEach(done => {
                const bucketMD = BucketInfo.fromObj({
                    _name: BUCKET_NAME,
                    _owner: 'testowner',
                    _ownerDisplayName: 'testdisplayname',
                    _creationDate: new Date().toJSON(),
                    _acl: {
                        Canned: 'private',
                        FULL_CONTROL: [],
                        WRITE: [],
                        WRITE_ACP: [],
                        READ: [],
                        READ_ACP: [],
                    },
                    _mdBucketModelVersion: 10,
                    _transient: false,
                    _deleted: false,
                    _serverSideEncryption: null,
                    _versioningConfiguration: null,
                    _locationConstraint: 'us-east-1',
                    _readLocationConstraint: null,
                    _cors: null,
                    _replicationConfiguration: null,
                    _lifecycleConfiguration: null,
                    _uid: '',
                    _isNFS: null,
                    ingestion: null,
                });
                const versionParams = {
                    versioning: true,
                    versionId: null,
                    repairMaster: null,
                };
                async.series([
                    next => {
                        metadata.client.defaultBucketKeyFormat = variation.vFormat;
                        return next();
                    },
                    next => metadata.createBucket(BUCKET_NAME, bucketMD, logger, err => {
                        if (err) {
                            return next(err);
                        }
                        collection = metadata.client.getCollection(BUCKET_NAME);
                        return next();
                    }),
                    next => {
                        const params = {
                            objName: 'pfx1-test-object',
                            objVal: {
                                key: 'pfx1-test-object',
                                versionId: 'null',
                                location: [{
                                    start: 0,
                                    size: 150,
                                    dataStoreETag: 'etag',
                                    dataStoreVersionId: 'versionId',
                                }],
                            },
                            nbVersions: 5,
                        };
                        putBulkObjectVersions(BUCKET_NAME, params.objName, params.objVal, versionParams,
                            params.nbVersions, next);
                    },
                    next => {
                        const params = {
                            objName: 'pfx2-test-object',
                            objVal: {
                                key: 'pfx2-test-object',
                                versionId: 'null',
                            },
                            nbVersions: 5,
                        };
                        putBulkObjectVersions(BUCKET_NAME, params.objName, params.objVal, versionParams,
                            params.nbVersions, next);
                    },
                    next => {
                        const params = {
                            objName: 'pfx3-test-object',
                            objVal: {
                                key: 'pfx3-test-object',
                                versionId: 'null',
                            },
                            nbVersions: 5,
                        };
                        putBulkObjectVersions(BUCKET_NAME, params.objName, params.objVal, versionParams,
                            params.nbVersions, next);
                    },
                ], done);
            });

            afterEach(done => {
                metadata.deleteBucket(BUCKET_NAME, logger, done);
            });

            it(`Should list master versions of objects ${variation.it}`, done => {
                const params = {
                    listingType: 'DelimiterMaster',
                    maxKeys: 100,
                };
                return metadata.listObject(BUCKET_NAME, params, logger, (err, data) => {
                    assert.deepStrictEqual(err, null);
                    assert.strictEqual(data.Contents.length, 3);
                    assert.strictEqual(data.Contents[0].key, 'pfx1-test-object');
                    assert.strictEqual(data.Contents[1].key, 'pfx2-test-object');
                    assert.strictEqual(data.Contents[2].key, 'pfx3-test-object');
                    return done();
                });
            });

            it(`Should truncate list of master versions of objects ${variation.it}`, done => {
                const params = {
                    listingType: 'DelimiterMaster',
                    maxKeys: 2,
                };
                return metadata.listObject(BUCKET_NAME, params, logger, (err, data) => {
                    assert.deepStrictEqual(err, null);
                    assert.strictEqual(data.Contents.length, 2);
                    assert.strictEqual(data.Contents[0].key, 'pfx1-test-object');
                    assert.strictEqual(data.Contents[1].key, 'pfx2-test-object');
                    return done();
                });
            });

            it(`Should list master versions of objects that start with prefix ${variation.it}`, done => {
                const bucketName = BUCKET_NAME;
                const params = {
                    listingType: 'DelimiterMaster',
                    maxKeys: 100,
                    prefix: 'pfx2',
                };
                return metadata.listObject(bucketName, params, logger, (err, data) => {
                    assert.deepStrictEqual(err, null);
                    assert.strictEqual(data.Contents.length, 1);
                    assert.strictEqual(data.Contents[0].key, 'pfx2-test-object');
                    return done();
                });
            });

            it(`Should return empty results when bucket is non-existent (master) ${variation.it}`, done => {
                const bucketName = 'non-existent-bucket';
                const params = {
                    listingType: 'DelimiterMaster',
                    maxKeys: 100,
                };
                return metadata.listObject(bucketName, params, logger, (err, data) => {
                    assert.deepStrictEqual(err, null);
                    assert(data);
                    assert.strictEqual(data.Contents.length, 0);
                    return done();
                });
            });

            it(`Should list all versions of objects ${variation.it}`, done => {
                const bucketName = BUCKET_NAME;
                const params = {
                    listingType: 'DelimiterVersions',
                    maxKeys: 1000,
                };
                const versionsPerKey = {};
                return metadata.listObject(bucketName, params, logger, (err, data) => {
                    assert.deepStrictEqual(err, null);
                    assert.strictEqual(data.Versions.length, 15);
                    data.Versions.forEach(version => {
                        versionsPerKey[version.key] = (versionsPerKey[version.key] || 0) + 1;
                    });
                    assert.strictEqual(versionsPerKey['pfx1-test-object'], 5);
                    assert.strictEqual(versionsPerKey['pfx2-test-object'], 5);
                    assert.strictEqual(versionsPerKey['pfx3-test-object'], 5);
                    return done();
                });
            });

            it(`Should truncate list of versions of objects ${variation.it}`, done => {
                const params = {
                    listingType: 'DelimiterVersions',
                    maxKeys: 5,
                };
                const versionsPerKey = {};
                return metadata.listObject(BUCKET_NAME, params, logger, (err, data) => {
                    assert.deepStrictEqual(err, null);
                    assert.strictEqual(data.Versions.length, 5);
                    data.Versions.forEach(version => {
                        versionsPerKey[version.key] = (versionsPerKey[version.key] || 0) + 1;
                    });
                    assert.strictEqual(versionsPerKey['pfx1-test-object'], 5);
                    return done();
                });
            });

            it(`Should list versions of objects that start with prefix ${variation.it}`, done => {
                const params = {
                    listingType: 'DelimiterVersions',
                    maxKeys: 100,
                    prefix: 'pfx2',
                };
                const versionsPerKey = {};
                return metadata.listObject(BUCKET_NAME, params, logger, (err, data) => {
                    assert.deepStrictEqual(err, null);
                    assert.strictEqual(data.Versions.length, 5);
                    data.Versions.forEach(version => {
                        versionsPerKey[version.key] = (versionsPerKey[version.key] || 0) + 1;
                    });
                    assert.strictEqual(versionsPerKey['pfx2-test-object'], 5);
                    return done();
                });
            });

            it(`Should return empty results when bucket is non-existent (version) ${variation.it}`, done => {
                const bucketName = 'non-existent-bucket';
                const params = {
                    listingType: 'DelimiterVersions',
                    maxKeys: 100,
                };
                return metadata.listObject(bucketName, params, logger, (err, data) => {
                    assert.deepStrictEqual(err, null);
                    assert(data);
                    assert.strictEqual(data.Versions.length, 0);
                    return done();
                });
            });

            it(`Should check entire list with pagination (version) ${variation.it}`, done => {
                const versionsPerKey = {};
                const bucketName = BUCKET_NAME;
                const get = (maxKeys, keyMarker, versionIdMarker, cb) => metadata.listObject(bucketName, {
                    listingType: 'DelimiterVersions',
                    maxKeys,
                    keyMarker,
                    versionIdMarker,
                }, logger, (err, res) => {
                    if (err) {
                        return cb(err);
                    }
                    res.Versions.forEach(version => {
                        versionsPerKey[version.key] = (versionsPerKey[version.key] || 0) + 1;
                    });
                    if (res.IsTruncated) {
                        return get(maxKeys, res.NextKeyMarker, res.NextVersionIdMarker, cb);
                    }
                    return cb(null);
                });
                return get(3, null, null, err => {
                    assert.deepStrictEqual(err, null);
                    assert.strictEqual(Object.keys(versionsPerKey).length, 3);
                    assert.strictEqual(versionsPerKey['pfx1-test-object'], 5);
                    assert.strictEqual(versionsPerKey['pfx2-test-object'], 5);
                    assert.strictEqual(versionsPerKey['pfx3-test-object'], 5);
                    done();
                });
            });

            it(`Should not list phd master key when listing masters ${variation.it}`, done => {
                const objVal = {
                    key: 'pfx1-test-object',
                    versionId: 'null',
                };
                const versionParams = {
                    versioning: true,
                };
                const params = {
                    listingType: 'DelimiterMaster',
                    prefix: 'pfx1',
                };
                let versionId;
                let lastVersionId;
                async.series([
                    next => metadata.putObjectMD(BUCKET_NAME, 'pfx1-test-object', objVal, versionParams,
                        logger, (err, res) => {
                            if (err) {
                                return next(err);
                            }
                            versionId = JSON.parse(res).versionId;
                            return next(null);
                        }),
                    next => metadata.putObjectMD(BUCKET_NAME, 'pfx1-test-object', objVal, versionParams,
                        logger, (err, res) => {
                            if (err) {
                                return next(err);
                            }
                            lastVersionId = JSON.parse(res).versionId;
                            return next(null);
                        }),
                    // when deleting the last version of an object a PHD master is created
                    // and kept for 15s before it's repaired
                    next => metadata.deleteObjectMD(BUCKET_NAME, 'pfx1-test-object', { versionId: lastVersionId },
                        logger, next),
                    next => metadata.listObject(BUCKET_NAME, params, logger, (err, data) => {
                        assert.ifError(err);
                        assert.strictEqual(data.Contents[0].value.VersionId, versionId);
                        return next();
                    }),
                ], done);
            });

            it(`Should not list phd master key when listing versions ${variation.it}`, done => {
                const objVal = {
                    key: 'pfx1-test-object',
                    versionId: 'null',
                };
                const versionParams = {
                    versioning: true,
                };
                const params = {
                    listingType: 'DelimiterVersions',
                    prefix: 'pfx1',
                };
                let lastVersionId;
                let versionIds;
                async.series([
                    next => metadata.listObject(BUCKET_NAME, params, logger, (err, data) => {
                        assert.ifError(err);
                        assert.strictEqual(data.Versions.length, 5);
                        versionIds = data.Versions.map(version => version.VersionId);
                        return next();
                    }),
                    next => metadata.putObjectMD(BUCKET_NAME, 'pfx1-test-object', objVal, versionParams,
                        logger, (err, res) => {
                            if (err) {
                                return next(err);
                            }
                            lastVersionId = JSON.parse(res).versionId;
                            return next(null);
                        }),
                    // when deleting the last version of an object a PHD master is created
                    // and kept for 15s before it's repaired
                    next => metadata.deleteObjectMD(BUCKET_NAME, 'pfx1-test-object', { versionId: lastVersionId },
                        logger, next),
                    next => metadata.listObject(BUCKET_NAME, params, logger, (err, data) => {
                        assert.ifError(err);
                        const newVersionIds = data.Versions.map(version => version.VersionId);
                        assert.strictEqual(data.Versions.length, 5);
                        assert(versionIds.every(version => newVersionIds.includes(version)));
                        return next();
                    }),
                ], done);
            });

            it('Should not list objects tagged for deletion (master keys)', done => {
                const objVal = {
                    key: 'pfx4-test-object',
                };
                const versionParams = {
                    versioning: true,
                };
                const params = {
                    listingType: 'DelimiterMaster',
                };
                async.series([
                    next => metadata.putObjectMD(BUCKET_NAME, objVal.key, objVal, versionParams,
                        logger, next),
                    next => flagObjectForDeletion(objVal.key, next),
                    next => metadata.listObject(BUCKET_NAME, params, logger, (err, data) => {
                        assert.ifError(err);
                        assert.strictEqual(data.Contents.length, 3);
                        const listedObjectNames = data.Contents.map(x => x.key);
                        assert(!listedObjectNames.includes(objVal.key));
                        return next();
                    }),
                ], done);
            });

            it('Should not list objects tagged for deletion (version keys)', done => {
                const objVal = {
                    key: 'pfx4-test-object',
                };
                const versionParams = {
                    versioning: true,
                };
                const params = {
                    listingType: 'DelimiterVersions',
                };
                async.series([
                    next => metadata.putObjectMD(BUCKET_NAME, objVal.key, objVal, versionParams,
                        logger, next),
                    next => flagObjectForDeletion(objVal.key, next),
                    next => metadata.listObject(BUCKET_NAME, params, logger, (err, data) => {
                        assert.ifError(err);
                        assert.strictEqual(data.Versions.length, 15);
                        const listedObjectNames = data.Versions.map(x => x.key);
                        assert(!listedObjectNames.includes(objVal.key));
                        return next();
                    }),
                ], done);
            });

            it('Should properly destroy the MongoDBReadStream', done => {
                // eslint-disable-next-line func-names
                const destroyStub = sinon.stub(MongoReadStream.prototype, 'destroy').callsFake(function (...args) {
                    // call through to the real destroy method
                    MongoReadStream.prototype.destroy.wrappedMethod.apply(this, args);
                });
                const params = {
                    listingType: 'DelimiterMaster',
                    maxKeys: 100,
                };
                return metadata.listObject(BUCKET_NAME, params, logger, err => {
                    assert.ifError(err);
                    assert(destroyStub.called, 'Destroy should have been called on MongoReadStream');
                    // Restore original destroy method
                    destroyStub.restore();
                    return done();
                });
            });

            it('Should properly destroy the MongoDBReadStream on error', done => {
                // eslint-disable-next-line func-names
                const destroyStub = sinon.stub(MongoReadStream.prototype, 'destroy').callsFake(function (...args) {
                    // call through to the real destroy method
                    MongoReadStream.prototype.destroy.wrappedMethod.apply(this, args);
                });
                // stub _read to make the stream emit an error
                // eslint-disable-next-line func-names
                const readStub = sinon.stub(MongoReadStream.prototype, '_read').callsFake(function () {
                    this.emit('error', new Error('error'));
                });
                const params = {
                    listingType: 'DelimiterMaster',
                    maxKeys: 100,
                };
                return metadata.listObject(BUCKET_NAME, params, logger, err => {
                    assert(err, 'Expected an error');
                    assert(destroyStub.called, 'Destroy should have been called on MongoReadStream');
                    destroyStub.restore();
                    readStub.restore();
                    return done();
                });
            });

            it('Should not include location in listing result and use custom listing parser', done => {
                const opts = {
                    mongodb: {
                        replicaSetHosts: 'localhost:27020',
                        writeConcern: 'majority',
                        replicaSet: 'rs0',
                        readPreference: 'primary',
                        database: DB_NAME,
                    },
                    customListingParser,
                };

                const parserSpy = sinon.spy(opts, 'customListingParser');

                const md = new MetadataWrapper(IMPL_NAME, opts, null, logger);
                md.setup(() => {
                    const params = {
                        listingType: 'DelimiterMaster',
                        maxKeys: 100,
                    };
                    return md.listObject(BUCKET_NAME, params, logger, (err, data) => {
                        assert.ifError(err);
                        assert.strictEqual(data.Contents.length, 3);
                        assert.strictEqual(data.Contents[0].key, 'pfx1-test-object');
                        assert.strictEqual(data.Contents[0].location, undefined);
                        assert(parserSpy.called);
                        return done();
                    });
                });
            });
        });
    });
});

@@ -0,0 +1,462 @@
const async = require('async');
const assert = require('assert');
const werelogs = require('werelogs');
const { MongoMemoryReplSet } = require('mongodb-memory-server');
const { errors, versioning } = require('../../../../index');
const logger = new werelogs.Logger('MongoClientInterface', 'debug', 'debug');
const BucketInfo = require('../../../../lib/models/BucketInfo').default;
const MetadataWrapper =
    require('../../../../lib/storage/metadata/MetadataWrapper');
const { BucketVersioningKeyFormat } = versioning.VersioningConstants;

const IMPL_NAME = 'mongodb';
const DB_NAME = 'metadata';
const BUCKET_NAME = 'test-bucket';
const OBJECT_NAME = 'test-object';
const VERSION_ID = '98451712418844999999RG001 22019.0';

const mongoserver = new MongoMemoryReplSet({
    debug: false,
    instanceOpts: [
        { port: 27021 },
    ],
    replSet: {
        name: 'rs0',
        count: 1,
        DB_NAME,
        storageEngine: 'ephemeralForTest',
    },
});

const variations = [
    { it: '(v0)', vFormat: BucketVersioningKeyFormat.v0 },
    { it: '(v1)', vFormat: BucketVersioningKeyFormat.v1 },
];

describe('MongoClientInterface:metadata.putObjectMD', () => {
    let metadata;
    let collection;

    function getObject(key, cb) {
        collection.findOne({
            _id: key,
        }, {}).then(doc => {
            if (!doc) {
                return cb(errors.NoSuchKey);
            }
            return cb(null, doc.value);
        }).catch(err => cb(err));
    }

    function getObjectCount(cb) {
        collection.countDocuments()
            .then(count => cb(null, count))
            .catch(err => cb(err));
    }
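
    // `getObject` takes the raw MongoDB `_id`, which depends on the bucket key
    // format: in v0 the master document's `_id` is the object name itself,
    // while in v1 it carries the '\x7fM' prefix (see the inline ternaries in
    // the tests below). A hypothetical helper capturing that convention:
    //
    //     function masterKey(vFormat, objName) {
    //         return vFormat === 'v0' ? objName : `\x7fM${objName}`;
    //     }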

    beforeAll(done => {
        mongoserver.start().then(() => {
            mongoserver.waitUntilRunning().then(() => {
                const opts = {
                    mongodb: {
                        replicaSetHosts: 'localhost:27021',
                        writeConcern: 'majority',
                        replicaSet: 'rs0',
                        readPreference: 'primary',
                        database: DB_NAME,
                    },
                };
                metadata = new MetadataWrapper(IMPL_NAME, opts, null, logger);
                metadata.setup(done);
            });
        });
    });

    afterAll(done => {
        async.series([
            next => metadata.close(next),
            next => mongoserver.stop()
                .then(() => next())
                .catch(next),
        ], done);
    });

    variations.forEach(variation => {
        const itOnlyInV1 = variation.vFormat === 'v1' ? it : it.skip;
        describe(`vFormat : ${variation.vFormat}`, () => {
            beforeEach(done => {
                const bucketMD = BucketInfo.fromObj({
                    _name: BUCKET_NAME,
                    _owner: 'testowner',
                    _ownerDisplayName: 'testdisplayname',
                    _creationDate: new Date().toJSON(),
                    _acl: {
                        Canned: 'private',
                        FULL_CONTROL: [],
                        WRITE: [],
                        WRITE_ACP: [],
                        READ: [],
                        READ_ACP: [],
                    },
                    _mdBucketModelVersion: 10,
                    _transient: false,
                    _deleted: false,
                    _serverSideEncryption: null,
                    _versioningConfiguration: null,
                    _locationConstraint: 'us-east-1',
                    _readLocationConstraint: null,
                    _cors: null,
                    _replicationConfiguration: null,
                    _lifecycleConfiguration: null,
                    _uid: '',
                    _isNFS: null,
                    ingestion: null,
                });
                async.series([
                    next => {
                        metadata.client.defaultBucketKeyFormat = variation.vFormat;
                        return next();
                    },
                    next => metadata.createBucket(BUCKET_NAME, bucketMD, logger, err => {
                        if (err) {
                            return next(err);
                        }
                        collection = metadata.client.getCollection(BUCKET_NAME);
                        return next();
                    }),
                ], done);
            });

            afterEach(done => {
                metadata.deleteBucket(BUCKET_NAME, logger, done);
            });

            it(`Should put a new non-versioned object ${variation.it}`, done => {
                const objVal = {
                    key: OBJECT_NAME,
                    versionId: 'null',
                    updated: false,
                };
                const params = {
                    versioning: null,
                    versionId: null,
                    repairMaster: null,
                };
                async.series([
                    next => metadata.putObjectMD(BUCKET_NAME, OBJECT_NAME, objVal, params, logger, next),
                    next => {
                        const key = variation.vFormat === 'v0' ? 'test-object' : '\x7fMtest-object';
                        getObject(key, (err, object) => {
                            assert.deepStrictEqual(err, null);
                            assert.strictEqual(object.key, OBJECT_NAME);
                            return next();
                        });
                    },
                    // When versioning is not active, only one document (the master) is created
                    next => getObjectCount((err, count) => {
                        assert.deepStrictEqual(err, null);
                        assert.strictEqual(count, 1);
                        return next();
                    }),
                ], done);
            });

            it(`Should update the metadata ${variation.it}`, done => {
                const objVal = {
                    key: OBJECT_NAME,
                    versionId: 'null',
                    updated: false,
                };
                const params = {
                    versioning: null,
                    versionId: null,
                    repairMaster: null,
                };
                async.series([
                    next => metadata.putObjectMD(BUCKET_NAME, OBJECT_NAME, objVal, params, logger, next),
                    next => {
                        objVal.updated = true;
                        metadata.putObjectMD(BUCKET_NAME, OBJECT_NAME, objVal, params, logger, next);
                    },
                    // object metadata must be updated
                    next => {
                        const key = variation.vFormat === 'v0' ? 'test-object' : '\x7fMtest-object';
                        getObject(key, (err, object) => {
                            assert.deepStrictEqual(err, null);
                            assert.strictEqual(object.key, OBJECT_NAME);
                            assert.strictEqual(object.updated, true);
                            return next();
                        });
                    },
                    // Only a master version should be created
                    next => getObjectCount((err, count) => {
                        assert.deepStrictEqual(err, null);
                        assert.strictEqual(count, 1);
                        return next();
                    }),
                ], done);
            });

            it(`Should put a versioned object with the specified versionId ${variation.it}`, done => {
                const objVal = {
                    key: OBJECT_NAME,
                    versionId: VERSION_ID,
                    updated: false,
                };
                const params = {
                    versioning: true,
                    versionId: VERSION_ID,
                    repairMaster: null,
                };
                async.series([
                    next => metadata.putObjectMD(BUCKET_NAME, OBJECT_NAME, objVal, params, logger, next),
                    // checking if metadata corresponds to what was given to the function
                    next => {
                        const key = variation.vFormat === 'v0' ? 'test-object' : '\x7fMtest-object';
                        getObject(key, (err, object) => {
                            assert.deepStrictEqual(err, null);
                            assert.strictEqual(object.key, OBJECT_NAME);
                            assert.strictEqual(object.versionId, VERSION_ID);
                            return next();
                        });
                    },
                    // We'll have one master and one version
                    next => getObjectCount((err, count) => {
                        assert.deepStrictEqual(err, null);
                        assert.strictEqual(count, 2);
                        return next();
                    }),
                ], done);
            });

            it(`Should put new version and update master ${variation.it}`, done => {
                const objVal = {
                    key: OBJECT_NAME,
                    versionId: VERSION_ID,
                    updated: false,
                };
                const params = {
                    versioning: true,
                    versionId: null,
                    repairMaster: null,
                };
                let versionId = null;

                async.series([
                    // We first create a master and a version
                    next => metadata.putObjectMD(BUCKET_NAME, OBJECT_NAME, objVal, params, logger, (err, data) => {
                        assert.deepStrictEqual(err, null);
                        versionId = JSON.parse(data).versionId;
                        return next();
                    }),
                    // We put another version of the object
                    next => metadata.putObjectMD(BUCKET_NAME, OBJECT_NAME, objVal, params, logger, next),
                    // Master must be updated
                    next => {
                        const key = variation.vFormat === 'v0' ? 'test-object' : '\x7fMtest-object';
                        getObject(key, (err, object) => {
                            assert.deepStrictEqual(err, null);
                            assert.strictEqual(object.key, OBJECT_NAME);
                            assert.notStrictEqual(object.versionId, versionId);
                            return next();
                        });
                    },
                    // we'll have two versions and one master
                    next => getObjectCount((err, count) => {
                        assert.deepStrictEqual(err, null);
                        assert.strictEqual(count, 3);
                        return next();
                    }),
                ], done);
            });

            it(`Should update master when versioning is disabled ${variation.it}`, done => {
                const objVal = {
                    key: OBJECT_NAME,
                    versionId: VERSION_ID,
                    updated: false,
                };
                const params = {
                    versioning: true,
                    versionId: null,
                    repairMaster: null,
                };
                let versionId = null;
                async.series([
                    // We first create a new version and master
                    next => metadata.putObjectMD(BUCKET_NAME, OBJECT_NAME, objVal, params, logger, (err, data) => {
                        assert.deepStrictEqual(err, null);
                        versionId = JSON.parse(data).versionId;
                        return next();
                    }),
                    next => {
                        // Disable versioning and put a new version
                        params.versioning = false;
                        params.versionId = '';
                        return metadata.putObjectMD(BUCKET_NAME, OBJECT_NAME, objVal, params, logger, next);
                    },
                    // Master must be updated
                    next => {
                        const key = variation.vFormat === 'v0' ? 'test-object' : '\x7fMtest-object';
                        getObject(key, (err, object) => {
                            assert.deepStrictEqual(err, null);
                            assert.strictEqual(object.key, OBJECT_NAME);
                            assert.notStrictEqual(object.versionId, versionId);
                            return next();
                        });
                    },
                    // The second put shouldn't create a new version
                    next => getObjectCount((err, count) => {
                        assert.deepStrictEqual(err, null);
                        assert.strictEqual(count, 2);
                        return next();
                    }),
                ], done);
            });

            it(`Should update latest version and repair master ${variation.it}`, done => {
                const objVal = {
                    key: OBJECT_NAME,
                    versionId: VERSION_ID,
                    updated: false,
                };
                const params = {
                    versioning: true,
                    versionId: VERSION_ID,
                    repairMaster: null,
                };
                async.series([
                    // We first create a new version and master
                    next => metadata.putObjectMD(BUCKET_NAME, OBJECT_NAME, objVal, params, logger, next),
                    next => {
                        // Update the version and repair the master
                        params.repairMaster = true;
                        objVal.updated = true;
                        return metadata.putObjectMD(BUCKET_NAME, OBJECT_NAME, objVal, params, logger, next);
                    },
                    // Master must be updated
                    next => {
                        const key = variation.vFormat === 'v0' ? 'test-object' : '\x7fMtest-object';
                        getObject(key, (err, object) => {
                            assert.deepStrictEqual(err, null);
                            assert.strictEqual(object.key, OBJECT_NAME);
                            assert.strictEqual(object.versionId, VERSION_ID);
                            assert.strictEqual(object.updated, true);
                            return next();
                        });
                    },
                    // The second put shouldn't create a new version
                    next => getObjectCount((err, count) => {
                        assert.deepStrictEqual(err, null);
                        assert.strictEqual(count, 2);
                        return next();
                    }),
                ], done);
            });

            itOnlyInV1(`Should delete master when last version is delete marker ${variation.it}`, done => {
                const objVal = {
                    key: OBJECT_NAME,
                    versionId: VERSION_ID,
                    updated: false,
                    isDeleteMarker: false,
                };
                const params = {
                    versioning: true,
                    versionId: VERSION_ID,
                    repairMaster: null,
                };
                async.series([
                    // We first create a new version and master
                    next => metadata.putObjectMD(BUCKET_NAME, OBJECT_NAME, objVal, params, logger, next),
                    // putting a delete marker as last version
                    next => {
                        objVal.isDeleteMarker = true;
                        params.versionId = null;
                        return metadata.putObjectMD(BUCKET_NAME, OBJECT_NAME, objVal, params, logger, next);
                    },
                    // master must be deleted
                    next => getObject('\x7fMtest-object', err => {
                        assert.deepStrictEqual(err, errors.NoSuchKey);
                        return next();
                    }),
                ], done);
            });

            itOnlyInV1(`Should create master when new version is put on top of delete marker ${variation.it}`, done => {
                const objVal = {
                    key: OBJECT_NAME,
                    versionId: VERSION_ID,
                    updated: false,
                    isDeleteMarker: false,
                };
                const params = {
                    versioning: true,
                    versionId: VERSION_ID,
                    repairMaster: null,
                };
                async.series([
                    // We first create a new version and master
                    next => metadata.putObjectMD(BUCKET_NAME, OBJECT_NAME, objVal, params, logger, next),
                    // putting a delete marker as last version
                    next => {
                        objVal.isDeleteMarker = true;
                        params.versionId = null;
                        return metadata.putObjectMD(BUCKET_NAME, OBJECT_NAME, objVal, params, logger, next);
                    },
                    // We put a new version on top of the delete marker
                    next => {
                        objVal.isDeleteMarker = false;
                        objVal.updated = true;
                        objVal.versionId = null;
                        return metadata.putObjectMD(BUCKET_NAME, OBJECT_NAME, objVal, params, logger, next);
                    },
                    // master must be created
                    next => getObject('\x7fMtest-object', (err, object) => {
                        assert.deepStrictEqual(err, null);
                        assert.strictEqual(object.key, OBJECT_NAME);
                        assert.strictEqual(object.updated, true);
                        assert.strictEqual(object.isDeleteMarker, false);
                        assert.notEqual(object.versionId, VERSION_ID);
                        return next();
                    }),
                ], done);
            });

            itOnlyInV1(`Should not create master when previous version is updated ${variation.it}`, done => {
                const objVal = {
                    key: OBJECT_NAME,
                    versionId: VERSION_ID,
                    updated: false,
                    isDeleteMarker: false,
                };
                const params = {
                    versioning: true,
                    repairMaster: null,
                    versionId: VERSION_ID,
                };
                async.series([
                    // We first create a new version and master
                    next => metadata.putObjectMD(BUCKET_NAME, OBJECT_NAME, objVal, params, logger, next),
                    // putting a delete marker as last version
                    next => {
                        objVal.isDeleteMarker = true;
                        params.versionId = null;
                        return metadata.putObjectMD(BUCKET_NAME, OBJECT_NAME, objVal, params, logger, next);
                    },
                    // update previous version
                    next => {
                        objVal.isDeleteMarker = false;
                        objVal.updated = true;
                        params.versionId = VERSION_ID;
                        return metadata.putObjectMD(BUCKET_NAME, OBJECT_NAME, objVal, params, logger, next);
                    },
                    next => getObject('\x7fMtest-object', err => {
                        assert.deepStrictEqual(err, errors.NoSuchKey);
                        return next();
                    }),
                ], done);
            });
        });
    });
});

@@ -0,0 +1,336 @@
const async = require('async');
|
||||
const assert = require('assert');
|
||||
const werelogs = require('werelogs');
|
||||
const { MongoMemoryReplSet } = require('mongodb-memory-server');
|
||||
const { errors, versioning } = require('../../../../index');
|
||||
const logger = new werelogs.Logger('MongoClientInterface', 'debug', 'debug');
|
||||
const BucketInfo = require('../../../../lib/models/BucketInfo').default;
|
||||
const MetadataWrapper =
|
||||
require('../../../../lib/storage/metadata/MetadataWrapper');
|
||||
const { BucketVersioningKeyFormat } = versioning.VersioningConstants;
|
||||
|
||||
const IMP_NAME = 'mongodb';
|
||||
const DB_NAME = 'metadata';
|
||||
const BUCKET_NAME = 'testbucket';
|
||||
|
||||
const mongoserver = new MongoMemoryReplSet({
|
||||
debug: false,
|
||||
instanceOpts: [
|
||||
{ port: 27022 },
|
||||
],
|
||||
replSet: {
|
||||
name: 'rs0',
|
||||
count: 1,
|
||||
DB_NAME,
|
||||
storageEngine: 'ephemeralForTest',
|
||||
},
|
||||
});
|
||||
|
||||
describe('MongoClientInterface:withCond', () => {
|
||||
let metadata;
|
||||
|
||||
const variations = [
|
||||
{ it: '(v0)', vFormat: BucketVersioningKeyFormat.v0 },
|
||||
{ it: '(v1)', vFormat: BucketVersioningKeyFormat.v1 },
|
||||
];
|
||||
|
||||
beforeAll(done => {
|
||||
mongoserver.start().then(() => {
|
||||
mongoserver.waitUntilRunning().then(() => {
|
||||
const opts = {
|
||||
mongodb: {
|
||||
replicaSetHosts: 'localhost:27022',
|
||||
writeConcern: 'majority',
|
||||
replicaSet: 'rs0',
|
||||
readPreference: 'primary',
|
||||
database: DB_NAME,
|
||||
},
|
||||
};
|
||||
metadata = new MetadataWrapper(IMP_NAME, opts, null, logger);
|
||||
metadata.setup(done);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
afterAll(done => {
|
||||
async.series([
|
||||
next => metadata.close(next),
|
||||
next => mongoserver.stop()
|
||||
.then(() => next())
|
||||
.catch(next),
|
||||
], done);
|
||||
});
|
||||
|
||||
    variations.forEach(variation => {
        describe('::putObjectWithCond', () => {
            beforeEach(done => {
                const bucketMD = BucketInfo.fromObj({
                    _name: BUCKET_NAME,
                    _owner: 'testowner',
                    _ownerDisplayName: 'testdisplayname',
                    _creationDate: new Date().toJSON(),
                    _acl: {
                        Canned: 'private',
                        FULL_CONTROL: [],
                        WRITE: [],
                        WRITE_ACP: [],
                        READ: [],
                        READ_ACP: [],
                    },
                    _mdBucketModelVersion: 10,
                    _transient: false,
                    _deleted: false,
                    _serverSideEncryption: null,
                    _versioningConfiguration: null,
                    _locationConstraint: 'us-east-1',
                    _readLocationConstraint: null,
                    _cors: null,
                    _replicationConfiguration: null,
                    _lifecycleConfiguration: null,
                    _uid: '',
                    _isNFS: null,
                    ingestion: null,
                });
                async.series([
                    next => {
                        metadata.client.defaultBucketKeyFormat = variation.vFormat;
                        return next();
                    },
                    next => metadata.createBucket(BUCKET_NAME, bucketMD, logger, next),
                ], done);
            });

            afterEach(done => {
                metadata.deleteBucket(BUCKET_NAME, logger, done);
            });

            const tests = [
                [
                    `should upsert the object if it does not exist ${variation.it}`,
                    {
                        initVal: null,
                        upsertVal: { value: { number: 42, string: 'forty-two' } },
                        conditions: { value: { number: 24 } },
                        expectedVal: { value: { number: 42, string: 'forty-two' } },
                        error: null,
                    },
                ],
                [
                    `should not update an existing object if the conditions fail ${variation.it}`,
                    {
                        initVal: { value: { number: 0, string: 'zero' } },
                        upsertVal: { value: { number: 42, string: 'forty-two' } },
                        conditions: { value: { number: 24 } },
                        expectedVal: { value: { number: 0, string: 'zero' } },
                        error: errors.InternalError,
                    },
                ],
                [
                    `should not update an existing object if a $eq condition fails ${variation.it}`,
                    {
                        initVal: { value: { number: 0, string: 'zero' } },
                        upsertVal: { value: { number: 42, string: 'forty-two' } },
                        conditions: { value: { string: { $eq: 'twenty-four' } } },
                        expectedVal: { value: { number: 0, string: 'zero' } },
                        error: errors.InternalError,
                    },
                ],
                [
                    `should not update an existing object if one of several conditions fails ${variation.it}`,
                    {
                        initVal: { value: { number: 0, string: 'zero' } },
                        upsertVal: { value: { number: 42, string: 'forty-two' } },
                        conditions: {
                            value: {
                                string: { $eq: 'twenty-four' },
                                number: { $eq: 0 },
                            },
                        },
                        expectedVal: { value: { number: 0, string: 'zero' } },
                        error: errors.InternalError,
                    },
                ],
                [
                    `should update an existing object if the conditions pass ${variation.it}`,
                    {
                        initVal: { value: { number: 24, string: 'twenty-four' } },
                        upsertVal: { value: { number: 42, string: 'forty-two' } },
                        conditions: { value: { number: 24 } },
                        expectedVal: { value: { number: 42, string: 'forty-two' } },
                        error: null,
                    },
                ],
                [
                    `should update an existing object if a $eq condition passes ${variation.it}`,
                    {
                        initVal: { value: { number: 24, string: 'twenty-four' } },
                        upsertVal: { value: { number: 42, string: 'forty-two' } },
                        conditions: { value: { string: { $eq: 'twenty-four' } } },
                        expectedVal: { value: { number: 42, string: 'forty-two' } },
                        error: null,
                    },
                ],
                [
                    `should update an existing object if all conditions pass ${variation.it}`,
                    {
                        initVal: { value: { number: 24, string: 'twenty-four' } },
                        upsertVal: { value: { number: 42, string: 'forty-two' } },
                        conditions: {
                            value: {
                                string: { $eq: 'twenty-four' },
                                number: { $eq: 24 },
                            },
                        },
                        expectedVal: { value: { number: 42, string: 'forty-two' } },
                        error: null,
                    },
                ],
            ];
            tests.forEach(([msg, testCase]) => it(msg, done => {
                const objectKey = 'testkey';
                const {
                    initVal, upsertVal, conditions, expectedVal, error,
                } = testCase;
                const params = { conditions };
                async.series([
                    next => {
                        if (!initVal) {
                            return next();
                        }
                        return metadata.putObjectMD(BUCKET_NAME, objectKey, initVal,
                            {}, logger, next);
                    },
                    next => metadata.putObjectWithCond(BUCKET_NAME, objectKey,
                        upsertVal, params, logger, err => {
                            if (error) {
                                assert.deepStrictEqual(err, error);
                                return next();
                            }
                            assert(!err);
                            return next();
                        }),
                    next => metadata.getObjectMD(BUCKET_NAME, objectKey, {}, logger,
                        (err, res) => {
                            assert(!err);
                            assert.deepStrictEqual(res, expectedVal);
                            next();
                        }),
                ], done);
            }));
        });

        describe('::deleteObjectWithCond', () => {
            afterEach(done => {
                metadata.deleteBucket(BUCKET_NAME, logger, done);
            });

            const tests = [
                [
                    `should return no such key if the object does not exist ${variation.it}`,
                    {
                        initVal: null,
                        conditions: { value: { number: 24 } },
                        expectedVal: null,
                        error: errors.NoSuchKey,
                    },
                ],
                [
                    `should return no such key if the conditions fail ${variation.it}`,
                    {
                        initVal: { value: { number: 0, string: 'zero' } },
                        conditions: { value: { number: { $eq: 24 } } },
                        expectedVal: { value: { number: 0, string: 'zero' } },
                        error: errors.NoSuchKey,
                    },
                ],
                [
                    `should return no such key if a string condition fails ${variation.it}`,
                    {
                        initVal: { value: { number: 0, string: 'zero' } },
                        conditions: { value: { string: 'twenty-four' } },
                        expectedVal: { value: { number: 0, string: 'zero' } },
                        error: errors.NoSuchKey,
                    },
                ],
                [
                    `should return no such key if one of several conditions fails ${variation.it}`,
                    {
                        initVal: { value: { number: 0, string: 'zero' } },
                        conditions: {
                            value: {
                                string: 'twenty-four',
                                number: { $eq: 0 },
                            },
                        },
                        expectedVal: { value: { number: 0, string: 'zero' } },
                        error: errors.NoSuchKey,
                    },
                ],
                [
                    `should successfully delete a matched object ${variation.it}`,
                    {
                        initVal: { value: { number: 24, string: 'twenty-four' } },
                        conditions: { value: { number: 24 } },
                        expectedVal: null,
                        error: null,
                    },
                ],
                [
                    `should successfully delete an object matched with a $eq condition ${variation.it}`,
                    {
                        initVal: { value: { number: 24, string: 'twenty-four' } },
                        conditions: { value: { string: { $eq: 'twenty-four' } } },
                        expectedVal: null,
                        error: null,
                    },
                ],
                [
                    `should successfully delete an object matching all conditions ${variation.it}`,
                    {
                        initVal: { value: { number: 24, string: 'twenty-four' } },
                        conditions: {
                            value: {
                                string: { $eq: 'twenty-four' },
                                number: { $eq: 24 },
                            },
                        },
                        expectedVal: null,
                        error: null,
                    },
                ],
            ];
            tests.forEach(([msg, testCase]) => it(msg, done => {
                const objectKey = 'testkey';
                const { initVal, conditions, expectedVal, error } = testCase;
                const params = { conditions };
                async.series([
                    next => {
                        if (!initVal) {
                            return next();
                        }
                        return metadata.putObjectMD(BUCKET_NAME, objectKey, initVal,
                            {}, logger, next);
                    },
                    next => metadata.deleteObjectWithCond(BUCKET_NAME, objectKey,
                        params, logger, err => {
                            if (error) {
                                assert.deepStrictEqual(err, error);
                                return next();
                            }
                            assert(!err);
                            return next();
                        }),
                    next => metadata.getObjectMD(BUCKET_NAME, objectKey, {}, logger,
                        (err, res) => {
                            if (expectedVal) {
                                assert.deepStrictEqual(res, expectedVal);
                            } else {
                                assert.deepStrictEqual(err, errors.NoSuchKey);
                            }
                            return next();
                        }),
                ], done);
            }));
        });
    });
});
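
A note on the semantics pinned down above: putObjectWithCond behaves like a conditional upsert, and a failed condition on an existing key surfaces as InternalError. The following standalone sketch reproduces that behavior with the plain MongoDB Node.js driver; it is an illustration under assumed names (`collection` is a driver Collection), not Arsenal's actual implementation.

// Hypothetical sketch of a conditional upsert; not the real implementation.
// `conditions` has the same shape as in the tests above, e.g.
// { value: { number: 24 } } or { value: { string: { $eq: 'twenty-four' } } }.
async function putObjectWithCondSketch(collection, key, newVal, conditions) {
    const filter = { _id: key };
    // flatten { value: { number: 24 } } into { 'value.number': 24 } so it
    // can be used directly as a MongoDB query filter
    Object.entries(conditions.value).forEach(([field, cond]) => {
        filter[`value.${field}`] = cond;
    });
    // If the key is absent, the upsert inserts it; if the key exists but the
    // conditions do not match, the insert attempt collides with the existing
    // _id and fails with a duplicate-key error, which a wrapper could
    // translate into the InternalError observed in the tests.
    return collection.updateOne(filter, { $set: { value: newVal.value } },
        { upsert: true });
}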

@@ -0,0 +1,316 @@
'use strict'; // eslint-disable-line strict

const werelogs = require('werelogs');
const assert = require('assert');
const async = require('async');

const logger = new werelogs.Logger('MetadataProxyServer', 'debug', 'debug');
const MetadataWrapper =
    require('../../../lib/storage/metadata/MetadataWrapper');
const BucketRoutes =
    require('../../../lib/storage/metadata/proxy/BucketdRoutes');
const metadataWrapper = new MetadataWrapper('mem', {}, null, logger);
const { RequestDispatcher } = require('../../utils/mdProxyUtils');

const routes = new BucketRoutes(metadataWrapper, logger);
const dispatcher = new RequestDispatcher(routes);

const Bucket = 'test';
const bucketInfo = {
    acl: {
        Canned: 'private',
        FULL_CONTROL: [],
        WRITE: [],
        WRITE_ACP: [],
        READ: [],
        READ_ACP: [],
    },
    name: Bucket,
    owner: '9d8fe19a78974c56dceb2ea4a8f01ed0f5fecb9d29f80e9e3b84104e4a3ea520',
    ownerDisplayName: 'anonymousCoward',
    creationDate: '2018-06-04T17:45:42.592Z',
    mdBucketModelVersion: 8,
    transient: false,
    deleted: false,
    serverSideEncryption: null,
    versioningConfiguration: null,
    locationConstraint: 'us-east-1',
    readLocationConstraint: 'us-east-1',
    cors: null,
    replicationConfiguration: null,
    lifecycleConfiguration: null,
    uid: 'fea97818-6a9a-11e8-9777-e311618cc5d4',
    isNFS: null,
};

const objects = [
    'aaa',
    'bbb/xaa',
    'bbb/xbb',
    'bbb/xcc',
    'ccc',
    'ddd',
];

// Compute the listing that a delimiter-based ('/') listing of `objects`
// under `prefix` is expected to return: immediate keys and first-level
// common prefixes, deduplicated.
function _getExpectedListing(prefix, objects) {
    const filtered = objects.map(key => {
        const deprefixed = key.slice(prefix.length);
        // collapse everything after the first '/' into a common prefix
        return deprefixed.replace(/[/].*/, '/');
    });
    const keySet = {};
    return filtered.filter(key => {
        if (keySet[key]) {
            return false;
        }
        if (key === '') {
            return false;
        }
        keySet[key] = true;
        return true;
    });
}

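Traced against the fixture above, the helper yields, for illustration:

// _getExpectedListing('', objects)     -> ['aaa', 'bbb/', 'ccc', 'ddd']
// _getExpectedListing('bbb/', objects) -> ['xaa', 'xbb', 'xcc']
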
function _listingURL(prefix, marker) {
    const reSlash = /[/]/g;
    const escapedPrefix = prefix.replace(reSlash, '%2F');
    const escapedMarker = marker.replace(reSlash, '%2F');
    return `/default/bucket/${Bucket}?delimiter=%2F&prefix=` +
        `${escapedPrefix}&maxKeys=1&marker=${escapedMarker}`;
}

// Walk the expected listing one entry at a time (maxKeys=1), following
// NextMarker, and check each response against the expected key or common
// prefix.
function _listObjects(prefix, objects, cb) {
    const keys = _getExpectedListing(prefix, objects);
    const lastKey = keys[keys.length - 1];
    const listing = keys.map(key => ({
        key,
        IsTruncated: key !== lastKey,
        isPrefix: key.endsWith('/'),
    }));
    let nextMarker = '';
    async.mapSeries(listing, (obj, next) => {
        dispatcher.get(_listingURL(prefix, nextMarker),
            (err, response, body) => {
                if (err) {
                    return next(err);
                }
                if (obj.isPrefix) {
                    assert.strictEqual(body.Contents.length, 0);
                    assert.strictEqual(body.CommonPrefixes.length, 1);
                    assert.strictEqual(body.CommonPrefixes[0],
                        prefix + obj.key);
                } else {
                    assert.strictEqual(body.Contents.length, 1);
                    assert.strictEqual(body.CommonPrefixes.length, 0);
                    assert.strictEqual(body.Contents[0].key,
                        prefix + obj.key);
                }
                assert.strictEqual(body.IsTruncated, obj.IsTruncated);
                if (body.IsTruncated) {
                    nextMarker = body.NextMarker;
                }
                return next();
            });
    }, err => cb(err));
}

function _createObjects(objects, cb) {
    async.mapLimit(objects, 5, (key, next) => {
        dispatcher.post(`/default/bucket/${Bucket}/${key}`,
            { key }, next);
    }, err => {
        cb(err);
    });
}

function _readObjects(objects, cb) {
    async.mapLimit(objects, 5, (key, next) => {
        dispatcher.get(`/default/bucket/${Bucket}/${key}`,
            (err, response, body) => {
                if (err) {
                    return next(err);
                }
                assert.deepStrictEqual(body.key, key);
                return next();
            });
    }, err => {
        cb(err);
    });
}

function _deleteObjects(objects, cb) {
    async.mapLimit(objects, 5, (key, next) => {
        dispatcher.delete(`/default/bucket/${Bucket}/${key}`,
            err => next(err));
    }, err => {
        cb(err);
    });
}

describe('Basic Metadata Proxy Server test', () => {
    jest.setTimeout(10000);

    it('Should get the metadataInformation', done => {
        dispatcher.get('/default/metadataInformation',
            (err, response, body) => {
                if (err) {
                    return done(err);
                }
                assert.deepStrictEqual(body, { metadataVersion: 2 });
                return done();
            });
    });
});

describe('Basic Metadata Proxy Server CRUD test', () => {
    jest.setTimeout(10000);

    beforeEach(done => {
        dispatcher.post(`/default/bucket/${Bucket}`, bucketInfo, done);
    });

    afterEach(done => {
        dispatcher.delete(`/default/bucket/${Bucket}`, done);
    });

    it('Should get the bucket attributes', done => {
        dispatcher.get(`/default/attributes/${Bucket}`,
            (err, response, body) => {
                if (err) {
                    return done(err);
                }
                assert.deepStrictEqual(body.name, bucketInfo.name);
                return done();
            });
    });

    it('Should crud an object', done => {
        async.waterfall([
            next => dispatcher.post(`/default/bucket/${Bucket}/test1`,
                { foo: 'gabu' }, err => next(err)),
            next => dispatcher.get(`/default/bucket/${Bucket}/test1`,
                (err, response, body) => {
                    if (!err) {
                        assert.deepStrictEqual(body.foo, 'gabu');
                    }
                    // always advance the waterfall, forwarding any error
                    next(err);
                }),
            next => dispatcher.post(`/default/bucket/${Bucket}/test1`,
                { foo: 'zome' }, err => next(err)),
            next => dispatcher.get(`/default/bucket/${Bucket}/test1`,
                (err, response, body) => {
                    if (!err) {
                        assert.deepStrictEqual(body.foo, 'zome');
                    }
                    next(err);
                }),
            next => dispatcher.delete(`/default/bucket/${Bucket}/test1`,
                err => next(err)),
        ], err => done(err));
    });

    it('Should list objects', done => {
        async.waterfall([
            next => _createObjects(objects, next),
            next => _readObjects(objects, next),
            next => _listObjects('', objects, next),
            next => _listObjects('bbb/', objects, next),
            next => _deleteObjects(objects, next),
        ], err => {
            done(err);
        });
    });

    it('Should update bucket properties', done => {
        dispatcher.get(
            `/default/attributes/${Bucket}`, (err, response, body) => {
                assert.strictEqual(err, null);
                // use a local name to avoid shadowing the module-level
                // `bucketInfo` fixture
                const attributes = body;
                const newOwnerDisplayName = 'divertedfrom';
                attributes.ownerDisplayName = newOwnerDisplayName;
                dispatcher.post(
                    `/default/attributes/${Bucket}`, attributes, err => {
                        assert.strictEqual(err, null);
                        dispatcher.get(
                            `/default/attributes/${Bucket}`,
                            (err, response, body) => {
                                assert.strictEqual(err, null);
                                const newBucketInfo = body;
                                assert.strictEqual(
                                    newBucketInfo.ownerDisplayName,
                                    newOwnerDisplayName);
                                done(null);
                            });
                    });
            });
    });

    it('Should fail to list a non-existing bucket', done => {
        dispatcher.get('/default/bucket/nonexisting',
            (err, response) => {
                assert.strictEqual(
                    response.responseHead.statusCode, 404);
                done(err);
            });
    });

    it('Should fail to get attributes from a non-existing bucket', done => {
        dispatcher.get('/default/attributes/nonexisting',
            (err, response) => {
                assert.strictEqual(
                    response.responseHead.statusCode, 404);
                done(err);
            });
    });

    it('should succeed a health check', done => {
        dispatcher.get('/_/healthcheck', (err, response, body) => {
            if (err) {
                return done(err);
            }
            const expectedResponse = {
                memorybucket: {
                    code: 200,
                    message: 'OK',
                },
            };
            assert.strictEqual(response.responseHead.statusCode, 200);
            assert.deepStrictEqual(body, expectedResponse);
            return done(err);
        });
    });

    it('should work with parallel route', done => {
        const objectName = 'theObj';
        async.waterfall([
            next => _createObjects([objectName], next),
            next => {
                dispatcher.get(
                    `/default/parallel/${Bucket}/${objectName}`,
                    (err, response, body) => {
                        if (err) {
                            return next(err);
                        }
                        assert.strictEqual(response.responseHead.statusCode,
                            200);
                        const bucketMD = JSON.parse(body.bucket);
                        const objectMD = JSON.parse(body.obj);
                        const expectedObjectMD = { key: objectName };
                        assert.deepStrictEqual(bucketMD.name,
                            bucketInfo.name);
                        assert.deepStrictEqual(objectMD, expectedObjectMD);
                        return next(err);
                    });
            },
            next => _deleteObjects([objectName], next),
        ], done);
    });
});

@@ -0,0 +1,318 @@
'use strict'; // eslint-disable-line strict

const assert = require('assert');
const async = require('async');

const RedisClient = require('../../../lib/metrics/RedisClient').default;
const StatsModel = require('../../../lib/metrics/StatsModel').default;

// setup redis client
const config = {
    host: '127.0.0.1',
    port: 6379,
    enableOfflineQueue: true,
};
const fakeLogger = {
    trace: () => {},
    error: () => {},
};
const redisClient = new RedisClient(config, fakeLogger);

// setup stats model
const STATS_INTERVAL = 300; // 5 minutes
const STATS_EXPIRY = 86400; // 24 hours
const statsModel = new StatsModel(redisClient, STATS_INTERVAL, STATS_EXPIRY);

// Pad the expected values with zeroes up to the full sample window
// (STATS_EXPIRY / STATS_INTERVAL buckets).
function setExpectedStats(expected) {
    return expected.concat(
        Array((STATS_EXPIRY / STATS_INTERVAL) - expected.length).fill(0));
}
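
Concretely, with the constants above the sample window holds 86400 / 300 = 288 five-minute buckets, so for example:

// setExpectedStats([1, 2]) -> [1, 2, 0, 0, ..., 0] (288 entries in total)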

// Since many methods were overwritten, these tests should validate the
// changes made to the original methods
describe('StatsModel class', () => {
    const id = 'arsenal-test';
    const id2 = 'test-2';
    const id3 = 'test-3';

    afterEach(() => redisClient.clear(() => {}));

    it('should convert a 2d array\'s columns into rows and vice versa using _zip',
        () => {
            const arrays = [
                [1, 2, 3],
                [4, 5, 6],
                [7, 8, 9],
            ];

            const res = statsModel._zip(arrays);
            const expected = [
                [1, 4, 7],
                [2, 5, 8],
                [3, 6, 9],
            ];

            assert.deepStrictEqual(res, expected);
        });

    it('_zip should return an empty array if given an invalid array', () => {
        const arrays = [];

        const res = statsModel._zip(arrays);

        assert.deepStrictEqual(res, []);
    });

    it('_getCount should return an array of all valid integer values',
        () => {
            const res = statsModel._getCount([
                [null, '1'],
                [null, '2'],
                [null, null],
            ]);
            assert.deepStrictEqual(res, setExpectedStats([1, 2, 0]));
        });

    it('should correctly record a new request with a default increment of one',
        done => {
            async.series([
                next => {
                    statsModel.reportNewRequest(id, (err, res) => {
                        assert.ifError(err);

                        const expected = [[null, 1], [null, 1]];
                        assert.deepStrictEqual(res, expected);
                        next();
                    });
                },
                next => {
                    statsModel.reportNewRequest(id, (err, res) => {
                        assert.ifError(err);

                        const expected = [[null, 2], [null, 1]];
                        assert.deepStrictEqual(res, expected);
                        next();
                    });
                },
            ], done);
        });

    it('should record new requests by defined increment amounts', done => {
        function noop() {}

        async.series([
            next => {
                statsModel.reportNewRequest(id, 9);
                statsModel.getStats(fakeLogger, id, (err, res) => {
                    assert.ifError(err);

                    assert.deepStrictEqual(res.requests, setExpectedStats([9]));
                    next();
                });
            },
            next => {
                // without an explicit amount, the increment defaults to 1
                statsModel.reportNewRequest(id);
                statsModel.getStats(fakeLogger, id, (err, res) => {
                    assert.ifError(err);

                    assert.deepStrictEqual(res.requests,
                        setExpectedStats([10]));
                    next();
                });
            },
            next => {
                // a non-numeric second argument is treated as a callback
                statsModel.reportNewRequest(id, noop);
                statsModel.getStats(fakeLogger, id, (err, res) => {
                    assert.ifError(err);

                    assert.deepStrictEqual(res.requests,
                        setExpectedStats([11]));
                    next();
                });
            },
        ], done);
    });

    it('should correctly record a 500 on the server', done => {
        statsModel.report500(id, (err, res) => {
            assert.ifError(err);

            const expected = [[null, 1], [null, 1]];
            assert.deepStrictEqual(res, expected);
            done();
        });
    });

    it('should respond with total requests as an array', done => {
        async.series([
            next => {
                statsModel.reportNewRequest(id, err => {
                    assert.ifError(err);
                    next();
                });
            },
            next => {
                statsModel.report500(id, err => {
                    assert.ifError(err);
                    next();
                });
            },
            next => {
                statsModel.getStats(fakeLogger, id, (err, res) => {
                    assert.ifError(err);

                    const expected = {
                        'requests': setExpectedStats([1]),
                        '500s': setExpectedStats([1]),
                        'sampleDuration': STATS_EXPIRY,
                    };
                    assert.deepStrictEqual(res, expected);
                    next();
                });
            },
        ], done);
    });

    it('should not crash on empty results', done => {
        async.series([
            next => {
                statsModel.getStats(fakeLogger, id, (err, res) => {
                    assert.ifError(err);
                    const expected = {
                        'requests': setExpectedStats([]),
                        '500s': setExpectedStats([]),
                        'sampleDuration': STATS_EXPIRY,
                    };
                    assert.deepStrictEqual(res, expected);
                    next();
                });
            },
            next => {
                statsModel.getAllStats(fakeLogger, id, (err, res) => {
                    assert.ifError(err);
                    const expected = {
                        'requests': setExpectedStats([]),
                        '500s': setExpectedStats([]),
                        'sampleDuration': STATS_EXPIRY,
                    };
                    assert.deepStrictEqual(res, expected);
                    next();
                });
            },
        ], done);
    });

    it('should return a zero-filled array if no ids are passed to getAllStats',
        done => {
            statsModel.getAllStats(fakeLogger, [], (err, res) => {
                assert.ifError(err);

                assert.deepStrictEqual(res.requests, setExpectedStats([]));
                assert.deepStrictEqual(res['500s'], setExpectedStats([]));
                done();
            });
        });

    it('should get accurately reported data for given id from getAllStats',
        done => {
            statsModel.reportNewRequest(id, 9);
            statsModel.reportNewRequest(id2, 2);
            statsModel.reportNewRequest(id3, 3);
            statsModel.report500(id);

            async.series([
                next => {
                    statsModel.getAllStats(fakeLogger, [id], (err, res) => {
                        assert.ifError(err);

                        assert.equal(res.requests[0], 9);
                        assert.equal(res['500s'][0], 1);
                        next();
                    });
                },
                next => {
                    // requests across ids are summed: 9 + 2 + 3 = 14
                    statsModel.getAllStats(fakeLogger, [id, id2, id3],
                        (err, res) => {
                            assert.ifError(err);

                            assert.equal(res.requests[0], 14);
                            assert.deepStrictEqual(res.requests,
                                setExpectedStats([14]));
                            next();
                        });
                },
            ], done);
        });

    it('should normalize to the nearest hour using normalizeTimestampByHour',
        () => {
            const date = new Date('2018-09-13T23:30:59.195Z');
            const newDate = new Date(statsModel.normalizeTimestampByHour(date));

            assert.strictEqual(date.getHours(), newDate.getHours());
            assert.strictEqual(newDate.getMinutes(), 0);
            assert.strictEqual(newDate.getSeconds(), 0);
            assert.strictEqual(newDate.getMilliseconds(), 0);
        });

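The behavior pinned down by this test amounts to truncating a timestamp to the top of the hour. A minimal equivalent sketch (an illustration only; StatsModel's actual implementation may differ):

// hypothetical equivalent of normalizeTimestampByHour, for illustration
function normalizeTimestampByHourSketch(date) {
    const d = new Date(date);
    d.setMinutes(0, 0, 0); // zero out minutes, seconds and milliseconds
    return d.getTime();
}
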
    it('should get previous hour using _getDatePreviousHour', () => {
        const date = new Date('2018-09-13T23:30:59.195Z');
        const newDate = statsModel._getDatePreviousHour(new Date(date));

        const millisecondsInOneHour = 3600000;
        assert.strictEqual(date - newDate, millisecondsInOneHour);
    });

    it('should get an array of hourly timestamps using getSortedSetHours',
        () => {
            const epoch = 1536882476501;
            const millisecondsInOneHour = 3600000;

            const expected = [];
            let dateInMilliseconds = statsModel.normalizeTimestampByHour(
                new Date(epoch));

            // 24 hourly timestamps, going backwards from the normalized epoch
            for (let i = 0; i < 24; i++) {
                expected.push(dateInMilliseconds);
                dateInMilliseconds -= millisecondsInOneHour;
            }
            const res = statsModel.getSortedSetHours(epoch);

            assert.deepStrictEqual(res, expected);
        });

    it('should apply TTL on a new sorted set using addToSortedSet', done => {
        const key = 'a-test-key';
        const score = 100;
        const value = 'a-value';

        const now = Date.now();
        const nearestHour = statsModel.normalizeTimestampByHour(new Date(now));

        statsModel.addToSortedSet(key, score, value, (err, res) => {
            assert.ifError(err);
            // check both a "zadd" and "expire" occurred
            assert.equal(res, 1);
            redisClient.ttl(key, (err, res) => {
                assert.ifError(err);
                // assert this new set has a ttl applied
                assert(res > 0);

                // milliseconds elapsed since the top of the hour
                const adjustmentMs = now - nearestHour;
                const msInADay = 24 * 60 * 60 * 1000;
                const msInAnHour = 60 * 60 * 1000;
                const upperLimitSecs =
                    Math.ceil((msInADay - adjustmentMs) / 1000);
                const lowerLimitSecs =
                    Math.floor((msInADay - adjustmentMs - msInAnHour) / 1000);

                // assert the new ttl is between 23 and 24 hours, adjusted by
                // the time elapsed since the normalized hourly time
                assert(res >= lowerLimitSecs);
                assert(res <= upperLimitSecs);

                done();
            });
        });
    });
});