# Compare commits

560 commits — improvemen… → developmen…

Author | SHA1 | Date |
---|---|---|
Vitaliy Filippov | 19855115ae | |
Vitaliy Filippov | 329d8ef32c | |
Vitaliy Filippov | f0ded4ea4f | |
Vitaliy Filippov | 3eea263384 | |
Vitaliy Filippov | c26d4f7d70 | |
Vitaliy Filippov | 63137e7a7b | |
Vitaliy Filippov | fdb23b1cd2 | |
Vitaliy Filippov | 4120eac127 | |
Maha Benzekri | d9bbd6cf3e | |
Maha Benzekri | 65e89d286d | |
Maha Benzekri | dcbc5ca98f | |
Maha Benzekri | 817bb836ec | |
Maha Benzekri | e3e4b2aea7 | |
Francois Ferrand | 9cd72221e8 | |
Francois Ferrand | bdcd4685ad | |
Francois Ferrand | b2b6c47ba7 | |
Jonathan Gramain | da173d53b4 | |
Jonathan Gramain | 7eb2701f21 | |
Jonathan Gramain | 6ec3c8e10d | |
Jonathan Gramain | 7aaf277db2 | |
Francois Ferrand | 67421f8c76 | |
Francois Ferrand | bf2260b1ae | |
Francois Ferrand | 11e0e1b489 | |
Anurag Mittal | f13ec2cf4c | |
Anurag Mittal | e369c7e6d2 | |
Anurag Mittal | c5c1db4568 | |
Anurag Mittal | 58f4d3cb3a | |
Anurag Mittal | b049f39e2a | |
williamlardier | 30eaaf15eb | |
williamlardier | 9d16fb0a34 | |
williamlardier | cdc612f379 | |
williamlardier | 61dd65b2c4 | |
bert-e | 2c0696322e | |
Maha Benzekri | c464a70b90 | |
Maha Benzekri | af07bb3df4 | |
Maha Benzekri | 1858654f34 | |
Maha Benzekri | 0475c8520a | |
Maha Benzekri | 31a4de5372 | |
Maha Benzekri | 0c53d13439 | |
Maha Benzekri | cad8b14df1 | |
Nicolas Humbert | fe29bacc79 | |
Nicolas Humbert | a86cff4631 | |
Kerkesni | f13a5d79ea | |
Maha Benzekri | ca8f570f15 | |
Maha Benzekri | a4bca10faf | |
Jonathan Gramain | c2ab4a2052 | |
Jonathan Gramain | fd0aa314eb | |
Jonathan Gramain | a643a3e6cc | |
Jonathan Gramain | e9d815cc9d | |
Jonathan Gramain | c86d24fc8f | |
Jonathan Gramain | 3b6d3838f5 | |
Jonathan Gramain | fcdfa889be | |
Mickael Bourgois | 5b8fcf0313 | |
Mickael Bourgois | bdfde26fe4 | |
Mickael Bourgois | e53613783a | |
Mickael Bourgois | 69dbbb143a | |
Mickael Bourgois | 403c4e5040 | |
Nicolas Humbert | a1dc2bd84d | |
Nicolas Humbert | 01409d690c | |
Nicolas Humbert | 9ee40f343b | |
bert-e | 77ed018b4f | |
bert-e | f77700236f | |
Nicolas Humbert | 43ff16b28a | |
bert-e | 05c628728d | |
Nicolas Humbert | 2a807dc4ef | |
Nicolas Humbert | 1f8b0a4032 | |
bert-e | 0dd7fe9875 | |
Mickael Bourgois | f7a6af8d9a | |
Mickael Bourgois | e6d0eff1a8 | |
Mickael Bourgois | 9d558351e7 | |
Mickael Bourgois | 68150da72e | |
Mickael Bourgois | 2b2c4bc50e | |
Mickael Bourgois | 3068086a97 | |
Mickael Bourgois | 0af7eb5530 | |
bert-e | 7e372b7bd5 | |
bert-e | a121810552 | |
bert-e | 9bf1bcc483 | |
Nicolas Humbert | 06402c6c94 | |
Nicolas Humbert | a6f3c82827 | |
Nicolas Humbert | f1891851b3 | |
bert-e | a1eed4fefb | |
Nicolas Humbert | 68204448a1 | |
Nicolas Humbert | 40e271f7e2 | |
bert-e | d8f7f18f5a | |
bert-e | 5f4d7afefb | |
bert-e | 2482fdfafc | |
bert-e | e151b3fff1 | |
Nicolas Humbert | b8bbdbbd81 | |
Nicolas Humbert | 46258bca74 | |
williamlardier | b6bc11881a | |
williamlardier | 648257612b | |
williamlardier | 7423fac674 | |
williamlardier | 9647043a02 | |
williamlardier | f9e1f91791 | |
williamlardier | 9c5bc2bfe0 | |
Jonathan Gramain | 1a0a981271 | |
bert-e | a45b2eb6a4 | |
bert-e | b00378d46d | |
Mickael Bourgois | 2c3bfb16ef | |
Jonathan Gramain | c72d8be223 | |
Jonathan Gramain | f63cb3c762 | |
bert-e | 15fd621c5c | |
bert-e | effbf63dd4 | |
bert-e | 285fe2f63b | |
bert-e | 1d8ebe6a9c | |
bert-e | 00555597e0 | |
bert-e | bddc2ccd01 | |
Jonathan Gramain | 7908654b51 | |
Jonathan Gramain | 0d7cf8d40a | |
Jonathan Gramain | c4c75e976c | |
Jonathan Gramain | 1266a14253 | |
williamlardier | 851c72bd0f | |
bert-e | 722b6ae699 | |
bert-e | 29925a15ad | |
williamlardier | 6b64f50450 | |
Jonathan Gramain | 8dc3ba7ca6 | |
bert-e | 3c2283b062 | |
Jonathan Gramain | a6a76acede | |
Jonathan Gramain | 6a116734a9 | |
Jonathan Gramain | 9325ea4996 | |
Jonathan Gramain | 33ba89f0cf | |
Jonathan Gramain | c67331d350 | |
Jonathan Gramain | 6d6f1860ef | |
Nicolas Humbert | cbe6a5e2d6 | |
Mickael Bourgois | be1557d972 | |
Mickael Bourgois | a03463061c | |
Mickael Bourgois | 8ad0ea73a7 | |
Mickael Bourgois | a94040d13b | |
Mickael Bourgois | f265ed6122 | |
Mickael Bourgois | 7301c706fd | |
Mickael Bourgois | bfc8dee559 | |
Frédéric Meinnel | 5a5ef7c572 | |
Frédéric Meinnel | 918c2c5473 | |
Frédéric Meinnel | 29f39ab480 | |
Frédéric Meinnel | b7ac7f4616 | |
Frédéric Meinnel | f8ce90f9c3 | |
Frédéric Meinnel | 5734d11cf1 | |
Frédéric Meinnel | 4da59769d2 | |
Frédéric Meinnel | 60573991ee | |
Jonathan Gramain | 6f58f9dd68 | |
Jonathan Gramain | 3b9c93be68 | |
Jonathan Gramain | 081af3e795 | |
bert-e | 042f541a45 | |
bert-e | 63bf2cb5b1 | |
bert-e | 39f42d9cb4 | |
Mickael Bourgois | 02f126f040 | |
bert-e | 1477a70e47 | |
Mickael Bourgois | 7233ec2635 | |
Mickael Bourgois | c4b44016bc | |
Mickael Bourgois | a78a84faa7 | |
Mickael Bourgois | c3ff6526a1 | |
Frédéric Meinnel | 59d47a3e21 | |
Frédéric Meinnel | 6b61347c29 | |
Mickael Bourgois | 4bf29524eb | |
Mickael Bourgois | 9aa001c4d1 | |
Frédéric Meinnel | aea4663ff2 | |
Frédéric Meinnel | 5012e9209c | |
Frédéric Meinnel | 1568ad59c6 | |
bert-e | c2f6b45116 | |
bert-e | a0322b131c | |
Mickael Bourgois | b5487e3c94 | |
bert-e | 993b9e6093 | |
bert-e | ddd6c87831 | |
Mickael Bourgois | f2974cbd07 | |
bert-e | 7440794d93 | |
Mickael Bourgois | 1efab676bc | |
Mickael Bourgois | a167e1d5fa | |
Mickael Bourgois | c7e153917a | |
bert-e | 087369b37d | |
bert-e | 2d2030dfe4 | |
bert-e | 45cc4aa79e | |
Will Toozs | da80e12dab | |
Will Toozs | a7cf94d0fe | |
Jonathan Gramain | 2a82095d03 | |
Jonathan Gramain | 44b3d25459 | |
Jonathan Gramain | f1d6e30fb6 | |
Jonathan Gramain | 9186643caa | |
Jonathan Gramain | 485a76ceb9 | |
Jonathan Gramain | 00109a2c44 | |
Jonathan Gramain | aed1247825 | |
Jonathan Gramain | 0507c04ce9 | |
Will Toozs | 62736abba4 | |
Will Toozs | 97118f09c4 | |
Will Toozs | 5a84a8c0ad | |
bert-e | 37234efd14 | |
Jonathan Gramain | 2799381ef2 | |
Jonathan Gramain | a3f13e5387 | |
Jonathan Gramain | f4e83086d6 | |
Jonathan Gramain | d08a267965 | |
Jonathan Gramain | 063a2fb8fb | |
Jonathan Gramain | 1bc3360daf | |
Jonathan Gramain | 206f14bdf5 | |
Maha Benzekri | 74ff1691a0 | |
Maha Benzekri | 5ffae72693 | |
Maha Benzekri | 477a574500 | |
bert-e | 2a4ea38301 | |
bert-e | df4c22154e | |
Maha Benzekri | 3642ac03b2 | |
Francois Ferrand | d800179f86 | |
Francois Ferrand | c1c45a4af9 | |
Francois Ferrand | da536ed037 | |
Nicolas Humbert | 06901104e8 | |
Nicolas Humbert | a99a6d9d97 | |
Nicolas Humbert | 06244059a8 | |
Nicolas Humbert | 079f631711 | |
Benoit A. | 863f45d256 | |
KillianG | 4b642cf8b4 | |
KillianG | 2537f8aa9a | |
Maha Benzekri | 7866a1d06f | |
Maha Benzekri | 29ef2ef265 | |
Maha Benzekri | 1509f1bdfe | |
Maha Benzekri | 13d349d211 | |
Maha Benzekri | 34a32c967d | |
Maha Benzekri | 90ab985271 | |
Maha Benzekri | fbf5562a11 | |
bert-e | d79ed1b9c8 | |
bert-e | c34ad0dc31 | |
Maha Benzekri | df5ff0f400 | |
Maha Benzekri | 777783171a | |
Will Toozs | 39988e52e2 | |
Will Toozs | 79c82a4c3d | |
williamlardier | 17b5bbc233 | |
williamlardier | 4aa8b5cc6e | |
williamlardier | 5deed6c2e1 | |
Nicolas Humbert | af34571771 | |
Nicolas Humbert | 79b83a9067 | |
Nicolas Humbert | 5fd675a316 | |
Nicolas Humbert | d84cc974d3 | |
Maha Benzekri | dcf0f902ff | |
Maha Benzekri | 0177fbe98f | |
Maha Benzekri | f49cea3914 | |
Maha Benzekri | 73c6f41fa3 | |
bert-e | 5b66f8d089 | |
bert-e | b61d178b18 | |
Maha Benzekri | 9ea39c6ed9 | |
Florent Monjalet | e51b06cfea | |
Florent Monjalet | f2bc701f8c | |
Nicolas Humbert | 4d6b03ba47 | |
Nicolas Humbert | f03f049683 | |
Nicolas Humbert | d7b51de024 | |
Nicolas Humbert | cf51adf1c7 | |
Nicolas Humbert | 8a7c1be2d1 | |
Nicolas Humbert | c049df0a97 | |
Nicolas Humbert | 2b2667e29a | |
Nicolas Humbert | 8eb4a29c36 | |
bert-e | 862317703e | |
Nicolas Humbert | e69a97f240 | |
Nicolas Humbert | 81e838000f | |
bert-e | 547ce816e0 | |
Nicolas Humbert | 8256d6debf | |
bert-e | 15d5e93a2d | |
Nicolas Humbert | 69c1698eb7 | |
bert-e | d11bcb56e9 | |
Nicolas Humbert | c2cd90925f | |
bert-e | 0ed35c3d86 | |
bert-e | b1723594eb | |
Nicolas Humbert | c0218821ff | |
Nicolas Humbert | 49e32758fb | |
Nicolas Humbert | e13d0f5ed8 | |
Nicolas Humbert | 0d5907956f | |
Nicolas Humbert | f0c5d60ce9 | |
Nicolas Humbert | 8c2f4cf357 | |
Nicolas Humbert | f3f1da9bb3 | |
Nicolas Humbert | 036b75842e | |
Nicolas Humbert | 7ac5774635 | |
Nicolas Humbert | f3b928fce0 | |
Nicolas Humbert | 7173a357d9 | |
Nicolas Humbert | 7c4f461196 | |
Nicolas Humbert | 0a4d6f862f | |
bert-e | 8716fee67d | |
bert-e | 2938bb0c88 | |
williamlardier | 05c93446ab | |
williamlardier | 8d758327dd | |
williamlardier | be63c09624 | |
Nicolas Humbert | 4615875462 | |
Rahul Padigela | bdb59a0e63 | |
bert-e | a89d1d8d75 | |
Rahul Padigela | 89e5f7dffe | |
williamlardier | 57e84980c8 | |
williamlardier | 51bfd41bea | |
Nicolas Humbert | 96cbaeb821 | |
Nicolas Humbert | cb01346d07 | |
Nicolas Humbert | 3f24336b83 | |
Nicolas Humbert | 1e66518a79 | |
bert-e | 15b68fa9fa | |
Nicolas Humbert | 51703a65f5 | |
bert-e | 09aaa2d5ee | |
Nicolas Humbert | ad39d90b6f | |
Jonathan Gramain | 20e9fe4adb | |
bert-e | e9c67f7f67 | |
Jonathan Gramain | af3fd17ec2 | |
bert-e | 536d474f57 | |
bert-e | 55e68cfa17 | |
bert-e | 67c98fd81b | |
williamlardier | 5cd70d7cf1 | |
KillianG | 25be9014c9 | |
KillianG | ed42f24580 | |
KillianG | ce076cb3df | |
KillianG | 4bc3de52ff | |
bert-e | beb5f69be3 | |
bert-e | 5f3540a0d5 | |
bert-e | 654d628d39 | |
gaspardmoindrot | e8a409e337 | |
Alexander Chan | 4093bf2b04 | |
Alexander Chan | d0bb6d5b0c | |
bert-e | 3f7229eebe | |
bert-e | 7eb9d52da5 | |
Nicolas Humbert | e216c9dd20 | |
williamlardier | 0c1afe535b | |
williamlardier | 73335ae6ec | |
Alexander Chan | 99c514e8f2 | |
Alexander Chan | cfd9fdcfc4 | |
Alexander Chan | d809dac5e3 | |
williamlardier | 53dac8d233 | |
williamlardier | 6d5ef07eee | |
williamlardier | 272166e406 | |
williamlardier | 3af05e672b | |
williamlardier | 8b0c90cb2f | |
Alexander Chan | dfc9b761e2 | |
Alexander Chan | 04f1eb7f04 | |
bert-e | c204b90847 | |
bert-e | 78d6e7fd72 | |
Alexander Chan | 7768fa8d35 | |
KillianG | 4d9a9adc48 | |
KillianG | c4804e52ee | |
KillianG | 671cf3a679 | |
Jonathan Gramain | 9a5e27f97b | |
Jonathan Gramain | d744a709d2 | |
Jonathan Gramain | a9d003c6f8 | |
Jonathan Gramain | 99e04bd6fa | |
Jonathan Gramain | d3bdddeba3 | |
bert-e | 3252f7de03 | |
Jonathan Gramain | c4cc5a2c3d | |
Jonathan Gramain | fedd0190cc | |
Jonathan Gramain | 56fd4ad734 | |
Jonathan Gramain | ebe6b65fcf | |
Nicolas Humbert | 7994bf7b96 | |
Nicolas Humbert | 4be0a06c4a | |
bert-e | da7dbdc51f | |
Will Toozs | 2103ef1237 | |
Will Toozs | dbc1c54246 | |
bert-e | 6c22f8404d | |
KillianG | 00e03f0592 | |
KillianG | d453758b7d | |
KillianG | a964dc99c3 | |
Jonathan Gramain | 3a4da1d7c0 | |
williamlardier | 5074e6c0a4 | |
williamlardier | bd05dd6918 | |
williamlardier | fbda12ce3c | |
Nicolas Humbert | b02934bb39 | |
Nicolas Humbert | c9a444969b | |
Nicolas Humbert | 5d018860ec | |
bert-e | 5838e02096 | |
Nicolas Humbert | ecd600ac4b | |
Naren | ab0324da05 | |
Naren | 2b353b33af | |
Naren | 5377b20ceb | |
Naren | 21b329b301 | |
Naren | bd76402586 | |
bert-e | fd57f47be1 | |
bert-e | 94edf8be70 | |
Naren | 1d104345fd | |
Jonathan Gramain | 58e47e5015 | |
Jonathan Gramain | 4d782ecec6 | |
Jonathan Gramain | 655a10ce52 | |
Jonathan Gramain | 0c7f0e607d | |
Jonathan Gramain | caa5d53e9b | |
Jonathan Gramain | 21da975187 | |
bert-e | e0df67a115 | |
Naren | 7e18ae77e0 | |
Naren | 4750118f85 | |
Naren | c273c8b823 | |
Jonathan Gramain | d3b50fafa8 | |
Naren | 47e68a9b60 | |
Naren | bd0a199ffa | |
Naren | 4b1f69bcbb | |
Naren | e3a6814e3f | |
Alexander Chan | bf4072151f | |
Alexander Chan | f33cd69e45 | |
Alexander Chan | acd13ff31b | |
Alexander Chan | bb3e5d078f | |
Jonathan Gramain | 22fa04b7e7 | |
Jonathan Gramain | 10a94a0a96 | |
bert-e | 4d71a834d5 | |
Alexander Chan | 054f61d6c1 | |
Alexander Chan | fa26a487f5 | |
Alexander Chan | c1dd2e4946 | |
Alexander Chan | a714103b82 | |
Jonathan Gramain | 66740f5aba | |
Jonathan Gramain | a3a83dd89c | |
williamlardier | 8db8109391 | |
Jonathan Gramain | d90af29019 | |
Jonathan Gramain | 9d8d98fcc9 | |
Jonathan Gramain | 01830d19a0 | |
Jonathan Gramain | 49cc018fa4 | |
Jonathan Gramain | dd87c869ca | |
Jonathan Gramain | df44cffb96 | |
Jonathan Gramain | 164053d1e8 | |
Jonathan Gramain | af741c50fb | |
williamlardier | 9c46703b89 | |
williamlardier | 47672d60ce | |
Jonathan Gramain | 6d41d103e8 | |
Jonathan Gramain | 34ccca9b07 | |
Jonathan Gramain | 6e5d8d14af | |
Jonathan Gramain | 890ac08dcd | |
Jonathan Gramain | 4cda9f6a6b | |
Jonathan Gramain | fbb62ef17c | |
Jonathan Gramain | 4949b7cc35 | |
Jonathan Gramain | 2b6fee4e84 | |
Jonathan Gramain | 8077186c3a | |
Jonathan Gramain | 1c07618b18 | |
Jonathan Gramain | 4d7eaee0cc | |
williamlardier | c460338163 | |
williamlardier | f17d52b602 | |
williamlardier | a6b234b7a8 | |
williamlardier | ff353bb4d6 | |
williamlardier | 0f9c9c2f18 | |
williamlardier | f6b2cf2c1a | |
Kerkesni | ecafbae36a | |
Kerkesni | d1cd7e8dba | |
Francois Ferrand | 3da6719200 | |
Francois Ferrand | c0dd54ef51 | |
Francois Ferrand | 7910792390 | |
Francois Ferrand | a4f4c51290 | |
Francois Ferrand | 66c4bc52b5 | |
Francois Ferrand | 81cd6652d6 | |
Francois Ferrand | 2a07f67244 | |
Francois Ferrand | 1a634015ee | |
williamlardier | 7a88a54918 | |
williamlardier | b25e620750 | |
williamlardier | 38ef89cc83 | |
williamlardier | 1a6c828bfc | |
williamlardier | 3d769c6960 | |
williamlardier | 8a27920a85 | |
williamlardier | 7642a22176 | |
Jonathan Gramain | 7b64896234 | |
Jonathan Gramain | 4f0a846814 | |
bert-e | 8f63687ef3 | |
Kerkesni | 26f45fa81a | |
Kerkesni | 76b59057f7 | |
Kerkesni | ae0da3d605 | |
bert-e | 7c1bd453ee | |
bert-e | 162d9ec46b | |
Kerkesni | ccd6462015 | |
Kerkesni | 665c77570c | |
Kerkesni | 27307b397c | |
Kerkesni | 414eada32b | |
Kerkesni | fdf0c6fe99 | |
Kerkesni | 8cc0be7da2 | |
bert-e | 65231633a7 | |
Kerkesni | 9a975723c1 | |
Kerkesni | ef024ddef3 | |
Kerkesni | b61138a348 | |
Kerkesni | d852eef08e | |
Kerkesni | fd63b857f3 | |
Alexander Chan | 92c567414a | |
Alexander Chan | ec55e39175 | |
Jonathan Gramain | c343820cae | |
Jonathan Gramain | 0f9da6a44e | |
Jonathan Gramain | 53a42f7411 | |
Jonathan Gramain | 9c2bed8034 | |
williamlardier | 8307a1513e | |
williamlardier | 706c2425fe | |
williamlardier | 8618d77de9 | |
williamlardier | 9d614a4ab3 | |
williamlardier | 7763685cb0 | |
Artem Bakalov | 8abe746222 | |
Artem Bakalov | 4c6712741b | |
bert-e | e74cca6795 | |
Artem Bakalov | 87b060f2ae | |
bert-e | 1427abecb7 | |
bert-e | 9dc357ab8d | |
bert-e | 4771ce3067 | |
Artem Bakalov | f62c3d22ed | |
williamlardier | 4e8a907d99 | |
williamlardier | a237e38c51 | |
williamlardier | 4388cb7790 | |
williamlardier | 095a2012cb | |
Killian Gardahaut | 6f42b3e64c | |
Killian Gardahaut | 264e0c1aad | |
Jonathan Gramain | 237872a5a3 | |
Jonathan Gramain | 0130355e1a | |
bert-e | 390fd97edf | |
Nicolas Humbert | 1c9e4eb93d | |
bert-e | af50ef47d7 | |
bert-e | a4f163f466 | |
Nicolas Humbert | 4d0cc9bc12 | |
bert-e | 657f969d05 | |
Jonathan Gramain | 4f2b1ca960 | |
bert-e | b43cf22b2c | |
Killian Gardahaut | 46c44ccaa6 | |
Killian Gardahaut | f45f65596b | |
bert-e | 90c63168c1 | |
bert-e | 10402ae78d | |
Jonathan Gramain | 5cd1df8601 | |
Jonathan Gramain | ee38856f29 | |
Jonathan Gramain | fe5f868f43 | |
Jonathan Gramain | dc229bb8aa | |
Killian Gardahaut | c0ee81eb7a | |
Killian Gardahaut | a6a48e812f | |
bert-e | 604a0170f1 | |
bert-e | 5a8372437b | |
Killian Gardahaut | 9d8f4793c9 | |
Killian Gardahaut | 69d33a3341 | |
Killian Gardahaut | c4ead93bd9 | |
Jonathan Gramain | 981c9c1a23 | |
Jonathan Gramain | 71de409ee9 | |
KillianG | 806f988334 | |
KillianG | 976a05c3e5 | |
KillianG | 46c24c5cc3 | |
Killian Gardahaut | c5004cb521 | |
KillianG | bc9cfb0b6d | |
KillianG | 4b6e342ff8 | |
Killian Gardahaut | d48d4d0c18 | |
Killian Gardahaut | 5a32c8eca0 | |
Kerkesni | 480f5a4427 | |
bert-e | 852ae9bd0f | |
Kerkesni | 6c132bca90 | |
Taylor McKinnon | 3d77540c47 | |
Taylor McKinnon | 3882ecf1a0 | |
Taylor McKinnon | 4f0506cf31 | |
Taylor McKinnon | acf38cc010 | |
Nicolas Humbert | d92a91f076 | |
Nicolas Humbert | 28779db602 | |
Alexander Chan | 8db16c5532 | |
Jordi Bertran de Balanda | 33439ec215 | |
Jordi Bertran de Balanda | 785b824b69 | |
bert-e | 9873c0f112 | |
Jordi Bertran de Balanda | 63212e2db3 | |
Nicolas Humbert | 725a492c2c | |
Nicolas Humbert | e446e3e132 | |
bert-e | 25c6b34a1e | |
Jordi Bertran de Balanda | 721d7ede93 | |
Jordi Bertran de Balanda | 3179d1c620 | |
Nicolas Humbert | fbbba32d69 | |
Jordi Bertran de Balanda | 56c1ba5c21 | |
Will Toozs | 73431094a3 | |
Will Toozs | aed1d8419b | |
Will Toozs | c3cb0aa514 | |
bert-e | 5919d20fa4 | |
Nicolas Humbert | 56665069c1 | |
Nicolas Humbert | 61fe54bd73 | |
Francois Ferrand | e227d9d5ca | |
Francois Ferrand | a206b5f95e | |
Francois Ferrand | 9b8f9f8afd | |
Francois Ferrand | cdcc44d272 | |
Francois Ferrand | 066be20a9d | |
Xin LI | 5acef6895f | |
Xin LI | 6e3386f693 | |
Xin LI | 2c630848ee | |
williamlardier | f7d360fe0b | |
williamlardier | 0a61b43252 | |
williamlardier | c014e630be | |
williamlardier | a747d5feda | |
KillianG | 765857071a | |
KillianG | 91b39da7e5 | |
williamlardier | 2cc6ebe9b4 | |
Xin LI | 5634e1bb1f | |
williamlardier | 7887d22d0d | |
williamlardier | 2f142aea7f | |
```diff
@@ -1 +1,6 @@
-{ "extends": "scality" }
+{
+    "extends": "scality",
+    "parserOptions": {
+        "ecmaVersion": 2020
+    }
+}
```
```diff
@@ -0,0 +1,25 @@
+---
+name: codeQL
+
+on:
+  push:
+    branches: [development/*, stabilization/*, hotfix/*]
+  pull_request:
+    branches: [development/*, stabilization/*, hotfix/*]
+  workflow_dispatch:
+
+jobs:
+  analyze:
+    name: Static analysis with CodeQL
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+
+      - name: Initialize CodeQL
+        uses: github/codeql-action/init@v3
+        with:
+          languages: javascript, typescript
+
+      - name: Build and analyze
+        uses: github/codeql-action/analyze@v3
```
```diff
@@ -0,0 +1,16 @@
+---
+name: dependency review
+
+on:
+  pull_request:
+    branches: [development/*, stabilization/*, hotfix/*]
+
+jobs:
+  dependency-review:
+    runs-on: ubuntu-latest
+    steps:
+      - name: 'Checkout Repository'
+        uses: actions/checkout@v4
+
+      - name: 'Dependency Review'
+        uses: actions/dependency-review-action@v4
```
```diff
@@ -25,18 +25,18 @@ jobs:
           - 6379:6379
     steps:
       - name: Checkout
-        uses: actions/checkout@v2
-      - uses: actions/setup-node@v2
+        uses: actions/checkout@v4
+      - uses: actions/setup-node@v4
         with:
           node-version: '16'
           cache: 'yarn'
       - name: install dependencies
-        run: yarn install --frozen-lockfile --prefer-offline
+        run: yarn install --frozen-lockfile --prefer-offline --network-concurrency 1
         continue-on-error: true # TODO ARSN-97 Remove it when no errors in TS
       - name: lint yaml
         run: yarn --silent lint_yml
       - name: lint javascript
-        run: yarn --silent lint -- --max-warnings 0
+        run: yarn --silent lint --max-warnings 0
       - name: lint markdown
         run: yarn --silent lint_md
       - name: add hostname
```
```diff
@@ -46,7 +46,9 @@ jobs:
         run: yarn --silent coverage
       - name: run functional tests
         run: yarn ft_test
-      - uses: codecov/codecov-action@v2
+      - uses: codecov/codecov-action@v4
+        with:
+          token: ${{ secrets.CODECOV_TOKEN }}
       - name: run executables tests
         run: yarn install && yarn test
         working-directory: 'lib/executables/pensieveCreds/'
```
```diff
@@ -57,9 +59,9 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v4
       - name: Install NodeJS
-        uses: actions/setup-node@v2
+        uses: actions/setup-node@v4
         with:
           node-version: '16'
           cache: yarn
```
```diff
@@ -70,7 +72,7 @@ jobs:
         run: yarn build
         continue-on-error: true # TODO ARSN-97 Remove it when no errors in TS
       - name: Upload artifacts
-        uses: scality/action-artifacts@v2
+        uses: scality/action-artifacts@v4
         with:
           url: https://artifacts.scality.net
           user: ${{ secrets.ARTIFACTS_USER }}
```
```diff
@@ -0,0 +1,12 @@
+{
+    "$schema": "https://swc.rs/schema.json",
+    "jsc": {
+        "parser": {
+            "syntax": "typescript"
+        },
+        "target": "es2017"
+    },
+    "module": {
+        "type": "commonjs"
+    }
+}
```
@@ -178,3 +178,83 @@ this._serverSideEncryption.configuredMasterKeyId = configuredMasterKeyId || undefined;

### Usage

Used to store the users configured KMS key id

## Model version 15

### Properties Added

```javascript
this._tags = tags || null;
```

The Tag Set of a bucket is an array of objects with Key and Value:

```javascript
[
    {
        Key: 'something',
        Value: 'some_data'
    }
]
```

## Model version 16

### Properties Added

```javascript
this._capabilities = capabilities || undefined;
```

For capacity-enabled buckets, contains the following data:

```javascript
{
    _capabilities: {
        VeeamSOSApi?: {
            SystemInfo?: {
                ProtocolVersion: String,
                ModelName: String,
                ProtocolCapabilities: {
                    CapacityInfo: Boolean,
                    UploadSessions: Boolean,
                    IAMSTS: Boolean,
                },
                APIEndpoints: {
                    IAMEndpoint: String,
                    STSEndpoint: String,
                },
                SystemRecommendations?: {
                    S3ConcurrentTaskLimit: Number,
                    S3MultiObjectDelete: Number,
                    StorageCurrentTasksLimit: Number,
                    KbBlockSize: Number,
                }
                LastModified?: String,
            },
            CapacityInfo?: {
                Capacity: Number,
                Available: Number,
                Used: Number,
                LastModified?: String,
            },
        },
    },
}
```

### Usage

Used to store bucket tagging

## Model version 17

### Properties Added

```javascript
this._quotaMax = quotaMax || 0;
```

### Usage

Used to store bucket quota
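To make the model version 15 shape concrete, here is a minimal, hypothetical sketch — `serializeTagSet` is illustrative only and not part of the diff above — converting the stored tag array into the `k=v&k=v` form used by the S3 `Tagging` query string:

```javascript
// Hypothetical helper, not part of the diff above: serialize a bucket tag
// set stored in the model version 15 shape (an array of { Key, Value }
// objects, or null) into the 'k1=v1&k2=v2' S3 Tagging query-string form.
function serializeTagSet(tags) {
    if (!tags) {
        return ''; // _tags defaults to null when the bucket has no tags
    }
    return tags
        .map(tag => `${encodeURIComponent(tag.Key)}=${encodeURIComponent(tag.Value)}`)
        .join('&');
}

// serializeTagSet([{ Key: 'something', Value: 'some_data' }])
// -> 'something=some_data'
```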
@@ -0,0 +1,27 @@

# Delimiter

The Delimiter class handles raw listings from the database with an
optional delimiter, and fills in a curated listing with "Contents" and
"CommonPrefixes" as a result.

## Expected Behavior

- only lists keys belonging to the given **prefix** (if provided)

- groups listed keys that have a common prefix ending with a delimiter
  inside CommonPrefixes

- can take a **marker** or **continuationToken** to list from a specific key

- can take a **maxKeys** parameter to limit how many keys can be returned

## State Chart

- States with grey background are *Idle* states, which are waiting for
  a new listing key

- States with blue background are *Processing* states, which are
  actively processing a new listing key passed by the filter()
  function

![Delimiter State Chart](./pics/delimiterStateChart.svg)
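The grouping rule above can be illustrated with a small standalone sketch. `listKeys` and its arguments are hypothetical and far simpler than the real state-machine-based Delimiter class, but they show how keys sharing a prefix up to the delimiter collapse into one CommonPrefixes entry:

```javascript
// Minimal sketch of delimiter-style grouping; not the real Delimiter class.
// Keys that share a prefix ending with the delimiter collapse into a single
// CommonPrefixes entry; the remaining keys are listed under Contents.
function listKeys(keys, { prefix = '', delimiter = '/', maxKeys = 1000 }) {
    const res = { Contents: [], CommonPrefixes: [] };
    for (const key of keys) {
        if (res.Contents.length + res.CommonPrefixes.length >= maxKeys) {
            break; // corresponds to FILTER_END in the state chart
        }
        if (!key.startsWith(prefix)) {
            continue; // only list keys belonging to the given prefix
        }
        const idx = key.indexOf(delimiter, prefix.length);
        if (idx === -1) {
            res.Contents.push(key);
        } else {
            const commonPrefix = key.slice(0, idx + delimiter.length);
            if (!res.CommonPrefixes.includes(commonPrefix)) {
                res.CommonPrefixes.push(commonPrefix);
            }
        }
    }
    return res;
}

// listKeys(['photos/a.jpg', 'photos/b.jpg', 'readme.md'], {})
// -> { Contents: ['readme.md'], CommonPrefixes: ['photos/'] }
```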
@@ -0,0 +1,45 @@

# DelimiterMaster

The DelimiterMaster class handles raw listings from the database of a
versioned or non-versioned bucket with an optional delimiter, and
fills in a curated listing with "Contents" and "CommonPrefixes" as a
result.

## Expected Behavior

- only lists latest versions of versioned buckets

- only lists keys belonging to the given **prefix** (if provided)

- does not list latest versions that are delete markers

- groups listed keys that have a common prefix ending with a delimiter
  inside CommonPrefixes

- can take a **marker** or **continuationToken** to list from a specific key

- can take a **maxKeys** parameter to limit how many keys can be returned

- reconciles internal PHD keys with the next version (those are
  created when a specific version that is the latest version is
  deleted)

- skips internal keys like replay keys

## State Chart

- States with grey background are *Idle* states, which are waiting for
  a new listing key

- States with blue background are *Processing* states, which are
  actively processing a new listing key passed by the filter()
  function

### Bucket Vformat=v0

![DelimiterMaster State Chart for v0 format](./pics/delimiterMasterV0StateChart.svg)

### Bucket Vformat=v1

For buckets in versioning key format **v1**, the algorithm used is the
one from [Delimiter](delimiter.md).
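For the v0 format, the per-key decisions in the chart above come down to a few checks. The sketch below is illustrative only: the `'\0'` separator matches the v0 convention for version keys, but the helper names and return values are made up, not Arsenal's actual API:

```javascript
// Illustrative sketch of the per-key decisions for a v0 master-key listing.
// In v0, version keys look like 'objectKey\0versionId'; master keys contain
// no '\0'. Helper names and return values are hypothetical.
const VERSION_SEP = '\0';

// Stand-ins for metadata checks; real code inspects the metadata blob.
const isDeleteMarker = md => md.isDeleteMarker === true;
const isPHD = md => md.isPHD === true;

function classifyV0Key(key, md) {
    if (key.includes(VERSION_SEP)) {
        return 'skip';              // a specific version, not a master key
    }
    if (isDeleteMarker(md)) {
        return 'skip-versions';     // latest version is a delete marker
    }
    if (isPHD(md)) {
        return 'wait-next-version'; // PHD key: reconcile with the next version
    }
    return 'list';                  // a listable master key
}
```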
@@ -0,0 +1,33 @@

# DelimiterVersions

The DelimiterVersions class handles raw listings from the database of a
versioned or non-versioned bucket with an optional delimiter, and
fills in a curated listing with "Versions" and "CommonPrefixes" as a
result.

## Expected Behavior

- lists individual distinct versions of versioned buckets

- only lists keys belonging to the given **prefix** (if provided)

- groups listed keys that have a common prefix ending with a delimiter
  inside CommonPrefixes

- can take a **keyMarker** and optionally a **versionIdMarker** to
  list from a specific key or version

- can take a **maxKeys** parameter to limit how many keys can be returned

- skips internal keys like replay keys

## State Chart

- States with grey background are *Idle* states, which are waiting for
  a new listing key

- States with blue background are *Processing* states, which are
  actively processing a new listing key passed by the filter()
  function

![DelimiterVersions State Chart](./pics/delimiterVersionsStateChart.svg)
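As a sketch of the marker behaviour described above, resuming a listing means skipping every entry up to and including the (keyMarker, versionIdMarker) pair. The entry shape and the assumption that entries arrive sorted by key then version id are illustrative, not Arsenal's actual representation:

```javascript
// Illustrative sketch, not Arsenal code: decide whether a (key, versionId)
// entry comes after the (keyMarker, versionIdMarker) resume point, assuming
// entries arrive sorted by key, then by versionId within a key.
function isAfterMarker(entry, keyMarker, versionIdMarker) {
    if (keyMarker === undefined) {
        return true;                  // no marker: list from the start
    }
    if (entry.key !== keyMarker) {
        return entry.key > keyMarker; // a later key in iteration order
    }
    // same key: only versions sorting after the versionIdMarker remain
    return versionIdMarker !== undefined && entry.versionId > versionIdMarker;
}
```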
@@ -0,0 +1,45 @@

```dot
digraph {
    node [shape="box",style="filled,rounded",fontsize=16,fixedsize=true,width=3];
    edge [fontsize=14];
    rankdir=TB;

    START [shape="circle",width=0.2,label="",style="filled",fillcolor="black"]
    END [shape="circle",width=0.2,label="",style="filled",fillcolor="black",peripheries=2]

    node [fillcolor="lightgrey"];
    "NotSkippingPrefixNorVersions.Idle" [label="NotSkippingPrefixNorVersions",group="NotSkippingPrefixNorVersions",width=4];
    "SkippingPrefix.Idle" [label="SkippingPrefix",group="SkippingPrefix"];
    "SkippingVersions.Idle" [label="SkippingVersions",group="SkippingVersions"];
    "WaitVersionAfterPHD.Idle" [label="WaitVersionAfterPHD",group="WaitVersionAfterPHD"];

    node [fillcolor="lightblue"];
    "NotSkippingPrefixNorVersions.Processing" [label="NotSkippingPrefixNorVersions",group="NotSkippingPrefixNorVersions",width=4];
    "SkippingPrefix.Processing" [label="SkippingPrefix",group="SkippingPrefix"];
    "SkippingVersions.Processing" [label="SkippingVersions",group="SkippingVersions"];
    "WaitVersionAfterPHD.Processing" [label="WaitVersionAfterPHD",group="WaitVersionAfterPHD"];

    START -> "SkippingVersions.Idle" [label="[marker != undefined]"]
    START -> "NotSkippingPrefixNorVersions.Idle" [label="[marker == undefined]"]

    "NotSkippingPrefixNorVersions.Idle" -> "NotSkippingPrefixNorVersions.Processing" [label="filter(key, value)"]
    "SkippingPrefix.Idle" -> "SkippingPrefix.Processing" [label="filter(key, value)"]
    "SkippingVersions.Idle" -> "SkippingVersions.Processing" [label="filter(key, value)"]
    "WaitVersionAfterPHD.Idle" -> "WaitVersionAfterPHD.Processing" [label="filter(key, value)"]

    "NotSkippingPrefixNorVersions.Processing" -> "SkippingVersions.Idle" [label="[Version.isDeleteMarker(value)]\n-> FILTER_ACCEPT"]
    "NotSkippingPrefixNorVersions.Processing" -> "WaitVersionAfterPHD.Idle" [label="[Version.isPHD(value)]\n-> FILTER_ACCEPT"]
    "NotSkippingPrefixNorVersions.Processing" -> "SkippingPrefix.Idle" [label="[key.startsWith(<ReplayPrefix>)]\n/ prefix <- <ReplayPrefix>\n-> FILTER_SKIP"]
    "NotSkippingPrefixNorVersions.Processing" -> END [label="[isListableKey(key, value) and\nKeys == maxKeys]\n-> FILTER_END"]
    "NotSkippingPrefixNorVersions.Processing" -> "SkippingPrefix.Idle" [label="[isListableKey(key, value) and\nnKeys < maxKeys and\nhasDelimiter(key)]\n/ prefix <- prefixOf(key)\n/ CommonPrefixes.append(prefixOf(key))\n-> FILTER_ACCEPT"]
    "NotSkippingPrefixNorVersions.Processing" -> "SkippingVersions.Idle" [label="[isListableKey(key, value) and\nnKeys < maxKeys and\nnot hasDelimiter(key)]\n/ Contents.append(key, value)\n-> FILTER_ACCEPT"]

    "SkippingPrefix.Processing" -> "SkippingPrefix.Idle" [label="[key.startsWith(prefix)]\n-> FILTER_SKIP"]
    "SkippingPrefix.Processing" -> "NotSkippingPrefixNorVersions.Processing" [label="[not key.startsWith(prefix)]"]

    "SkippingVersions.Processing" -> "SkippingVersions.Idle" [label="[isVersionKey(key)]\n-> FILTER_SKIP"]
    "SkippingVersions.Processing" -> "NotSkippingPrefixNorVersions.Processing" [label="[not isVersionKey(key)]"]

    "WaitVersionAfterPHD.Processing" -> "NotSkippingPrefixNorVersions.Processing" [label="[isVersionKey(key) and master(key) == PHDkey]\n/ key <- master(key)"]
    "WaitVersionAfterPHD.Processing" -> "NotSkippingPrefixNorVersions.Processing" [label="[not isVersionKey(key) or master(key) != PHDkey]"]
}
```
@@ -0,0 +1,216 @@

*(New file: generated Graphviz SVG rendering of the DelimiterMaster v0 state chart from the .dot source above — 18 KiB; raw SVG markup omitted.)*
@@ -0,0 +1,35 @@

```dot
digraph {
    node [shape="box",style="filled,rounded",fontsize=16,fixedsize=true,width=3];
    edge [fontsize=14];
    rankdir=TB;

    START [shape="circle",width=0.2,label="",style="filled",fillcolor="black"]
    END [shape="circle",width=0.2,label="",style="filled",fillcolor="black",peripheries=2]

    node [fillcolor="lightgrey"];
    "NotSkipping.Idle" [label="NotSkipping",group="NotSkipping"];
    "NeverSkipping.Idle" [label="NeverSkipping",group="NeverSkipping"];
    "NotSkippingPrefix.Idle" [label="NotSkippingPrefix",group="NotSkippingPrefix"];
    "SkippingPrefix.Idle" [label="SkippingPrefix",group="SkippingPrefix"];

    node [fillcolor="lightblue"];
    "NeverSkipping.Processing" [label="NeverSkipping",group="NeverSkipping"];
    "NotSkippingPrefix.Processing" [label="NotSkippingPrefix",group="NotSkippingPrefix"];
    "SkippingPrefix.Processing" [label="SkippingPrefix",group="SkippingPrefix"];

    START -> "NotSkipping.Idle"
    "NotSkipping.Idle" -> "NeverSkipping.Idle" [label="[delimiter == undefined]"]
    "NotSkipping.Idle" -> "NotSkippingPrefix.Idle" [label="[delimiter == '/']"]

    "NeverSkipping.Idle" -> "NeverSkipping.Processing" [label="filter(key, value)"]
    "NotSkippingPrefix.Idle" -> "NotSkippingPrefix.Processing" [label="filter(key, value)"]
    "SkippingPrefix.Idle" -> "SkippingPrefix.Processing" [label="filter(key, value)"]

    "NeverSkipping.Processing" -> END [label="[nKeys == maxKeys]\n-> FILTER_END"]
    "NeverSkipping.Processing" -> "NeverSkipping.Idle" [label="[nKeys < maxKeys]\n/ Contents.append(key, value)\n -> FILTER_ACCEPT"]
    "NotSkippingPrefix.Processing" -> END [label="[nKeys == maxKeys]\n -> FILTER_END"]
    "NotSkippingPrefix.Processing" -> "SkippingPrefix.Idle" [label="[nKeys < maxKeys and hasDelimiter(key)]\n/ prefix <- prefixOf(key)\n/ CommonPrefixes.append(prefixOf(key))\n-> FILTER_ACCEPT"]
    "NotSkippingPrefix.Processing" -> "NotSkippingPrefix.Idle" [label="[nKeys < maxKeys and not hasDelimiter(key)]\n/ Contents.append(key, value)\n -> FILTER_ACCEPT"]
    "SkippingPrefix.Processing" -> "SkippingPrefix.Idle" [label="[key.startsWith(prefix)]\n-> FILTER_SKIP"]
    "SkippingPrefix.Processing" -> "NotSkippingPrefix.Processing" [label="[not key.startsWith(prefix)]"]
}
```
@@ -0,0 +1,166 @@

*(New file: generated Graphviz SVG rendering of the Delimiter state chart from the .dot source above — raw SVG markup omitted.)*
<title>NeverSkipping.Processing->NeverSkipping.Idle</title>
|
||||
<path fill="none" stroke="black" d="M129.49,-270.27C134.87,-275.48 140.18,-281.55 144,-288 153.56,-304.17 159.09,-324.63 162.21,-340.81"/>
|
||||
<polygon fill="black" stroke="black" points="158.78,-341.49 163.94,-350.74 165.68,-340.29 158.78,-341.49"/>
|
||||
<text text-anchor="middle" x="265.5" y="-321.8" font-family="Times,serif" font-size="14.00">[nKeys < maxKeys]</text>
|
||||
<text text-anchor="middle" x="265.5" y="-306.8" font-family="Times,serif" font-size="14.00">/ Contents.append(key, value)</text>
|
||||
<text text-anchor="middle" x="265.5" y="-291.8" font-family="Times,serif" font-size="14.00"> -> FILTER_ACCEPT</text>
|
||||
</g>
|
||||
<!-- NotSkippingPrefix.Processing->END -->
|
||||
<g id="edge9" class="edge">
|
||||
<title>NotSkippingPrefix.Processing->END</title>
|
||||
<path fill="none" stroke="black" d="M349.96,-237.93C333,-232.81 316.36,-225.74 302,-216 275.27,-197.87 285.01,-177.6 261,-156 247.64,-143.98 229.41,-134.62 215.65,-128.62"/>
|
||||
<polygon fill="black" stroke="black" points="216.74,-125.28 206.16,-124.7 214.07,-131.75 216.74,-125.28"/>
|
||||
<text text-anchor="middle" x="378" y="-189.8" font-family="Times,serif" font-size="14.00">[nKeys == maxKeys]</text>
|
||||
<text text-anchor="middle" x="378" y="-174.8" font-family="Times,serif" font-size="14.00"> -> FILTER_END</text>
|
||||
</g>
|
||||
<!-- NotSkippingPrefix.Processing->NotSkippingPrefix.Idle -->
|
||||
<g id="edge11" class="edge">
|
||||
<title>NotSkippingPrefix.Processing->NotSkippingPrefix.Idle</title>
|
||||
<path fill="none" stroke="black" d="M499.64,-270.11C506.59,-274.86 512.87,-280.76 517,-288 526.9,-305.38 528.94,-316.96 517,-333 513.56,-337.62 509.53,-341.66 505.07,-345.18"/>
|
||||
<polygon fill="black" stroke="black" points="502.89,-342.43 496.63,-350.98 506.85,-348.2 502.89,-342.43"/>
|
||||
<text text-anchor="middle" x="690.5" y="-321.8" font-family="Times,serif" font-size="14.00">[nKeys < maxKeys and not hasDelimiter(key)]</text>
|
||||
<text text-anchor="middle" x="690.5" y="-306.8" font-family="Times,serif" font-size="14.00">/ Contents.append(key, value)</text>
|
||||
<text text-anchor="middle" x="690.5" y="-291.8" font-family="Times,serif" font-size="14.00"> -> FILTER_ACCEPT</text>
|
||||
</g>
|
||||
<!-- NotSkippingPrefix.Processing->SkippingPrefix.Idle -->
|
||||
<g id="edge10" class="edge">
|
||||
<title>NotSkippingPrefix.Processing->SkippingPrefix.Idle</title>
|
||||
<path fill="none" stroke="black" d="M458,-233.74C458,-211.98 458,-174.32 458,-148.56"/>
|
||||
<polygon fill="black" stroke="black" points="461.5,-148.33 458,-138.33 454.5,-148.33 461.5,-148.33"/>
|
||||
<text text-anchor="middle" x="609.5" y="-204.8" font-family="Times,serif" font-size="14.00">[nKeys < maxKeys and hasDelimiter(key)]</text>
|
||||
<text text-anchor="middle" x="609.5" y="-189.8" font-family="Times,serif" font-size="14.00">/ prefix <- prefixOf(key)</text>
|
||||
<text text-anchor="middle" x="609.5" y="-174.8" font-family="Times,serif" font-size="14.00">/ CommonPrefixes.append(prefixOf(key))</text>
|
||||
<text text-anchor="middle" x="609.5" y="-159.8" font-family="Times,serif" font-size="14.00">-> FILTER_ACCEPT</text>
|
||||
</g>
|
||||
<!-- SkippingPrefix.Processing->SkippingPrefix.Idle -->
|
||||
<g id="edge12" class="edge">
|
||||
<title>SkippingPrefix.Processing->SkippingPrefix.Idle</title>
|
||||
<path fill="none" stroke="black" d="M593.49,-36.23C591.32,-50.84 586,-71.39 573,-84 567.75,-89.09 561.77,-93.45 555.38,-97.17"/>
|
||||
<polygon fill="black" stroke="black" points="553.66,-94.12 546.43,-101.87 556.91,-100.32 553.66,-94.12"/>
|
||||
<text text-anchor="middle" x="672" y="-72.8" font-family="Times,serif" font-size="14.00">[key.startsWith(prefix)]</text>
|
||||
<text text-anchor="middle" x="672" y="-57.8" font-family="Times,serif" font-size="14.00">-> FILTER_SKIP</text>
|
||||
</g>
|
||||
<!-- SkippingPrefix.Processing->NotSkippingPrefix.Processing -->
|
||||
<g id="edge13" class="edge">
|
||||
<title>SkippingPrefix.Processing->NotSkippingPrefix.Processing</title>
|
||||
<path fill="none" stroke="black" d="M703.16,-31.64C728.6,-36.87 750.75,-44.11 759,-54 778.46,-77.34 776.26,-200.01 762,-216 749.37,-230.17 656.13,-239.42 576.2,-244.84"/>
|
||||
<polygon fill="black" stroke="black" points="575.77,-241.36 566.03,-245.51 576.24,-248.34 575.77,-241.36"/>
|
||||
<text text-anchor="middle" x="870" y="-116.3" font-family="Times,serif" font-size="14.00">[not key.startsWith(prefix)]</text>
|
||||
</g>
|
||||
</g>
|
||||
</svg>
|
After Width: | Height: | Size: 12 KiB |
@ -0,0 +1,50 @@
digraph {
    node [shape="box",style="filled,rounded",fontsize=16,fixedsize=true,width=3];
    edge [fontsize=14];
    rankdir=TB;

    START [shape="circle",width=0.2,label="",style="filled",fillcolor="black"]
    END [shape="circle",width=0.2,label="",style="filled",fillcolor="black",peripheries=2]

    node [fillcolor="lightgrey"];
    "NotSkipping.Idle" [label="NotSkipping",group="NotSkipping",width=4];
    "SkippingPrefix.Idle" [label="SkippingPrefix",group="SkippingPrefix"];
    "WaitForNullKey.Idle" [label="WaitForNullKey",group="WaitForNullKey"];
    "SkippingVersions.Idle" [label="SkippingVersions",group="SkippingVersions"];

    node [fillcolor="lightblue"];
    "NotSkipping.Processing" [label="NotSkipping",group="NotSkipping",width=4];
    "NotSkippingV0.Processing" [label="NotSkippingV0",group="NotSkipping",width=4];
    "NotSkippingV1.Processing" [label="NotSkippingV1",group="NotSkipping",width=4];
    "NotSkippingCommon.Processing" [label="NotSkippingCommon",group="NotSkipping",width=4];
    "SkippingPrefix.Processing" [label="SkippingPrefix",group="SkippingPrefix"];
    "WaitForNullKey.Processing" [label="WaitForNullKey",group="WaitForNullKey"];
    "SkippingVersions.Processing" [label="SkippingVersions",group="SkippingVersions"];

    START -> "WaitForNullKey.Idle" [label="[versionIdMarker != undefined]"]
    START -> "NotSkipping.Idle" [label="[versionIdMarker == undefined]"]

    "NotSkipping.Idle" -> "NotSkipping.Processing" [label="filter(key, value)"]
    "SkippingPrefix.Idle" -> "SkippingPrefix.Processing" [label="filter(key, value)"]
    "WaitForNullKey.Idle" -> "WaitForNullKey.Processing" [label="filter(key, value)"]
    "SkippingVersions.Idle" -> "SkippingVersions.Processing" [label="filter(key, value)"]

    "NotSkipping.Processing" -> "NotSkippingV0.Processing" [label="vFormat='v0'"]
    "NotSkipping.Processing" -> "NotSkippingV1.Processing" [label="vFormat='v1'"]

    "WaitForNullKey.Processing" -> "NotSkipping.Processing" [label="master(key) != keyMarker"]
    "WaitForNullKey.Processing" -> "SkippingVersions.Processing" [label="master(key) == keyMarker"]
    "NotSkippingV0.Processing" -> "SkippingPrefix.Idle" [label="[key.startsWith(<ReplayPrefix>)]\n/ prefix <- <ReplayPrefix>\n-> FILTER_SKIP"]
    "NotSkippingV0.Processing" -> "NotSkipping.Idle" [label="[Version.isPHD(value)]\n-> FILTER_ACCEPT"]
    "NotSkippingV0.Processing" -> "NotSkippingCommon.Processing" [label="[not key.startsWith(<ReplayPrefix>)\nand not Version.isPHD(value)]"]
    "NotSkippingV1.Processing" -> "NotSkippingCommon.Processing" [label="[always]"]
"NotSkippingCommon.Processing" -> END [label="[isListableKey(key, value) and\nKeys == maxKeys]\n-> FILTER_END"]
|
    "NotSkippingCommon.Processing" -> "SkippingPrefix.Idle" [label="[isListableKey(key, value) and\nnKeys < maxKeys and\nhasDelimiter(key)]\n/ prefix <- prefixOf(key)\n/ CommonPrefixes.append(prefixOf(key))\n-> FILTER_ACCEPT"]
    "NotSkippingCommon.Processing" -> "NotSkipping.Idle" [label="[isListableKey(key, value) and\nnKeys < maxKeys and\nnot hasDelimiter(key)]\n/ Contents.append(key, versionId, value)\n-> FILTER_ACCEPT"]

    "SkippingPrefix.Processing" -> "SkippingPrefix.Idle" [label="[key.startsWith(prefix)]\n-> FILTER_SKIP"]
    "SkippingPrefix.Processing" -> "NotSkipping.Processing" [label="[not key.startsWith(prefix)]"]
    "SkippingVersions.Processing" -> "NotSkipping.Processing" [label="master(key) !== keyMarker or \nversionId > versionIdMarker"]
    "SkippingVersions.Processing" -> "SkippingVersions.Idle" [label="master(key) === keyMarker and \nversionId < versionIdMarker\n-> FILTER_SKIP"]
    "SkippingVersions.Processing" -> "SkippingVersions.Idle" [label="master(key) === keyMarker and \nversionId == versionIdMarker\n-> FILTER_ACCEPT"]
}
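For orientation, here is a minimal TypeScript sketch of how a state machine like the one in the digraph above is typically driven. Only the state names and the FILTER_* outcome names come from the diagram; every other identifier and value is an illustrative assumption, not code from this diff:

// Sketch only: the constant values below are placeholders, not the listing
// framework's actual constants.
const FILTER_END = -1, FILTER_SKIP = 0, FILTER_ACCEPT = 1;

type State = 'NotSkipping' | 'SkippingPrefix' | 'WaitForNullKey' | 'SkippingVersions';

class VersionsFilterSketch {
    state: State;
    prefix?: string;

    constructor(versionIdMarker?: string) {
        // START edges: wait for the null key only when a versionIdMarker is given
        this.state = versionIdMarker !== undefined ? 'WaitForNullKey' : 'NotSkipping';
    }

    // each filter(key, value) call runs the Processing box of the current state
    filter(key: string, _value: string): number {
        if (this.state === 'SkippingPrefix') {
            if (this.prefix !== undefined && key.startsWith(this.prefix)) {
                return FILTER_SKIP; // [key.startsWith(prefix)] -> FILTER_SKIP
            }
            this.state = 'NotSkipping'; // [not key.startsWith(prefix)]
        }
        // ... the NotSkipping / WaitForNullKey / SkippingVersions branches follow
        // the remaining edges of the digraph in the same way
        return FILTER_ACCEPT;
    }
}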
@ -0,0 +1,265 @@
[generated Graphviz SVG omitted: rendering of the versions listing state diagram defined by the digraph source above (START/END plus Idle and Processing variants of NotSkipping, NotSkippingV0, NotSkippingV1, NotSkippingCommon, SkippingPrefix, WaitForNullKey and SkippingVersions, with their filter(key, value) transitions)]
After Width: | Height: | Size: 21 KiB
index.ts
@ -1,6 +1,9 @@
import * as evaluators from './lib/policyEvaluator/evaluator';
import evaluatePrincipal from './lib/policyEvaluator/principal';
import RequestContext from './lib/policyEvaluator/RequestContext';
import RequestContext, {
    actionNeedQuotaCheck,
    actionNeedQuotaCheckCopy,
    actionWithDataDeletion } from './lib/policyEvaluator/RequestContext';
import * as requestUtils from './lib/policyEvaluator/requestUtils';
import * as actionMaps from './lib/policyEvaluator/utils/actionMaps';
import { validateUserPolicy } from './lib/policy/policyValidator'

@ -25,6 +28,7 @@ import * as objectRestore from './lib/s3middleware/objectRestore';
import * as lifecycleHelpers from './lib/s3middleware/lifecycleHelpers';
export { default as errors } from './lib/errors';
export { default as Clustering } from './lib/Clustering';
export * as ClusterRPC from './lib/clustering/ClusterRPC';
export * as ipCheck from './lib/ipCheck';
export * as auth from './lib/auth/auth';
export * as constants from './lib/constants';

@ -48,12 +52,15 @@ export const algorithms = {
        Skip: require('./lib/algos/list/skip'),
    },
    cache: {
        GapSet: require('./lib/algos/cache/GapSet'),
        GapCache: require('./lib/algos/cache/GapCache'),
        LRUCache: require('./lib/algos/cache/LRUCache'),
    },
    stream: {
        MergeStream: require('./lib/algos/stream/MergeStream'),
    },
    SortedSet: require('./lib/algos/set/SortedSet'),
    Heap: require('./lib/algos/heap/Heap'),
};

export const policies = {

@ -63,6 +70,9 @@ export const policies = {
    RequestContext,
    requestUtils,
    actionMaps,
    actionNeedQuotaCheck,
    actionWithDataDeletion,
    actionNeedQuotaCheckCopy,
};

export const testing = {
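A minimal consumer-side sketch of the exports added above (the package import path, parameter values and variable names are illustrative assumptions; the diff itself only adds the exports):

import { algorithms, policies } from 'arsenal';

// GapCache is loaded via require() above, so the class is the module's
// default export (assuming standard tsc CommonJS output).
const GapCache = algorithms.cache.GapCache.default;
const gapCache = new GapCache(100, 1000, 500); // exposureDelayMs, maxGaps, maxGapWeight

// the action sets re-exported through `policies`
const quotaActions = policies.actionNeedQuotaCheck;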
lib/algos/cache/GapCache.ts
@ -0,0 +1,363 @@
import { OrderedSet } from '@js-sdsl/ordered-set';
import {
    default as GapSet,
    GapSetEntry,
} from './GapSet';

// the API is similar but is not strictly a superset of GapSetInterface
// so we don't extend from it
export interface GapCacheInterface {
    exposureDelayMs: number;
    maxGapWeight: number;
    size: number;

    setGap: (firstKey: string, lastKey: string, weight: number) => void;
    removeOverlappingGaps: (overlappingKeys: string[]) => number;
    lookupGap: (minKey: string, maxKey?: string) => Promise<GapSetEntry | null>;
    [Symbol.iterator]: () => Iterator<GapSetEntry>;
    toArray: () => GapSetEntry[];
};

class GapCacheUpdateSet {
    newGaps: GapSet;
    updatedKeys: OrderedSet<string>;

    constructor(maxGapWeight: number) {
        this.newGaps = new GapSet(maxGapWeight);
        this.updatedKeys = new OrderedSet();
    }

    addUpdateBatch(updatedKeys: OrderedSet<string>): void {
        this.updatedKeys.union(updatedKeys);
    }
};

/**
 * Cache of listing "gaps", i.e. ranges of keys that can be skipped
 * over during listing (because they only contain delete markers as
 * latest versions).
 *
 * Typically, a single GapCache instance would be attached to a raft session.
 *
 * The API usage is as follows:
 *
 * - Initialize a GapCache instance by calling start() (this starts an internal timer)
 *
 * - Insert a gap or update an existing one via setGap()
 *
 * - Lookup existing gaps via lookupGap()
 *
 * - Invalidate gaps that overlap a specific set of keys via removeOverlappingGaps()
 *
 * - Shut down a GapCache instance by calling stop() (this stops the internal timer)
 *
 * Gaps inserted via setGap() are not exposed immediately to lookupGap(), but only:
 *
 * - after a certain delay always larger than 'exposureDelayMs' and usually shorter
 *   than twice this value (but might be slightly longer in rare cases)
 *
 * - and only if they haven't been invalidated by a recent call to removeOverlappingGaps()
 *
 * This ensures atomicity between gap creation and invalidation from updates under
 * the condition that a gap is created from first key to last key within the time defined
 * by 'exposureDelayMs'.
 *
 * The implementation is based on two extra temporary "update sets" on top of the main
 * exposed gap set, one called "staging" and the other "frozen", each containing a
 * temporary updated gap set and a list of updated keys to invalidate gaps with (coming
 * from calls to removeOverlappingGaps()). Every "exposureDelayMs" milliseconds, the frozen
 * gaps are invalidated by all key updates coming from either of the "staging" or "frozen"
 * update set, then merged into the exposed gaps set, after which the staging updates become
 * the frozen updates and won't receive any new gap until the next cycle.
 */
export default class GapCache implements GapCacheInterface {
    _exposureDelayMs: number;
    maxGaps: number;

    _stagingUpdates: GapCacheUpdateSet;
    _frozenUpdates: GapCacheUpdateSet;
    _exposedGaps: GapSet;
    _exposeFrozenInterval: NodeJS.Timeout | null;

    /**
     * @constructor
     *
     * @param {number} exposureDelayMs - minimum delay between
     * insertion of a gap via setGap() and its exposure via
     * lookupGap()
     * @param {number} maxGaps - maximum number of cached gaps, after
     * which no new gap can be added by setGap(). (Note: a future
     * improvement could replace this by an eviction strategy)
     * @param {number} maxGapWeight - maximum "weight" of individual
     * cached gaps, which is also the granularity for
     * invalidation. Individual gaps can be chained together,
     * which lookupGap() transparently consolidates in the response
     * into a single large gap.
     */
    constructor(exposureDelayMs: number, maxGaps: number, maxGapWeight: number) {
        this._exposureDelayMs = exposureDelayMs;
        this.maxGaps = maxGaps;

        this._stagingUpdates = new GapCacheUpdateSet(maxGapWeight);
        this._frozenUpdates = new GapCacheUpdateSet(maxGapWeight);
        this._exposedGaps = new GapSet(maxGapWeight);
        this._exposeFrozenInterval = null;
    }

    /**
     * Create a GapCache from an array of exposed gap entries (used in tests)
     *
     * @return {GapCache} - a new GapCache instance
     */
    static createFromArray(
        gaps: GapSetEntry[],
        exposureDelayMs: number,
        maxGaps: number,
        maxGapWeight: number
    ): GapCache {
        const gapCache = new GapCache(exposureDelayMs, maxGaps, maxGapWeight);
        gapCache._exposedGaps = GapSet.createFromArray(gaps, maxGapWeight);
        return gapCache;
    }

    /**
     * Internal helper to remove gaps in the staging and frozen sets
     * overlapping with previously updated keys, right before the
     * frozen gaps get exposed.
     *
     * @return {undefined}
     */
    _removeOverlappingGapsBeforeExpose(): void {
        for (const { updatedKeys } of [this._stagingUpdates, this._frozenUpdates]) {
            if (updatedKeys.size() === 0) {
                continue;
            }
            for (const { newGaps } of [this._stagingUpdates, this._frozenUpdates]) {
                if (newGaps.size === 0) {
                    continue;
                }
                newGaps.removeOverlappingGaps(updatedKeys);
            }
        }
    }

    /**
     * This function is the core mechanism that updates the exposed gaps in the
     * cache. It is called on a regular interval defined by 'exposureDelayMs'.
     *
     * It does the following in order:
     *
     * - remove gaps from the frozen set that overlap with any key present in a
     *   batch passed to removeOverlappingGaps() since the last two triggers of
     *   _exposeFrozen()
     *
     * - merge the remaining gaps from the frozen set to the exposed set, which
     *   makes them visible from calls to lookupGap()
     *
     * - rotate by freezing the currently staging updates and initiating a new
     *   staging updates set
     *
     * @return {undefined}
     */
    _exposeFrozen(): void {
        this._removeOverlappingGapsBeforeExpose();
        for (const gap of this._frozenUpdates.newGaps) {
            // Use a trivial strategy to keep the cache size within
            // limits: refuse to add new gaps when the size is above
            // the 'maxGaps' threshold. We solely rely on
            // removeOverlappingGaps() to make space for new gaps.
            if (this._exposedGaps.size < this.maxGaps) {
                this._exposedGaps.setGap(gap.firstKey, gap.lastKey, gap.weight);
            }
        }
        this._frozenUpdates = this._stagingUpdates;
        this._stagingUpdates = new GapCacheUpdateSet(this.maxGapWeight);
    }

    /**
     * Start the internal GapCache timer
     *
     * @return {undefined}
     */
    start(): void {
        if (this._exposeFrozenInterval) {
            return;
        }
        this._exposeFrozenInterval = setInterval(
            () => this._exposeFrozen(),
            this._exposureDelayMs);
    }

    /**
     * Stop the internal GapCache timer
     *
     * @return {undefined}
     */
    stop(): void {
        if (this._exposeFrozenInterval) {
            clearInterval(this._exposeFrozenInterval);
            this._exposeFrozenInterval = null;
        }
    }

    /**
     * Record a gap between two keys, associated with a weight to
     * limit individual gaps' spanning ranges in the cache, for a more
     * granular invalidation.
     *
     * The function handles splitting and merging existing gaps to
     * maintain an optimal weight of cache entries.
     *
     * NOTE 1: the caller must ensure that the full length of the gap
     * between 'firstKey' and 'lastKey' has been built from a listing
     * snapshot that is more recent than 'exposureDelayMs' milliseconds,
     * in order to guarantee that the exposed gap will be fully
     * covered (and potentially invalidated) from recent calls to
     * removeOverlappingGaps().
     *
     * NOTE 2: a usual pattern when building a large gap from multiple
     * calls to setGap() is to start the next gap from 'lastKey',
     * which will be passed as 'firstKey' in the next call, so that
     * gaps can be chained together and consolidated by lookupGap().
     *
     * @param {string} firstKey - first key of the gap
     * @param {string} lastKey - last key of the gap, must be greater
     * than or equal to 'firstKey'
     * @param {number} weight - total weight between 'firstKey' and 'lastKey'
     * @return {undefined}
     */
    setGap(firstKey: string, lastKey: string, weight: number): void {
        this._stagingUpdates.newGaps.setGap(firstKey, lastKey, weight);
    }

    /**
     * Remove gaps that overlap with a given set of keys. Used to
     * invalidate gaps when keys are inserted or deleted.
     *
     * @param {OrderedSet<string> | string[]} overlappingKeys - remove gaps that
     * overlap with any of this set of keys
     * @return {number} - how many gaps were removed from the exposed
     * gaps only (overlapping gaps not yet exposed are also invalidated
     * but are not accounted for in the returned value)
     */
    removeOverlappingGaps(overlappingKeys: OrderedSet<string> | string[]): number {
        let overlappingKeysSet;
        if (Array.isArray(overlappingKeys)) {
            overlappingKeysSet = new OrderedSet(overlappingKeys);
        } else {
            overlappingKeysSet = overlappingKeys;
        }
        this._stagingUpdates.addUpdateBatch(overlappingKeysSet);
        return this._exposedGaps.removeOverlappingGaps(overlappingKeysSet);
    }

    /**
     * Lookup the next exposed gap that overlaps with [minKey, maxKey]. Internally
     * chained gaps are coalesced in the response into a single contiguous large gap.
     *
     * @param {string} minKey - minimum key overlapping with the returned gap
     * @param {string} [maxKey] - maximum key overlapping with the returned gap
     * @return {Promise<GapSetEntry | null>} - result of the lookup if a gap
     * was found, null otherwise, as a Promise
     */
    lookupGap(minKey: string, maxKey?: string): Promise<GapSetEntry | null> {
        return this._exposedGaps.lookupGap(minKey, maxKey);
    }

    /**
     * Get the maximum weight setting for individual gaps.
     *
     * @return {number} - maximum weight of individual gaps
     */
    get maxGapWeight(): number {
        return this._exposedGaps.maxWeight;
    }

    /**
     * Set the maximum weight setting for individual gaps.
     *
     * @param {number} gapWeight - maximum weight of individual gaps
     */
    set maxGapWeight(gapWeight: number) {
        this._exposedGaps.maxWeight = gapWeight;
        // also update transient gap sets
        this._stagingUpdates.newGaps.maxWeight = gapWeight;
        this._frozenUpdates.newGaps.maxWeight = gapWeight;
    }

    /**
     * Get the exposure delay in milliseconds, which is the minimum
     * time after which newly cached gaps will be exposed by
     * lookupGap().
     *
     * @return {number} - exposure delay in milliseconds
     */
    get exposureDelayMs(): number {
        return this._exposureDelayMs;
    }

    /**
     * Set the exposure delay in milliseconds, which is the minimum
     * time after which newly cached gaps will be exposed by
     * lookupGap(). Setting this attribute automatically updates the
     * internal state to honor the new value.
     *
     * @param {number} exposureDelayMs - exposure delay in milliseconds
     */
    set exposureDelayMs(exposureDelayMs: number) {
        if (exposureDelayMs !== this._exposureDelayMs) {
            this._exposureDelayMs = exposureDelayMs;
            if (this._exposeFrozenInterval) {
                // invalidate all pending gap updates, as the new interval may not be
                // safe for them
                this._stagingUpdates = new GapCacheUpdateSet(this.maxGapWeight);
                this._frozenUpdates = new GapCacheUpdateSet(this.maxGapWeight);

                // reinitialize the _exposeFrozenInterval timer with the updated delay
                this.stop();
                this.start();
            }
        }
    }

    /**
     * Get the number of exposed gaps
     *
     * @return {number} number of exposed gaps
     */
    get size(): number {
        return this._exposedGaps.size;
    }

    /**
     * Iterate over exposed gaps
     *
     * @return {Iterator<GapSetEntry>} an iterator over exposed gaps
     */
    [Symbol.iterator](): Iterator<GapSetEntry> {
        return this._exposedGaps[Symbol.iterator]();
    }

    /**
     * Get an array of all exposed gaps
     *
     * @return {GapSetEntry[]} array of exposed gaps
     */
    toArray(): GapSetEntry[] {
        return this._exposedGaps.toArray();
    }

    /**
     * Clear all exposed and staging gaps from the cache.
     *
     * Note: retains invalidating updates from removeOverlappingGaps()
     * for correctness of gaps inserted afterwards.
     *
     * @return {undefined}
     */
    clear(): void {
        this._stagingUpdates.newGaps = new GapSet(this.maxGapWeight);
        this._frozenUpdates.newGaps = new GapSet(this.maxGapWeight);
        this._exposedGaps = new GapSet(this.maxGapWeight);
    }
}
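A minimal usage sketch of the GapCache lifecycle documented above (key names, sizes and the surrounding timer are illustrative assumptions, not part of the diff):

import GapCache from './lib/algos/cache/GapCache';

// expose gaps after ~100ms; cache at most 1000 gaps of weight <= 500 each
const gapCache = new GapCache(100, 1000, 500);
gapCache.start();

// a listing saw only delete markers between these keys: record the gap,
// chaining the second call from the first call's lastKey (see NOTE 2 above)
gapCache.setGap('key0001', 'key0500', 500);
gapCache.setGap('key0500', 'key0999', 499);

// a write to a key invalidates any gap overlapping it, even before exposure
gapCache.removeOverlappingGaps(['key0042']);

// after the exposure delay (between 1x and 2x exposureDelayMs), lookups can
// skip the surviving range: 'key0001'..'key0500' was invalidated by the
// update on 'key0042', while the chained 'key0500'..'key0999' gap remains
setTimeout(async () => {
    const gap = await gapCache.lookupGap('key0400', 'key0600');
    // gap is expected to be { firstKey: 'key0500', lastKey: 'key0999', weight: 499 }
    gapCache.stop();
}, 300);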
lib/algos/cache/GapSet.ts
@ -0,0 +1,366 @@
import assert from 'assert';
import { OrderedSet } from '@js-sdsl/ordered-set';

import errors from '../../errors';

export type GapSetEntry = {
    firstKey: string,
    lastKey: string,
    weight: number,
};

export interface GapSetInterface {
    maxWeight: number;
    size: number;

    setGap: (firstKey: string, lastKey: string, weight: number) => GapSetEntry;
    removeOverlappingGaps: (overlappingKeys: string[]) => number;
    lookupGap: (minKey: string, maxKey?: string) => Promise<GapSetEntry | null>;
    [Symbol.iterator]: () => Iterator<GapSetEntry>;
    toArray: () => GapSetEntry[];
};

/**
 * Specialized data structure to support caching of listing "gaps",
 * i.e. ranges of keys that can be skipped over during listing
 * (because they only contain delete markers as latest versions)
 */
export default class GapSet implements GapSetInterface, Iterable<GapSetEntry> {
    _gaps: OrderedSet<GapSetEntry>;
    _maxWeight: number;

    /**
     * @constructor
     *
     * @param {number} maxWeight - weight threshold for each cached
     * gap (unitless). Triggers splitting gaps when reached
     */
    constructor(maxWeight: number) {
        this._gaps = new OrderedSet(
            [],
            (left: GapSetEntry, right: GapSetEntry) => (
                left.firstKey < right.firstKey ? -1 :
                left.firstKey > right.firstKey ? 1 : 0
            )
        );
        this._maxWeight = maxWeight;
    }

    /**
     * Create a GapSet from an array of gap entries (used in tests)
     */
    static createFromArray(gaps: GapSetEntry[], maxWeight: number): GapSet {
        const gapSet = new GapSet(maxWeight);
        for (const gap of gaps) {
            gapSet._gaps.insert(gap);
        }
        return gapSet;
    }

    /**
     * Record a gap between two keys, associated with a weight to limit
     * individual gap sizes in the cache.
     *
     * The function handles splitting and merging existing gaps to
     * maintain an optimal weight of cache entries.
     *
     * @param {string} firstKey - first key of the gap
     * @param {string} lastKey - last key of the gap, must be greater
     * than or equal to 'firstKey'
     * @param {number} weight - total weight between 'firstKey' and 'lastKey'
     * @return {GapSetEntry} - existing or new gap entry
     */
    setGap(firstKey: string, lastKey: string, weight: number): GapSetEntry {
        assert(lastKey >= firstKey);

        // Step 1/4: Find the closest left-overlapping gap, and either re-use it
        // or chain it with a new gap depending on the weights if it exists (otherwise
        // just creates a new gap).
        const curGapIt = this._gaps.reverseLowerBound(<GapSetEntry>{ firstKey });
        let curGap;
        if (curGapIt.isAccessible()) {
            curGap = curGapIt.pointer;
            if (curGap.lastKey >= lastKey) {
                // return fully overlapping gap already cached
                return curGap;
            }
        }
        let remainingWeight = weight;
        if (!curGap // no previous gap
            || curGap.lastKey < firstKey // previous gap not overlapping
            || (curGap.lastKey === firstKey // previous gap overlapping by one key...
                && curGap.weight + weight > this._maxWeight) // ...but we can't extend it
        ) {
            // create a new gap indexed by 'firstKey'
            curGap = { firstKey, lastKey: firstKey, weight: 0 };
            this._gaps.insert(curGap);
        } else if (curGap.lastKey > firstKey && weight > this._maxWeight) {
            // previous gap is either fully or partially contained in the new gap
            // and cannot be extended: subtract its weight from the total (heuristic
            // in case the previous gap doesn't start at 'firstKey', which is the
            // uncommon case)
            remainingWeight -= curGap.weight;

            // there may be an existing chained gap starting with the previous gap's
            // 'lastKey': use it if it exists
            const chainedGapIt = this._gaps.find(<GapSetEntry>{ firstKey: curGap.lastKey });
            if (chainedGapIt.isAccessible()) {
                curGap = chainedGapIt.pointer;
            } else {
                // no existing chained gap: chain a new gap to the previous gap
                curGap = {
                    firstKey: curGap.lastKey,
                    lastKey: curGap.lastKey,
                    weight: 0,
                };
                this._gaps.insert(curGap);
            }
        }
        // Step 2/4: Cleanup existing gaps fully included in firstKey -> lastKey, and
        // aggregate their weights in curGap to define the minimum weight up to the
        // last merged gap.
        let nextGap;
        while (true) {
            const nextGapIt = this._gaps.upperBound(<GapSetEntry>{ firstKey: curGap.firstKey });
            nextGap = nextGapIt.isAccessible() && nextGapIt.pointer;
            // stop the cleanup when no more gap or if the next gap is not fully
            // included in curGap
            if (!nextGap || nextGap.lastKey > lastKey) {
                break;
            }
            this._gaps.eraseElementByIterator(nextGapIt);
            curGap.lastKey = nextGap.lastKey;
            curGap.weight += nextGap.weight;
        }

        // Step 3/4: Extend curGap to lastKey, adjusting the weight.
        // At this point, curGap weight is the minimum weight of the finished gap, save it
        // for step 4.
        let minMergedWeight = curGap.weight;
        if (curGap.lastKey === firstKey && firstKey !== lastKey) {
            // extend the existing gap by the full amount 'firstKey -> lastKey'
            curGap.lastKey = lastKey;
            curGap.weight += remainingWeight;
        } else if (curGap.lastKey <= lastKey) {
            curGap.lastKey = lastKey;
            curGap.weight = remainingWeight;
        }

// Step 4/4: Find the closest right-overlapping gap, and if it exists, either merge
|
||||
// it or chain it with curGap depending on the weights.
|
||||
if (nextGap && nextGap.firstKey <= lastKey) {
|
||||
// nextGap overlaps with the new gap: check if we can merge it
|
||||
minMergedWeight += nextGap.weight;
|
||||
let mergedWeight;
|
||||
if (lastKey === nextGap.firstKey) {
|
||||
// nextGap is chained with curGap: add the full weight of nextGap
|
||||
mergedWeight = curGap.weight + nextGap.weight;
|
||||
} else {
|
||||
// strict overlap: don't add nextGap's weight unless
|
||||
// it's larger than the sum of merged ranges (as it is
|
||||
// then included in `minMergedWeight`)
|
||||
mergedWeight = Math.max(curGap.weight, minMergedWeight);
|
||||
}
|
||||
if (mergedWeight <= this._maxWeight) {
|
||||
// merge nextGap into curGap
|
||||
curGap.lastKey = nextGap.lastKey;
|
||||
curGap.weight = mergedWeight;
|
||||
this._gaps.eraseElementByKey(nextGap);
|
||||
} else {
|
||||
// adjust the last key to chain with nextGap and substract the next
|
||||
// gap's weight from curGap (heuristic)
|
||||
curGap.lastKey = nextGap.firstKey;
|
||||
curGap.weight = Math.max(mergedWeight - nextGap.weight, 0);
|
||||
curGap = nextGap;
|
||||
}
|
||||
}
|
||||
// return a copy of curGap
|
||||
return Object.assign({}, curGap);
|
||||
}
|
||||
|
||||
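The merge/chain behaviour of setGap() is easier to see on a small worked example. A hedged sketch: the GapSet constructor shown here (taking a maximum weight) is an assumption, as it is not part of this excerpt; the setGap() semantics are those of the code above.

// hypothetical construction with a maximum gap weight of 100
const gaps = new GapSet(100);

// no overlapping gap: a fresh gap is created
gaps.setGap('bar', 'baz', 10);  // -> { firstKey: 'bar', lastKey: 'baz', weight: 10 }

// extending from the existing gap's lastKey merges into the same entry,
// since the combined weight (30) stays under the maximum
gaps.setGap('baz', 'qux', 20);  // -> { firstKey: 'bar', lastKey: 'qux', weight: 30 }

// a range fully inside the cached gap returns the cached entry unchanged
gaps.setGap('bar', 'baz', 10);  // -> { firstKey: 'bar', lastKey: 'qux', weight: 30 }
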
    /**
     * Remove gaps that overlap with one or more keys in a given array or
     * OrderedSet. Used to invalidate gaps when keys are inserted or deleted.
     *
     * @param {OrderedSet<string> | string[]} overlappingKeys - remove gaps that overlap
     *   with any of this set of keys
     * @return {number} - how many gaps were removed
     */
    removeOverlappingGaps(overlappingKeys: OrderedSet<string> | string[]): number {
        // To optimize processing with a large number of keys and/or gaps, this function:
        //
        // 1. converts the overlappingKeys array to an OrderedSet (if not already an OrderedSet)
        // 2. queries both the gaps set and the overlapping keys set in a loop, which allows:
        //    - skipping ranges of overlapping keys at once when there is no new overlapping gap
        //    - skipping ranges of gaps at once when there is no overlapping key
        //
        // This way, it is efficient when the number of non-overlapping gaps is large
        // (which is the most common case in practice).

        let overlappingKeysSet;
        if (Array.isArray(overlappingKeys)) {
            overlappingKeysSet = new OrderedSet(overlappingKeys);
        } else {
            overlappingKeysSet = overlappingKeys;
        }
        const firstKeyIt = overlappingKeysSet.begin();
        let currentKey = firstKeyIt.isAccessible() && firstKeyIt.pointer;
        let nRemoved = 0;
        while (currentKey) {
            const closestGapIt = this._gaps.reverseUpperBound(<GapSetEntry>{ firstKey: currentKey });
            if (closestGapIt.isAccessible()) {
                const closestGap = closestGapIt.pointer;
                if (currentKey <= closestGap.lastKey) {
                    // currentKey overlaps closestGap: remove the gap
                    this._gaps.eraseElementByIterator(closestGapIt);
                    nRemoved += 1;
                }
            }
            const nextGapIt = this._gaps.lowerBound(<GapSetEntry>{ firstKey: currentKey });
            if (!nextGapIt.isAccessible()) {
                // no more gap: we're done
                return nRemoved;
            }
            const nextGap = nextGapIt.pointer;
            // advance to the last key potentially overlapping with nextGap
            let currentKeyIt = overlappingKeysSet.reverseLowerBound(nextGap.lastKey);
            if (currentKeyIt.isAccessible()) {
                currentKey = currentKeyIt.pointer;
                if (currentKey >= nextGap.firstKey) {
                    // currentKey overlaps nextGap: remove the gap
                    this._gaps.eraseElementByIterator(nextGapIt);
                    nRemoved += 1;
                }
            }
            // advance to the first key potentially overlapping with another gap
            currentKeyIt = overlappingKeysSet.lowerBound(nextGap.lastKey);
            currentKey = currentKeyIt.isAccessible() && currentKeyIt.pointer;
        }
        return nRemoved;
    }

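Continuing the hypothetical sketch above: invalidation removes any gap that one of the given keys lands in, and leaves non-overlapping gaps untouched.

// 'bag' sorts before 'bar' and overlaps nothing;
// 'baw' falls inside the cached bar..qux gap
const nRemoved = gaps.removeOverlappingGaps(['bag', 'baw']);
// nRemoved === 1, and the bar..qux gap has been erased from the set
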
    /**
     * Internal helper to coalesce multiple chained gaps into a single gap.
     *
     * It is only used to construct lookupGap() return values and
     * doesn't modify the GapSet.
     *
     * NOTE: The function may take a noticeable amount of time and CPU
     * to execute if a large number of chained gaps have to be
     * coalesced, but it should never take more than a few seconds. In
     * most cases it should take less than a millisecond. It regularly
     * yields to the nodejs event loop to avoid blocking it during a
     * long execution.
     *
     * @param {GapSetEntry} firstGap - first gap of the chain to coalesce with
     *   the next ones in the chain
     * @return {Promise<GapSetEntry>} - a new coalesced entry, as a Promise
     */
    _coalesceGapChain(firstGap: GapSetEntry): Promise<GapSetEntry> {
        return new Promise(resolve => {
            const coalescedGap: GapSetEntry = Object.assign({}, firstGap);
            const coalesceGapChainIteration = () => {
                // efficiency trade-off: 100 iterations of log(N) complexity lookups should
                // not block the event loop for too long
                for (let opCounter = 0; opCounter < 100; ++opCounter) {
                    const chainedGapIt = this._gaps.find(
                        <GapSetEntry>{ firstKey: coalescedGap.lastKey });
                    if (!chainedGapIt.isAccessible()) {
                        // chain is complete
                        return resolve(coalescedGap);
                    }
                    const chainedGap = chainedGapIt.pointer;
                    if (chainedGap.firstKey === chainedGap.lastKey) {
                        // found a single-key gap: chain is complete
                        return resolve(coalescedGap);
                    }
                    coalescedGap.lastKey = chainedGap.lastKey;
                    coalescedGap.weight += chainedGap.weight;
                }
                // yield to the event loop before continuing the process
                // of coalescing the gap chain
                return process.nextTick(coalesceGapChainIteration);
            };
            coalesceGapChainIteration();
        });
    }

    /**
     * Lookup the next gap that overlaps with [minKey, maxKey]. Internally chained
     * gaps are coalesced in the response into a single contiguous large gap.
     *
     * @param {string} minKey - minimum key overlapping with the returned gap
     * @param {string} [maxKey] - maximum key overlapping with the returned gap
     * @return {Promise<GapSetEntry | null>} - result of the lookup if a gap
     *   was found, null otherwise, as a Promise
     */
    async lookupGap(minKey: string, maxKey?: string): Promise<GapSetEntry | null> {
        let firstGap: GapSetEntry | null = null;
        const minGapIt = this._gaps.reverseLowerBound(<GapSetEntry>{ firstKey: minKey });
        const minGap = minGapIt.isAccessible() && minGapIt.pointer;
        if (minGap && minGap.lastKey >= minKey) {
            firstGap = minGap;
        } else {
            const maxGapIt = this._gaps.upperBound(<GapSetEntry>{ firstKey: minKey });
            const maxGap = maxGapIt.isAccessible() && maxGapIt.pointer;
            if (maxGap && (maxKey === undefined || maxGap.firstKey <= maxKey)) {
                firstGap = maxGap;
            }
        }
        if (!firstGap) {
            return null;
        }
        return this._coalesceGapChain(firstGap);
    }

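A minimal usage sketch of the lookup path (the GapSet instance and key names are hypothetical):

// somewhere in an async listing context
const gap = await gaps.lookupGap('foo', 'fop');
if (gap) {
    // gap.firstKey .. gap.lastKey can be skipped over by the listing;
    // gap.weight approximates how many keys lie inside the gap
}
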
    /**
     * Get the maximum weight setting for individual gaps.
     *
     * @return {number} - maximum weight of individual gaps
     */
    get maxWeight(): number {
        return this._maxWeight;
    }

    /**
     * Set the maximum weight setting for individual gaps.
     *
     * @param {number} gapWeight - maximum weight of individual gaps
     */
    set maxWeight(gapWeight: number) {
        this._maxWeight = gapWeight;
    }

    /**
     * Get the number of gaps stored in this set.
     *
     * @return {number} - number of gaps stored in this set
     */
    get size(): number {
        return this._gaps.size();
    }

    /**
     * Iterate over each gap of the set, ordered by first key
     *
     * @return {Iterator<GapSetEntry>} - an iterator over all gaps
     *   Example:
     *     for (const gap of myGapSet) { ... }
     */
    [Symbol.iterator](): Iterator<GapSetEntry> {
        return this._gaps[Symbol.iterator]();
    }

    /**
     * Return an array containing all gaps, ordered by first key
     *
     * NOTE: there is a toArray() method in the OrderedSet implementation
     * but it does not scale well and overflows the stack quickly. This is
     * why we provide an implementation based on an iterator.
     *
     * @return {GapSetEntry[]} - an array containing all gaps
     */
    toArray(): GapSetEntry[] {
        return [...this];
    }
}

@@ -0,0 +1,124 @@
export enum HeapOrder {
    Min = -1,
    Max = 1,
}

export enum CompareResult {
    LT = -1,
    EQ = 0,
    GT = 1,
}

export type CompareFunction = (x: any, y: any) => CompareResult;

export class Heap {
    size: number;
    _maxSize: number;
    _order: HeapOrder;
    _heap: any[];
    _cmpFn: CompareFunction;

    constructor(size: number, order: HeapOrder, cmpFn: CompareFunction) {
        this.size = 0;
        this._maxSize = size;
        this._order = order;
        this._cmpFn = cmpFn;
        this._heap = new Array<any>(this._maxSize);
    }

    _parent(i: number): number {
        return Math.floor((i - 1) / 2);
    }

    _left(i: number): number {
        return Math.floor((2 * i) + 1);
    }

    _right(i: number): number {
        return Math.floor((2 * i) + 2);
    }

    _shouldSwap(childIdx: number, parentIdx: number): boolean {
        return this._cmpFn(this._heap[childIdx], this._heap[parentIdx]) as number === this._order as number;
    }

    _swap(i: number, j: number) {
        const tmp = this._heap[i];
        this._heap[i] = this._heap[j];
        this._heap[j] = tmp;
    }

    _heapify(i: number) {
        const l = this._left(i);
        const r = this._right(i);
        let c = i;

        if (l < this.size && this._shouldSwap(l, c)) {
            c = l;
        }

        if (r < this.size && this._shouldSwap(r, c)) {
            c = r;
        }

        if (c != i) {
            this._swap(c, i);
            this._heapify(c);
        }
    }

    add(item: any): any {
        if (this.size >= this._maxSize) {
            return new Error('Max heap size reached');
        }

        ++this.size;
        let c = this.size - 1;
        this._heap[c] = item;

        while (c > 0) {
            if (!this._shouldSwap(c, this._parent(c))) {
                return null;
            }

            this._swap(c, this._parent(c));
            c = this._parent(c);
        }

        return null;
    };

    remove(): any {
        if (this.size <= 0) {
            return null;
        }

        const ret = this._heap[0];
        this._heap[0] = this._heap[this.size - 1];
        // shrink the heap before sifting down, so that _heapify() no longer
        // considers the slot that held the moved last element (sifting down
        // first could swap that duplicate back into the live part of the heap)
        --this.size;
        this._heapify(0);

        return ret;
    };

    peek(): any {
        if (this.size <= 0) {
            return null;
        }

        return this._heap[0];
    };
}

export class MinHeap extends Heap {
    constructor(size: number, cmpFn: CompareFunction) {
        super(size, HeapOrder.Min, cmpFn);
    }
}

export class MaxHeap extends Heap {
    constructor(size: number, cmpFn: CompareFunction) {
        super(size, HeapOrder.Max, cmpFn);
    }
}

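A small usage sketch of the heap API above. The comparison function must return a CompareResult; an element sifts up or down when the comparison result matches the heap's HeapOrder (Min or Max):

const numCmp: CompareFunction = (x: any, y: any): CompareResult => {
    if (x < y) {
        return CompareResult.LT;
    }
    return x > y ? CompareResult.GT : CompareResult.EQ;
};

const heap = new MinHeap(10, numCmp);
heap.add(5);
heap.add(2);
heap.add(8);
// peek() returns the smallest item without removing it:
// heap.peek() === 2; heap.remove() === 2; then heap.peek() === 5
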
@@ -1,6 +1,6 @@
 'use strict'; // eslint-disable-line strict

-const { FILTER_SKIP, SKIP_NONE } = require('./tools');
+const { FILTER_ACCEPT, SKIP_NONE } = require('./tools');

 // Use a heuristic to amortize the cost of JSON
 // serialization/deserialization only on largest metadata where the
@@ -92,21 +92,26 @@ class Extension {
      * @param {object} entry - a listing entry from metadata
      *                         expected format: { key, value }
      * @return {number} - result of filtering the entry:
-     *                    > 0: entry is accepted and included in the result
-     *                    = 0: entry is accepted but not included (skipping)
-     *                    < 0: entry is not accepted, listing should finish
+     *   FILTER_ACCEPT: entry is accepted and may or may not be included
+     *                  in the result
+     *   FILTER_SKIP: listing may skip directly (with "gte" param) to
+     *                the key returned by the skipping() method
+     *   FILTER_END: the results are complete, listing can be stopped
      */
-    filter(entry) {
-        return entry ? FILTER_SKIP : FILTER_SKIP;
+    filter(/* entry: { key, value } */) {
+        return FILTER_ACCEPT;
     }

     /**
-     * Provides the insight into why filter is skipping an entry. This could be
-     * because it is skipping a range of delimited keys or a range of specific
-     * version when doing master version listing.
+     * Provides the next key at which the listing task is allowed to skip to.
+     * This allows skipping over:
+     *   - a key prefix ending with the delimiter
+     *   - all remaining versions of an object when doing a current
+     *     versions listing in v0 format
+     *   - a cached "gap" of deleted objects when doing a current
+     *     versions listing in v0 format
      *
-     * @return {string} - the insight: a common prefix or a master key,
-     *                    or SKIP_NONE if there is no insight
+     * @return {string} - the next key at which the listing task is allowed to skip to
      */
     skipping() {
         return SKIP_NONE;
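For reference, a minimal concrete extension following the revised filter contract might look like the hedged sketch below. It is not part of the change; the Extension constructor signature and the tools exports are taken from this diff, everything else (class name, fields) is hypothetical:

const Extension = require('./Extension').default;
const { FILTER_ACCEPT, FILTER_END } = require('./tools');

class FirstNKeys extends Extension {
    constructor(parameters, logger) {
        super(parameters, logger);
        this.keys = 0;
        this.maxKeys = parameters.maxKeys || 1000;
        this.res = [];
    }

    filter(entry) {
        if (this.keys >= this.maxKeys) {
            return FILTER_END; // results are complete, stop the listing
        }
        this.res.push(entry);
        ++this.keys;
        return FILTER_ACCEPT;
    }

    result() {
        return this.res;
    }
}
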
@@ -1,7 +1,7 @@
 'use strict'; // eslint-disable-line strict

 const { inc, checkLimit, listingParamsMasterKeysV0ToV1,
-    FILTER_END, FILTER_ACCEPT } = require('./tools');
+    FILTER_END, FILTER_ACCEPT, SKIP_NONE } = require('./tools');
 const DEFAULT_MAX_KEYS = 1000;
 const VSConst = require('../../versioning/constants').VersioningConstants;
 const { DbPrefixes, BucketVersioningKeyFormat } = VSConst;

@@ -163,7 +163,7 @@ class MultipartUploads {
     }

     skipping() {
-        return '';
+        return SKIP_NONE;
     }

     /**
@@ -2,7 +2,7 @@

 const Extension = require('./Extension').default;

-const { checkLimit, FILTER_END, FILTER_ACCEPT, FILTER_SKIP } = require('./tools');
+const { checkLimit, FILTER_END, FILTER_ACCEPT } = require('./tools');
 const DEFAULT_MAX_KEYS = 10000;

 /**
@@ -91,7 +91,7 @@ class List extends Extension {
      * < 0 : listing done
      */
     filter(elem) {
-        // Check first in case of maxkeys <= 0
+        // Check if the result array is full
         if (this.keys >= this.maxKeys) {
             return FILTER_END;
         }
@@ -99,7 +99,7 @@ class List extends Extension {
             this.filterKeyStartsWith !== undefined) &&
             typeof elem === 'object' &&
             !this.customFilter(elem.value)) {
-            return FILTER_SKIP;
+            return FILTER_ACCEPT;
         }
         if (typeof elem === 'object') {
             this.res.push({
@@ -1,274 +0,0 @@
'use strict'; // eslint-disable-line strict

const Extension = require('./Extension').default;
const { inc, listingParamsMasterKeysV0ToV1,
    FILTER_END, FILTER_ACCEPT, FILTER_SKIP } = require('./tools');
const VSConst = require('../../versioning/constants').VersioningConstants;
const { DbPrefixes, BucketVersioningKeyFormat } = VSConst;

/**
 * Find the common prefix in the path
 *
 * @param {String} key - path of the object
 * @param {String} delimiter - separator
 * @param {Number} delimiterIndex - 'folder' index in the path
 * @return {String} - CommonPrefix
 */
function getCommonPrefix(key, delimiter, delimiterIndex) {
    return key.substring(0, delimiterIndex + delimiter.length);
}

/**
 * Handle object listing with parameters
 *
 * @prop {String[]} CommonPrefixes - 'folders' defined by the delimiter
 * @prop {String[]} Contents - 'files' to list
 * @prop {Boolean} IsTruncated - truncated listing flag
 * @prop {String|undefined} NextMarker - marker per amazon format
 * @prop {Number} keys - count of listed keys
 * @prop {String|undefined} delimiter - separator per amazon format
 * @prop {String|undefined} prefix - prefix per amazon format
 * @prop {Number} maxKeys - number of keys to list
 */
class Delimiter extends Extension {
    /**
     * Create a new Delimiter instance
     * @constructor
     * @param {Object} parameters - listing parameters
     * @param {String} [parameters.delimiter] - delimiter per amazon format
     * @param {String} [parameters.prefix] - prefix per amazon format
     * @param {String} [parameters.marker] - marker per amazon format
     * @param {Number} [parameters.maxKeys] - number of keys to list
     * @param {Boolean} [parameters.v2] - indicates whether v2 format
     * @param {String} [parameters.startAfter] - marker per amazon format
     * @param {String} [parameters.continuationToken] - obfuscated amazon token
     * @param {Boolean} [parameters.alphabeticalOrder] - Either the result is
     *   alphabetically ordered or not
     * @param {RequestLogger} logger - The logger of the request
     * @param {String} [vFormat] - versioning key format
     */
    constructor(parameters, logger, vFormat) {
        super(parameters, logger);
        // original listing parameters
        this.delimiter = parameters.delimiter;
        this.prefix = parameters.prefix;
        this.marker = parameters.marker;
        this.maxKeys = parameters.maxKeys || 1000;
        this.startAfter = parameters.startAfter;
        this.continuationToken = parameters.continuationToken;
        this.alphabeticalOrder =
            typeof parameters.alphabeticalOrder !== 'undefined' ?
                parameters.alphabeticalOrder : true;

        this.vFormat = vFormat || BucketVersioningKeyFormat.v0;
        // results
        this.CommonPrefixes = [];
        this.Contents = [];
        this.IsTruncated = false;
        this.NextMarker = parameters.marker;
        this.NextContinuationToken =
            parameters.continuationToken || parameters.startAfter;

        this.startMarker = parameters.v2 ? 'startAfter' : 'marker';
        this.continueMarker = parameters.v2 ? 'continuationToken' : 'marker';
        this.nextContinueMarker = parameters.v2 ?
            'NextContinuationToken' : 'NextMarker';

        if (this.delimiter !== undefined &&
            this[this.nextContinueMarker] !== undefined &&
            this[this.nextContinueMarker].startsWith(this.prefix || '')) {
            const nextDelimiterIndex =
                this[this.nextContinueMarker].indexOf(this.delimiter,
                    this.prefix ? this.prefix.length : 0);
            this[this.nextContinueMarker] =
                this[this.nextContinueMarker].slice(0, nextDelimiterIndex +
                    this.delimiter.length);
        }

        Object.assign(this, {
            [BucketVersioningKeyFormat.v0]: {
                genMDParams: this.genMDParamsV0,
                getObjectKey: this.getObjectKeyV0,
                skipping: this.skippingV0,
            },
            [BucketVersioningKeyFormat.v1]: {
                genMDParams: this.genMDParamsV1,
                getObjectKey: this.getObjectKeyV1,
                skipping: this.skippingV1,
            },
        }[this.vFormat]);
    }

    genMDParamsV0() {
        const params = {};
        if (this.prefix) {
            params.gte = this.prefix;
            params.lt = inc(this.prefix);
        }
        const startVal = this[this.continueMarker] || this[this.startMarker];
        if (startVal) {
            if (params.gte && params.gte > startVal) {
                return params;
            }
            delete params.gte;
            params.gt = startVal;
        }
        return params;
    }

    genMDParamsV1() {
        const params = this.genMDParamsV0();
        return listingParamsMasterKeysV0ToV1(params);
    }

    /**
     * check if the max keys count has been reached and set the
     * final state of the result if it is the case
     * @return {Boolean} - indicates if the iteration has to stop
     */
    _reachedMaxKeys() {
        if (this.keys >= this.maxKeys) {
            // In cases of maxKeys <= 0 -> IsTruncated = false
            this.IsTruncated = this.maxKeys > 0;
            return true;
        }
        return false;
    }

    /**
     * Add a (key, value) tuple to the listing
     * Set the NextMarker to the current key
     * Increment the keys counter
     * @param {String} key - The key to add
     * @param {String} value - The value of the key
     * @return {number} - indicates if iteration should continue
     */
    addContents(key, value) {
        if (this._reachedMaxKeys()) {
            return FILTER_END;
        }
        this.Contents.push({ key, value: this.trimMetadata(value) });
        this[this.nextContinueMarker] = key;
        ++this.keys;
        return FILTER_ACCEPT;
    }

    getObjectKeyV0(obj) {
        return obj.key;
    }

    getObjectKeyV1(obj) {
        return obj.key.slice(DbPrefixes.Master.length);
    }

    /**
     * Filter to apply on each iteration, based on:
     * - prefix
     * - delimiter
     * - maxKeys
     * The marker is being handled directly by levelDB
     * @param {Object} obj - The key and value of the element
     * @param {String} obj.key - The key of the element
     * @param {String} obj.value - The value of the element
     * @return {number} - indicates if iteration should continue
     */
    filter(obj) {
        const key = this.getObjectKey(obj);
        const value = obj.value;
        if ((this.prefix && !key.startsWith(this.prefix))
            || (this.alphabeticalOrder
                && typeof this[this.nextContinueMarker] === 'string'
                && key <= this[this.nextContinueMarker])) {
            return FILTER_SKIP;
        }
        if (this.delimiter) {
            const baseIndex = this.prefix ? this.prefix.length : 0;
            const delimiterIndex = key.indexOf(this.delimiter, baseIndex);
            if (delimiterIndex === -1) {
                return this.addContents(key, value);
            }
            return this.addCommonPrefix(key, delimiterIndex);
        }
        return this.addContents(key, value);
    }

    /**
     * Add a Common Prefix in the list
     * @param {String} key - object name
     * @param {Number} index - after prefix starting point
     * @return {Boolean} - indicates if iteration should continue
     */
    addCommonPrefix(key, index) {
        const commonPrefix = getCommonPrefix(key, this.delimiter, index);
        if (this.CommonPrefixes.indexOf(commonPrefix) === -1
            && this[this.nextContinueMarker] !== commonPrefix) {
            if (this._reachedMaxKeys()) {
                return FILTER_END;
            }
            this.CommonPrefixes.push(commonPrefix);
            this[this.nextContinueMarker] = commonPrefix;
            ++this.keys;
            return FILTER_ACCEPT;
        }
        return FILTER_SKIP;
    }

    /**
     * If repd happens to want to skip listing on a bucket in v0
     * versioning key format, here is an idea.
     *
     * @return {string} - the present range (NextMarker) if repd believes
     *   that it's enough and should move on
     */
    skippingV0() {
        return this[this.nextContinueMarker];
    }

    /**
     * If repd happens to want to skip listing on a bucket in v1
     * versioning key format, here is an idea.
     *
     * @return {string} - the present range (NextMarker) if repd believes
     *   that it's enough and should move on
     */
    skippingV1() {
        return DbPrefixes.Master + this[this.nextContinueMarker];
    }

    /**
     * Return an object containing all mandatory fields to use once the
     * iteration is done, doesn't show a NextMarker field if the output
     * isn't truncated
     * @return {Object} - following amazon format
     */
    result() {
        /* NextMarker is only provided when delimiter is used.
         * specified in v1 listing documentation
         * http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGET.html
         */
        const result = {
            CommonPrefixes: this.CommonPrefixes,
            Contents: this.Contents,
            IsTruncated: this.IsTruncated,
            Delimiter: this.delimiter,
        };
        if (this.parameters.v2) {
            result.NextContinuationToken = this.IsTruncated
                ? this.NextContinuationToken : undefined;
        } else {
            result.NextMarker = (this.IsTruncated && this.delimiter)
                ? this.NextMarker : undefined;
        }
        return result;
    }
}

module.exports = { Delimiter };

@@ -0,0 +1,356 @@
'use strict'; // eslint-disable-line strict

const Extension = require('./Extension').default;
const { inc, listingParamsMasterKeysV0ToV1,
    FILTER_END, FILTER_ACCEPT, FILTER_SKIP, SKIP_NONE } = require('./tools');
const VSConst = require('../../versioning/constants').VersioningConstants;
const { DbPrefixes, BucketVersioningKeyFormat } = VSConst;

export interface FilterState {
    id: number,
};

export interface FilterReturnValue {
    FILTER_ACCEPT,
    FILTER_SKIP,
    FILTER_END,
};

export const enum DelimiterFilterStateId {
    NotSkipping = 1,
    SkippingPrefix = 2,
};

export interface DelimiterFilterState_NotSkipping extends FilterState {
    id: DelimiterFilterStateId.NotSkipping,
};

export interface DelimiterFilterState_SkippingPrefix extends FilterState {
    id: DelimiterFilterStateId.SkippingPrefix,
    prefix: string;
};

type KeyHandler = (key: string, value: string) => FilterReturnValue;

export type ResultObject = {
    CommonPrefixes: string[];
    Contents: {
        key: string;
        value: string;
    }[];
    IsTruncated: boolean;
    Delimiter ?: string;
    NextMarker ?: string;
    NextContinuationToken ?: string;
};

/**
 * Handle object listing with parameters
 *
 * @prop {String[]} CommonPrefixes - 'folders' defined by the delimiter
 * @prop {String[]} Contents - 'files' to list
 * @prop {Boolean} IsTruncated - truncated listing flag
 * @prop {String|undefined} NextMarker - marker per amazon format
 * @prop {Number} keys - count of listed keys
 * @prop {String|undefined} delimiter - separator per amazon format
 * @prop {String|undefined} prefix - prefix per amazon format
 * @prop {Number} maxKeys - number of keys to list
 */
export class Delimiter extends Extension {

    state: FilterState;
    keyHandlers: { [id: number]: KeyHandler };

    /**
     * Create a new Delimiter instance
     * @constructor
     * @param {Object} parameters - listing parameters
     * @param {String} [parameters.delimiter] - delimiter per amazon format
     * @param {String} [parameters.prefix] - prefix per amazon format
     * @param {String} [parameters.marker] - marker per amazon format
     * @param {Number} [parameters.maxKeys] - number of keys to list
     * @param {Boolean} [parameters.v2] - indicates whether v2 format
     * @param {String} [parameters.startAfter] - marker per amazon format
     * @param {String} [parameters.continuationToken] - obfuscated amazon token
     * @param {RequestLogger} logger - The logger of the request
     * @param {String} [vFormat] - versioning key format
     */
    constructor(parameters, logger, vFormat) {
        super(parameters, logger);
        // original listing parameters
        this.delimiter = parameters.delimiter;
        this.prefix = parameters.prefix;
        this.maxKeys = parameters.maxKeys || 1000;

        if (parameters.v2) {
            this.marker = parameters.continuationToken || parameters.startAfter;
        } else {
            this.marker = parameters.marker;
        }
        this.nextMarker = this.marker;

        this.vFormat = vFormat || BucketVersioningKeyFormat.v0;
        // results
        this.CommonPrefixes = [];
        this.Contents = [];
        this.IsTruncated = false;
        this.keyHandlers = {};

        Object.assign(this, {
            [BucketVersioningKeyFormat.v0]: {
                genMDParams: this.genMDParamsV0,
                getObjectKey: this.getObjectKeyV0,
                skipping: this.skippingV0,
            },
            [BucketVersioningKeyFormat.v1]: {
                genMDParams: this.genMDParamsV1,
                getObjectKey: this.getObjectKeyV1,
                skipping: this.skippingV1,
            },
        }[this.vFormat]);

        // if there is a delimiter, we may skip ranges by prefix,
        // hence using the NotSkippingPrefix flavor that checks the
        // subprefix up to the delimiter for the NotSkipping state
        if (this.delimiter) {
            this.setKeyHandler(
                DelimiterFilterStateId.NotSkipping,
                this.keyHandler_NotSkippingPrefix.bind(this));
        } else {
            // listing without a delimiter never has to skip over any
            // prefix -> use NeverSkipping flavor for the NotSkipping
            // state
            this.setKeyHandler(
                DelimiterFilterStateId.NotSkipping,
                this.keyHandler_NeverSkipping.bind(this));
        }
        this.setKeyHandler(
            DelimiterFilterStateId.SkippingPrefix,
            this.keyHandler_SkippingPrefix.bind(this));

        this.state = <DelimiterFilterState_NotSkipping> {
            id: DelimiterFilterStateId.NotSkipping,
        };
    }

    genMDParamsV0() {
        const params: { gt ?: string, gte ?: string, lt ?: string } = {};
        if (this.prefix) {
            params.gte = this.prefix;
            params.lt = inc(this.prefix);
        }
        if (this.marker && this.delimiter) {
            const commonPrefix = this.getCommonPrefix(this.marker);
            if (commonPrefix) {
                const afterPrefix = inc(commonPrefix);
                if (!params.gte || afterPrefix > params.gte) {
                    params.gte = afterPrefix;
                }
            }
        }
        if (this.marker && (!params.gte || this.marker >= params.gte)) {
            delete params.gte;
            params.gt = this.marker;
        }
        return params;
    }

    genMDParamsV1() {
        const params = this.genMDParamsV0();
        return listingParamsMasterKeysV0ToV1(params);
    }

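A hedged illustration of the range computed above (the parameter values and logger are hypothetical; inc() is assumed to bump the last character of its argument, as its uses in this diff suggest). With a delimiter, resuming from a marker skips the whole common prefix the marker falls under, rather than resuming just after the marker itself:

const listing = new Delimiter(
    { prefix: 'photos/', delimiter: '/', marker: 'photos/2021/a.jpg' },
    logger);
listing.genMDParamsV0();
// -> { gte: inc('photos/2021/'), lt: inc('photos/') }
// i.e. the database scan restarts right after the 'photos/2021/' common
// prefix instead of walking the remaining keys under it
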
    /**
     * check if the max keys count has been reached and set the
     * final state of the result if it is the case
     * @return {Boolean} - indicates if the iteration has to stop
     */
    _reachedMaxKeys(): boolean {
        if (this.keys >= this.maxKeys) {
            // In cases of maxKeys <= 0 -> IsTruncated = false
            this.IsTruncated = this.maxKeys > 0;
            return true;
        }
        return false;
    }

    /**
     * Add a (key, value) tuple to the listing
     * Set the NextMarker to the current key
     * Increment the keys counter
     * @param {String} key - The key to add
     * @param {String} value - The value of the key
     * @return {undefined}
     */
    addContents(key: string, value: string): void {
        this.Contents.push({ key, value: this.trimMetadata(value) });
        ++this.keys;
        this.nextMarker = key;
    }

    getCommonPrefix(key: string): string | undefined {
        if (!this.delimiter) {
            return undefined;
        }
        const baseIndex = this.prefix ? this.prefix.length : 0;
        const delimiterIndex = key.indexOf(this.delimiter, baseIndex);
        if (delimiterIndex === -1) {
            return undefined;
        }
        return key.substring(0, delimiterIndex + this.delimiter.length);
    }

    /**
     * Add a Common Prefix in the list
     * @param {String} commonPrefix - common prefix to add
     * @param {String} key - full key starting with commonPrefix
     * @return {undefined}
     */
    addCommonPrefix(commonPrefix: string, key: string): void {
        // add the new prefix to the list
        this.CommonPrefixes.push(commonPrefix);
        ++this.keys;
        this.nextMarker = commonPrefix;
    }

    addCommonPrefixOrContents(key: string, value: string): string | undefined {
        // add the subprefix to the common prefixes if the key has the delimiter
        const commonPrefix = this.getCommonPrefix(key);
        if (commonPrefix) {
            this.addCommonPrefix(commonPrefix, key);
            return commonPrefix;
        }
        this.addContents(key, value);
        return undefined;
    }

    getObjectKeyV0(obj: { key: string }): string {
        return obj.key;
    }

    getObjectKeyV1(obj: { key: string }): string {
        return obj.key.slice(DbPrefixes.Master.length);
    }

    /**
     * Filter to apply on each iteration, based on:
     * - prefix
     * - delimiter
     * - maxKeys
     * The marker is being handled directly by levelDB
     * @param {Object} obj - The key and value of the element
     * @param {String} obj.key - The key of the element
     * @param {String} obj.value - The value of the element
     * @return {number} - indicates if iteration should continue
     */
    filter(obj: { key: string, value: string }): FilterReturnValue {
        const key = this.getObjectKey(obj);
        const value = obj.value;

        return this.handleKey(key, value);
    }

    setState(state: FilterState): void {
        this.state = state;
    }

    setKeyHandler(stateId: number, keyHandler: KeyHandler): void {
        this.keyHandlers[stateId] = keyHandler;
    }

    handleKey(key: string, value: string): FilterReturnValue {
        return this.keyHandlers[this.state.id](key, value);
    }

    keyHandler_NeverSkipping(key: string, value: string): FilterReturnValue {
        if (this._reachedMaxKeys()) {
            return FILTER_END;
        }
        this.addContents(key, value);
        return FILTER_ACCEPT;
    }

    keyHandler_NotSkippingPrefix(key: string, value: string): FilterReturnValue {
        if (this._reachedMaxKeys()) {
            return FILTER_END;
        }
        const commonPrefix = this.addCommonPrefixOrContents(key, value);
        if (commonPrefix) {
            // transition into SkippingPrefix state to skip all following keys
            // while they start with the same prefix
            this.setState(<DelimiterFilterState_SkippingPrefix> {
                id: DelimiterFilterStateId.SkippingPrefix,
                prefix: commonPrefix,
            });
        }
        return FILTER_ACCEPT;
    }

    keyHandler_SkippingPrefix(key: string, value: string): FilterReturnValue {
        const { prefix } = <DelimiterFilterState_SkippingPrefix> this.state;
        if (key.startsWith(prefix)) {
            return FILTER_SKIP;
        }
        this.setState(<DelimiterFilterState_NotSkipping> {
            id: DelimiterFilterStateId.NotSkipping,
        });
        return this.handleKey(key, value);
    }

    skippingBase(): string | undefined {
        switch (this.state.id) {
        case DelimiterFilterStateId.SkippingPrefix:
            const { prefix } = <DelimiterFilterState_SkippingPrefix> this.state;
            return inc(prefix);

        default:
            return SKIP_NONE;
        }
    }

    skippingV0() {
        return this.skippingBase();
    }

    skippingV1() {
        const skipTo = this.skippingBase();
        if (skipTo === SKIP_NONE) {
            return SKIP_NONE;
        }
        return DbPrefixes.Master + skipTo;
    }

    /**
     * Return an object containing all mandatory fields to use once the
     * iteration is done, doesn't show a NextMarker field if the output
     * isn't truncated
     * @return {Object} - following amazon format
     */
    result(): ResultObject {
        /* NextMarker is only provided when delimiter is used.
         * specified in v1 listing documentation
         * http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGET.html
         */
        const result: ResultObject = {
            CommonPrefixes: this.CommonPrefixes,
            Contents: this.Contents,
            IsTruncated: this.IsTruncated,
            Delimiter: this.delimiter,
        };
        if (this.parameters.v2) {
            result.NextContinuationToken = this.IsTruncated
                ? this.nextMarker : undefined;
        } else {
            result.NextMarker = (this.IsTruncated && this.delimiter)
                ? this.nextMarker : undefined;
        }
        return result;
    }
}

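Putting the state machine together, an end-to-end sketch under stated assumptions: the entries iterable and the logger are hypothetical; filter(), skipping() and result() are the code above:

const { FILTER_END, FILTER_SKIP } = require('./tools');

const listing = new Delimiter({ delimiter: '/', maxKeys: 1000 }, logger);
for (const entry of entriesFromMetadata) { // { key, value } records, in key order
    const status = listing.filter(entry);
    if (status === FILTER_END) {
        break; // maxKeys reached
    }
    // on FILTER_SKIP, a real metadata backend would jump ahead to the key
    // returned by listing.skipping() instead of iterating one entry at a time
}
const { CommonPrefixes, Contents, IsTruncated } = listing.result();
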
@@ -0,0 +1,127 @@
const { DelimiterMaster } = require('./delimiterMaster');
const { FILTER_ACCEPT, FILTER_END } = require('./tools');

type ResultObject = {
    Contents: {
        key: string;
        value: string;
    }[];
    IsTruncated: boolean;
    NextMarker ?: string;
};

/**
 * Handle object listing with parameters. This extends the base class DelimiterMaster
 * to return the master/current versions.
 */
class DelimiterCurrent extends DelimiterMaster {
    /**
     * Delimiter listing of current versions.
     * @param {Object} parameters - listing parameters
     * @param {String} parameters.beforeDate - limit the response to keys older than beforeDate
     * @param {String} parameters.excludedDataStoreName - excluded datastore name
     * @param {Number} parameters.maxScannedLifecycleListingEntries - max number of entries to be scanned
     * @param {RequestLogger} logger - The logger of the request
     * @param {String} [vFormat] - versioning key format
     */
    constructor(parameters, logger, vFormat) {
        super(parameters, logger, vFormat);

        this.beforeDate = parameters.beforeDate;
        this.excludedDataStoreName = parameters.excludedDataStoreName;
        this.maxScannedLifecycleListingEntries = parameters.maxScannedLifecycleListingEntries;
        this.scannedKeys = 0;
    }

    genMDParamsV0() {
        const params = super.genMDParamsV0();
        // lastModified and dataStoreName parameters are used by metadata that enables built-in filtering,
        // a feature currently exclusive to MongoDB
        if (this.beforeDate) {
            params.lastModified = {
                lt: this.beforeDate,
            };
        }

        if (this.excludedDataStoreName) {
            params.dataStoreName = {
                ne: this.excludedDataStoreName,
            };
        }

        return params;
    }

    /**
     * Parses the stringified entry's value.
     * @param s - stringified value
     * @return - undefined if parsing fails, otherwise it contains the parsed value.
     */
    _parse(s) {
        let p;
        try {
            p = JSON.parse(s);
        } catch (e: any) {
            this.logger.warn(
                'Could not parse Object Metadata while listing',
                { err: e.toString() });
        }
        return p;
    }

    /**
     * check if the max keys count has been reached and set the
     * final state of the result if it is the case
     *
     * specialized implementation on DelimiterCurrent to also check
     * the number of scanned keys
     *
     * @return {Boolean} - indicates if the iteration has to stop
     */
    _reachedMaxKeys(): boolean {
        if (this.maxScannedLifecycleListingEntries && this.scannedKeys >= this.maxScannedLifecycleListingEntries) {
            this.IsTruncated = true;
            this.logger.info('listing stopped due to reaching the maximum scanned entries limit',
                {
                    maxScannedLifecycleListingEntries: this.maxScannedLifecycleListingEntries,
                    scannedKeys: this.scannedKeys,
                });
            return true;
        }
        return super._reachedMaxKeys();
    }

    addContents(key, value) {
        ++this.scannedKeys;
        const parsedValue = this._parse(value);
        // if parsing fails, skip the key.
        if (parsedValue) {
            const lastModified = parsedValue['last-modified'];
            const dataStoreName = parsedValue.dataStoreName;
            // Add the current version only if it is older than "beforeDate" and,
            // when "excludedDataStoreName" is specified, stored in a different data store.
            if ((!this.beforeDate || (lastModified && lastModified < this.beforeDate)) &&
                (!this.excludedDataStoreName || dataStoreName !== this.excludedDataStoreName)) {
                super.addContents(key, value);
            }
            // In the event of a timeout occurring before any content is added,
            // NextMarker is updated even if the object is not eligible.
            // It minimizes the amount of data that the client needs to re-process if the request times out.
            this.nextMarker = key;
        }
    }

    result(): object {
        const result: ResultObject = {
            Contents: this.Contents,
            IsTruncated: this.IsTruncated,
        };

        if (this.IsTruncated) {
            result.NextMarker = this.nextMarker;
        }

        return result;
    }
}
module.exports = { DelimiterCurrent };

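A hedged usage sketch of DelimiterCurrent (the parameter values and logger are hypothetical): list current versions last modified before a cutoff date, exclude one data store, and stop scanning after 10000 entries even if fewer keys matched:

const listing = new DelimiterCurrent({
    beforeDate: '2024-01-01T00:00:00.000Z',
    excludedDataStoreName: 'cold-location',
    maxScannedLifecycleListingEntries: 10000,
}, logger);
// feed entries through listing.filter(...) as usual, then:
// listing.result() -> { Contents, IsTruncated, NextMarker? }, where
// NextMarker is set as soon as the scan budget truncates the listing
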
@@ -1,196 +0,0 @@
'use strict'; // eslint-disable-line strict

const Delimiter = require('./delimiter').Delimiter;
const Version = require('../../versioning/Version').Version;
const VSConst = require('../../versioning/constants').VersioningConstants;
const { BucketVersioningKeyFormat } = VSConst;
const { FILTER_ACCEPT, FILTER_SKIP, SKIP_NONE } = require('./tools');

const VID_SEP = VSConst.VersionId.Separator;
const { DbPrefixes } = VSConst;

/**
 * Handle object listing with parameters. This extends the base class Delimiter
 * to return the raw master versions of existing objects.
 */
class DelimiterMaster extends Delimiter {
    /**
     * Delimiter listing of master versions.
     * @param {Object} parameters - listing parameters
     * @param {String} parameters.delimiter - delimiter per amazon format
     * @param {String} parameters.prefix - prefix per amazon format
     * @param {String} parameters.marker - marker per amazon format
     * @param {Number} parameters.maxKeys - number of keys to list
     * @param {Boolean} parameters.v2 - indicates whether v2 format
     * @param {String} parameters.startAfter - marker per amazon v2 format
     * @param {String} parameters.continuationToken - obfuscated amazon token
     * @param {RequestLogger} logger - The logger of the request
     * @param {String} [vFormat] - versioning key format
     */
    constructor(parameters, logger, vFormat) {
        super(parameters, logger, vFormat);
        // non-PHD master version or a version whose master is a PHD version
        this.prvKey = undefined;
        this.prvPHDKey = undefined;
        this.inReplayPrefix = false;

        Object.assign(this, {
            [BucketVersioningKeyFormat.v0]: {
                filter: this.filterV0,
                skipping: this.skippingV0,
            },
            [BucketVersioningKeyFormat.v1]: {
                filter: this.filterV1,
                skipping: this.skippingV1,
            },
        }[this.vFormat]);
    }

    /**
     * Filter to apply on each iteration for buckets in v0 format,
     * based on:
     * - prefix
     * - delimiter
     * - maxKeys
     * The marker is being handled directly by levelDB
     * @param {Object} obj - The key and value of the element
     * @param {String} obj.key - The key of the element
     * @param {String} obj.value - The value of the element
     * @return {number} - indicates if iteration should continue
     */
    filterV0(obj) {
        let key = obj.key;
        const value = obj.value;

        if (key.startsWith(DbPrefixes.Replay)) {
            this.inReplayPrefix = true;
            return FILTER_SKIP;
        }
        this.inReplayPrefix = false;

        /* Skip keys not starting with the prefix or not alphabetically
         * ordered. */
        if ((this.prefix && !key.startsWith(this.prefix))
            || (typeof this[this.nextContinueMarker] === 'string' &&
                key <= this[this.nextContinueMarker])) {
            return FILTER_SKIP;
        }

        /* Skip version keys (<key><versionIdSeparator><version>) if we already
         * have a master version. */
        const versionIdIndex = key.indexOf(VID_SEP);
        if (versionIdIndex >= 0) {
            key = key.slice(0, versionIdIndex);
            /* - key === this.prvKey is triggered when a master version has
             *   been accepted for this key,
             * - key === this.NextMarker or this.NextContinueToken is triggered
             *   when a listing page ends on an accepted obj and the next page
             *   starts with a version of this object.
             *   In that case prvKey is default set to undefined
             *   in the constructor and comparing to NextMarker is the only
             *   way to know we should not accept this version. This test is
             *   not redundant with the one at the beginning of this function,
             *   we are comparing here the key without the version suffix,
             * - key startsWith the previous NextMarker happens because we set
             *   NextMarker to the common prefix instead of the whole key
             *   value. (TODO: remove this test once ZENKO-1048 is fixed)
             * */
            if (key === this.prvKey || key === this[this.nextContinueMarker] ||
                (this.delimiter &&
                    key.startsWith(this[this.nextContinueMarker]))) {
                /* master version already filtered */
                return FILTER_SKIP;
            }
        }
        if (Version.isPHD(value)) {
            /* master version is a PHD version, we want to wait for the next
             * one:
             * - Set the prvKey to undefined to not skip the next version,
             * - return accept to avoid users to skip the next values in range
             *   (skip scan mechanism in metadata backend like Metadata or
             *   MongoClient). */
            this.prvKey = undefined;
            this.prvPHDKey = key;
            return FILTER_ACCEPT;
        }
        if (Version.isDeleteMarker(value)) {
            /* This entry is a deleteMarker which has not been filtered by the
             * version test. Either:
             * - it is a deleteMarker on the master version, we want to SKIP
             *   all the following entries with this key (no master version),
             * - or a deleteMarker following a PHD (setting prvKey to undefined
             *   when an entry is a PHD avoids the skip on version for the
             *   next entry). In that case we expect the master version to
             *   follow. */
            if (key === this.prvPHDKey) {
                this.prvKey = undefined;
                return FILTER_ACCEPT;
            }
            this.prvKey = key;
            return FILTER_SKIP;
        }

        this.prvKey = key;
        if (this.delimiter) {
            // check if the key has the delimiter
            const baseIndex = this.prefix ? this.prefix.length : 0;
            const delimiterIndex = key.indexOf(this.delimiter, baseIndex);
            if (delimiterIndex >= 0) {
                // try to add the prefix to the list
                return this.addCommonPrefix(key, delimiterIndex);
            }
        }
        return this.addContents(key, value);
    }

    /**
     * Filter to apply on each iteration for buckets in v1 format,
     * based on:
     * - prefix
     * - delimiter
     * - maxKeys
     * The marker is being handled directly by levelDB
     * @param {Object} obj - The key and value of the element
     * @param {String} obj.key - The key of the element
     * @param {String} obj.value - The value of the element
     * @return {number} - indicates if iteration should continue
     */
    filterV1(obj) {
        // Filtering master keys in v1 is simply listing the master
        // keys, as the state of version keys do not change the
        // result, so we can use Delimiter method directly.
        return super.filter(obj);
    }

    skippingBase() {
        if (this[this.nextContinueMarker]) {
            // next marker or next continuation token:
            // - foo/ : skipping foo/
            // - foo  : skipping foo.
            const index = this[this.nextContinueMarker].
                lastIndexOf(this.delimiter);
            if (index === this[this.nextContinueMarker].length - 1) {
                return this[this.nextContinueMarker];
            }
            return this[this.nextContinueMarker] + VID_SEP;
        }
        return SKIP_NONE;
    }

    skippingV0() {
        if (this.inReplayPrefix) {
            return DbPrefixes.Replay;
        }
        return this.skippingBase();
    }

    skippingV1() {
        const skipTo = this.skippingBase();
        if (skipTo === SKIP_NONE) {
            return SKIP_NONE;
        }
        return DbPrefixes.Master + skipTo;
    }
}

module.exports = { DelimiterMaster };

@@ -0,0 +1,620 @@
import {
    Delimiter,
    FilterState,
    FilterReturnValue,
    DelimiterFilterStateId,
    DelimiterFilterState_NotSkipping,
    DelimiterFilterState_SkippingPrefix,
    ResultObject,
} from './delimiter';
const Version = require('../../versioning/Version').Version;
const VSConst = require('../../versioning/constants').VersioningConstants;
const { BucketVersioningKeyFormat } = VSConst;
const { FILTER_ACCEPT, FILTER_SKIP, FILTER_END, SKIP_NONE, inc } = require('./tools');

import { GapSetEntry } from '../cache/GapSet';
import { GapCacheInterface } from '../cache/GapCache';

const VID_SEP = VSConst.VersionId.Separator;
const { DbPrefixes } = VSConst;

export const enum DelimiterMasterFilterStateId {
    SkippingVersionsV0 = 101,
    WaitVersionAfterPHDV0 = 102,
    SkippingGapV0 = 103,
};

interface DelimiterMasterFilterState_SkippingVersionsV0 extends FilterState {
    id: DelimiterMasterFilterStateId.SkippingVersionsV0,
    masterKey: string,
};

interface DelimiterMasterFilterState_WaitVersionAfterPHDV0 extends FilterState {
    id: DelimiterMasterFilterStateId.WaitVersionAfterPHDV0,
    masterKey: string,
};

interface DelimiterMasterFilterState_SkippingGapV0 extends FilterState {
    id: DelimiterMasterFilterStateId.SkippingGapV0,
};

export const enum GapCachingState {
    NoGapCache = 0, // there is no gap cache
    UnknownGap = 1, // waiting for a cache lookup
    GapLookupInProgress = 2, // asynchronous gap lookup in progress
    GapCached = 3, // an upcoming or already skippable gap is cached
    NoMoreGap = 4, // the cache doesn't have any more gaps inside the listed range
};

type GapCachingInfo_NoGapCache = {
    state: GapCachingState.NoGapCache;
};

type GapCachingInfo_NoCachedGap = {
    state: GapCachingState.UnknownGap
        | GapCachingState.GapLookupInProgress;
    gapCache: GapCacheInterface;
};

type GapCachingInfo_GapCached = {
    state: GapCachingState.GapCached;
    gapCache: GapCacheInterface;
    gapCached: GapSetEntry;
};

type GapCachingInfo_NoMoreGap = {
    state: GapCachingState.NoMoreGap;
};

type GapCachingInfo = GapCachingInfo_NoGapCache
    | GapCachingInfo_NoCachedGap
    | GapCachingInfo_GapCached
    | GapCachingInfo_NoMoreGap;


export const enum GapBuildingState {
    Disabled = 0, // no gap cache or no gap building needed (e.g. in V1 versioning format)
    NotBuilding = 1, // not currently building a gap (i.e. not listing within a gap)
    Building = 2, // currently building a gap (i.e. listing within a gap)
    Expired = 3, // not allowed to build due to exposure delay timeout
};

type GapBuildingInfo_NothingToBuild = {
    state: GapBuildingState.Disabled | GapBuildingState.Expired;
};

type GapBuildingParams = {
    /**
     * minimum weight for a gap to be created in the cache
     */
    minGapWeight: number;
    /**
     * trigger a cache setGap() call every N skippable keys
     */
    triggerSaveGapWeight: number;
    /**
     * timestamp to assess whether we're still inside the validity period to
     * be allowed to build gaps
     */
    initTimestamp: number;
};

type GapBuildingInfo_NotBuilding = {
    state: GapBuildingState.NotBuilding;
    gapCache: GapCacheInterface;
    params: GapBuildingParams;
};

type GapBuildingInfo_Building = {
    state: GapBuildingState.Building;
    gapCache: GapCacheInterface;
    params: GapBuildingParams;
    /**
     * Gap currently being created
     */
    gap: GapSetEntry;
    /**
     * total current weight of the gap being created
     */
    gapWeight: number;
};

type GapBuildingInfo = GapBuildingInfo_NothingToBuild
    | GapBuildingInfo_NotBuilding
    | GapBuildingInfo_Building;

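The GapCachingInfo and GapBuildingInfo unions are discriminated by their `state` field, so a switch on `state` narrows them to the variant carrying the extra fields. A minimal sketch of the pattern the methods below rely on (the helper function itself is hypothetical):

function describeGapCaching(info: GapCachingInfo): string {
    switch (info.state) {
    case GapCachingState.NoGapCache:
        return 'no gap cache configured';
    case GapCachingState.UnknownGap:
    case GapCachingState.GapLookupInProgress:
        // here `info` is known to carry a `gapCache` field
        return 'gap lookup pending on the cache';
    case GapCachingState.GapCached:
        // here `info.gapCached` is a GapSetEntry
        return `cached gap up to ${info.gapCached.lastKey}`;
    case GapCachingState.NoMoreGap:
        return 'no more gaps in the listed range';
    }
}
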
/**
 * Handle object listing with parameters. This extends the base class Delimiter
 * to return the raw master versions of existing objects.
 */
export class DelimiterMaster extends Delimiter {

    _gapCaching: GapCachingInfo;
    _gapBuilding: GapBuildingInfo;
    _refreshedBuildingParams: GapBuildingParams | null;

    /**
     * Delimiter listing of master versions.
     * @param {Object} parameters - listing parameters
     * @param {String} [parameters.delimiter] - delimiter per amazon format
     * @param {String} [parameters.prefix] - prefix per amazon format
     * @param {String} [parameters.marker] - marker per amazon format
     * @param {Number} [parameters.maxKeys] - number of keys to list
     * @param {Boolean} [parameters.v2] - indicates whether v2 format
     * @param {String} [parameters.startAfter] - marker per amazon v2 format
     * @param {String} [parameters.continuationToken] - obfuscated amazon token
     * @param {RequestLogger} logger - The logger of the request
     * @param {String} [vFormat="v0"] - versioning key format
     */
    constructor(parameters, logger, vFormat?: string) {
        super(parameters, logger, vFormat);

        if (this.vFormat === BucketVersioningKeyFormat.v0) {
            // override Delimiter's implementation of NotSkipping for
            // DelimiterMaster logic (skipping versions and special
            // handling of delete markers and PHDs)
            this.setKeyHandler(
                DelimiterFilterStateId.NotSkipping,
                this.keyHandler_NotSkippingPrefixNorVersionsV0.bind(this));

            // add extra state handlers specific to DelimiterMaster with v0 format
            this.setKeyHandler(
                DelimiterMasterFilterStateId.SkippingVersionsV0,
                this.keyHandler_SkippingVersionsV0.bind(this));

            this.setKeyHandler(
                DelimiterMasterFilterStateId.WaitVersionAfterPHDV0,
                this.keyHandler_WaitVersionAfterPHDV0.bind(this));

            this.setKeyHandler(
                DelimiterMasterFilterStateId.SkippingGapV0,
                this.keyHandler_SkippingGapV0.bind(this));

            if (this.marker) {
                // distinct initial state to include some special logic
                // before the first master key is found that does not have
                // to be checked afterwards
                this.state = <DelimiterMasterFilterState_SkippingVersionsV0> {
                    id: DelimiterMasterFilterStateId.SkippingVersionsV0,
                    masterKey: this.marker,
                };
            } else {
                this.state = <DelimiterFilterState_NotSkipping> {
                    id: DelimiterFilterStateId.NotSkipping,
                };
            }
        } else {
            // save base implementation of the `NotSkipping` state in
            // Delimiter before overriding it with ours, to be able to call it from there
            this.keyHandler_NotSkipping_Delimiter = this.keyHandlers[DelimiterFilterStateId.NotSkipping];
            this.setKeyHandler(
                DelimiterFilterStateId.NotSkipping,
                this.keyHandler_NotSkippingPrefixNorVersionsV1.bind(this));
        }
        // in v1, we can directly use Delimiter's implementation,
        // which is already set to the proper state

        // default initialization of the gap cache and building states, can be
        // set by refreshGapCache()
        this._gapCaching = {
            state: GapCachingState.NoGapCache,
        };
        this._gapBuilding = {
            state: GapBuildingState.Disabled,
        };
        this._refreshedBuildingParams = null;
    }

    /**
     * Get the validity period left before a refresh of the gap cache is needed
     * to continue building new gaps.
     *
     * @return {number|null} one of:
     * - the remaining time in milliseconds in which gaps can be added to the
     *   cache before a call to refreshGapCache() is required
     * - or 0 if there is no time left and a call to refreshGapCache() is required
     *   to resume caching gaps
     * - or null if refreshing the cache is never needed (because the gap cache
     *   is either not available or not used)
     */
    getGapBuildingValidityPeriodMs(): number | null {
        let gapBuilding;
        switch (this._gapBuilding.state) {
        case GapBuildingState.Disabled:
            return null;
        case GapBuildingState.Expired:
            return 0;
        case GapBuildingState.NotBuilding:
            gapBuilding = <GapBuildingInfo_NotBuilding> this._gapBuilding;
            break;
        case GapBuildingState.Building:
            gapBuilding = <GapBuildingInfo_Building> this._gapBuilding;
            break;
        }
        const { gapCache, params } = gapBuilding;
        const elapsedTime = Date.now() - params.initTimestamp;
        return Math.max(gapCache.exposureDelayMs - elapsedTime, 0);
    }
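
// Illustrative sketch (not part of the original diff): the validity period
// returned above is plain arithmetic on the refresh timestamp. The numbers
// in the example call are assumptions, not values taken from a real
// GapCacheInterface instance.
function exampleRemainingValidityMs(exposureDelayMs: number, initTimestamp: number): number {
    const elapsedTime = Date.now() - initTimestamp;
    return Math.max(exposureDelayMs - elapsedTime, 0);
}
// a cache exposed for 30000 ms and refreshed 10000 ms ago leaves ~20000 ms:
// exampleRemainingValidityMs(30000, Date.now() - 10000) ~= 20000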

    /**
     * Refresh the gaps caching logic (gaps are series of current delete markers
     * in V0 bucket metadata format). It has two effects:
     *
     * - starts exposing existing and future gaps from the cache to efficiently
     *   skip over series of current delete markers that have been seen and cached
     *   earlier
     *
     * - enables building and caching new gaps (or extending existing ones), for a
     *   limited time period defined by the `gapCacheProxy.exposureDelayMs` value
     *   in milliseconds. To refresh the validity period and resume building and
     *   caching new gaps, one must restart a new listing from the database (starting
     *   at the current listing key, included), then call refreshGapCache() again.
     *
     * @param {GapCacheInterface} gapCacheProxy - API proxy to the gaps cache
     * (the proxy should handle prefixing object keys with the bucket name)
     * @param {number} [minGapWeight=100] - minimum weight of a gap for it to be
     * added to the cache
     * @param {number} [triggerSaveGapWeight] - cumulative weight to wait for
     * before saving the current building gap. Cannot be greater than
     * `gapCacheProxy.maxGapWeight` (the value is thresholded to `maxGapWeight`
     * otherwise). Defaults to `gapCacheProxy.maxGapWeight / 2`.
     * @return {undefined}
     */
    refreshGapCache(
        gapCacheProxy: GapCacheInterface,
        minGapWeight?: number,
        triggerSaveGapWeight?: number
    ): void {
        if (this.vFormat !== BucketVersioningKeyFormat.v0) {
            return;
        }
        if (this._gapCaching.state === GapCachingState.NoGapCache) {
            this._gapCaching = {
                state: GapCachingState.UnknownGap,
                gapCache: gapCacheProxy,
            };
        }
        const refreshedBuildingParams: GapBuildingParams = {
            minGapWeight: minGapWeight || 100,
            triggerSaveGapWeight: triggerSaveGapWeight
                || Math.trunc(gapCacheProxy.maxGapWeight / 2),
            initTimestamp: Date.now(),
        };
        if (this._gapBuilding.state === GapBuildingState.Building) {
            // refreshed params will be applied as soon as the current building gap is saved
            this._refreshedBuildingParams = refreshedBuildingParams;
        } else {
            this._gapBuilding = {
                state: GapBuildingState.NotBuilding,
                gapCache: gapCacheProxy,
                params: refreshedBuildingParams,
            };
        }
    }
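
// Hypothetical usage sketch (the proxy object and its field values are
// illustrative, not from this module): a caller enables gap lookup and gap
// building on a listing by passing an object implementing GapCacheInterface.
const exampleGapCacheProxy = {
    maxGapWeight: 2000,
    exposureDelayMs: 30000,
    lookupGap: async (minKey: string, maxKey?: string) => null,
    setGap: (firstKey: string, lastKey: string, weight: number): void => {},
};
// listing.refreshGapCache(exampleGapCacheProxy, 100);
// => gap building enabled with minGapWeight=100 and
//    triggerSaveGapWeight defaulting to Math.trunc(2000 / 2) = 1000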

    /**
     * Trigger a lookup of the closest upcoming or already skippable gap.
     *
     * @param {GapCachingInfo_NoCachedGap} gapCaching - current gap caching info
     * @param {string} fromKey - lookup a gap not before 'fromKey'
     * @return {undefined} - the lookup is asynchronous and its
     * response is handled inside this function
     */
    _triggerGapLookup(gapCaching: GapCachingInfo_NoCachedGap, fromKey: string): void {
        this._gapCaching = {
            state: GapCachingState.GapLookupInProgress,
            gapCache: gapCaching.gapCache,
        };
        const maxKey = this.prefix ? inc(this.prefix) : undefined;
        gapCaching.gapCache.lookupGap(fromKey, maxKey).then(_gap => {
            const gap = <GapSetEntry | null> _gap;
            if (gap) {
                this._gapCaching = {
                    state: GapCachingState.GapCached,
                    gapCache: gapCaching.gapCache,
                    gapCached: gap,
                };
            } else {
                this._gapCaching = {
                    state: GapCachingState.NoMoreGap,
                };
            }
        });
    }

    _checkGapOnMasterDeleteMarker(key: string): FilterReturnValue {
        switch (this._gapBuilding.state) {
        case GapBuildingState.Disabled:
        case GapBuildingState.Expired:
            break;
        case GapBuildingState.NotBuilding:
            this._createBuildingGap(key, 1);
            break;
        case GapBuildingState.Building:
            this._updateBuildingGap(key);
            break;
        }
        if (this._gapCaching.state === GapCachingState.GapCached) {
            const { gapCached } = this._gapCaching;
            if (key >= gapCached.firstKey) {
                if (key <= gapCached.lastKey) {
                    // we are inside the last looked up cached gap: transition to
                    // 'SkippingGapV0' state
                    this.setState(<DelimiterMasterFilterState_SkippingGapV0> {
                        id: DelimiterMasterFilterStateId.SkippingGapV0,
                    });
                    // cut the current gap before skipping, it will be merged or
                    // chained with the existing one (depending on its weight)
                    if (this._gapBuilding.state === GapBuildingState.Building) {
                        // subtract 1 from the weight because we are going to chain this gap,
                        // which has an overlap of one key
                        this._gapBuilding.gap.weight -= 1;
                        this._cutBuildingGap();
                    }
                    return FILTER_SKIP;
                }
                // as we are past the cached gap, we will need another lookup
                this._gapCaching = {
                    state: GapCachingState.UnknownGap,
                    gapCache: this._gapCaching.gapCache,
                };
            }
        }
        if (this._gapCaching.state === GapCachingState.UnknownGap) {
            this._triggerGapLookup(this._gapCaching, key);
        }
        return FILTER_ACCEPT;
    }

    filter_onNewMasterKeyV0(key: string, value: string): FilterReturnValue {
        // if this master key is a delete marker, accept it without
        // adding the version to the contents
        if (Version.isDeleteMarker(value)) {
            // update the state to start skipping versions of the new master key
            this.setState(<DelimiterMasterFilterState_SkippingVersionsV0> {
                id: DelimiterMasterFilterStateId.SkippingVersionsV0,
                masterKey: key,
            });
            return this._checkGapOnMasterDeleteMarker(key);
        }
        if (Version.isPHD(value)) {
            // master version is a PHD version: wait for the first
            // following version that will be considered as the actual
            // master key
            this.setState(<DelimiterMasterFilterState_WaitVersionAfterPHDV0> {
                id: DelimiterMasterFilterStateId.WaitVersionAfterPHDV0,
                masterKey: key,
            });
            return FILTER_ACCEPT;
        }
        // cut the current gap as soon as a non-deleted entry is seen
        this._cutBuildingGap();

        if (key.startsWith(DbPrefixes.Replay)) {
            // skip internal replay prefix entirely
            this.setState(<DelimiterFilterState_SkippingPrefix> {
                id: DelimiterFilterStateId.SkippingPrefix,
                prefix: DbPrefixes.Replay,
            });
            return FILTER_SKIP;
        }
        if (this._reachedMaxKeys()) {
            return FILTER_END;
        }

        const commonPrefix = this.addCommonPrefixOrContents(key, value);
        if (commonPrefix) {
            // transition into SkippingPrefix state to skip all following keys
            // while they start with the same prefix
            this.setState(<DelimiterFilterState_SkippingPrefix> {
                id: DelimiterFilterStateId.SkippingPrefix,
                prefix: commonPrefix,
            });
            return FILTER_ACCEPT;
        }
        // update the state to start skipping versions of the new master key
        this.setState(<DelimiterMasterFilterState_SkippingVersionsV0> {
            id: DelimiterMasterFilterStateId.SkippingVersionsV0,
            masterKey: key,
        });
        return FILTER_ACCEPT;
    }

    keyHandler_NotSkippingPrefixNorVersionsV0(key: string, value: string): FilterReturnValue {
        return this.filter_onNewMasterKeyV0(key, value);
    }

    filter_onNewMasterKeyV1(key: string, value: string): FilterReturnValue {
        // if this master key is a delete marker, accept it without
        // adding the version to the contents
        if (Version.isDeleteMarker(value)) {
            return FILTER_ACCEPT;
        }
        // use base Delimiter's implementation
        return this.keyHandler_NotSkipping_Delimiter(key, value);
    }

    keyHandler_NotSkippingPrefixNorVersionsV1(key: string, value: string): FilterReturnValue {
        return this.filter_onNewMasterKeyV1(key, value);
    }

    keyHandler_SkippingVersionsV0(key: string, value: string): FilterReturnValue {
        /* In the SkippingVersionsV0 state, skip all version keys
         * (<key><versionIdSeparator><version>) */
        const versionIdIndex = key.indexOf(VID_SEP);
        if (versionIdIndex !== -1) {
            // version keys count in the building gap weight because they must
            // also be listed until skipped
            if (this._gapBuilding.state === GapBuildingState.Building) {
                this._updateBuildingGap(key);
            }
            return FILTER_SKIP;
        }
        return this.filter_onNewMasterKeyV0(key, value);
    }

    keyHandler_WaitVersionAfterPHDV0(key: string, value: string): FilterReturnValue {
        // After a PHD key is encountered, the next version key of the
        // same object, if it exists, is the new master key, hence
        // consider it as such and call 'onNewMasterKeyV0' (the test
        // 'masterKey == phdKey' is probably redundant when we already
        // know we have a versioned key, since all objects in v0 have
        // a master key, but we keep it to be safe)
        const { masterKey: phdKey } = <DelimiterMasterFilterState_WaitVersionAfterPHDV0> this.state;
        const versionIdIndex = key.indexOf(VID_SEP);
        if (versionIdIndex !== -1) {
            const masterKey = key.slice(0, versionIdIndex);
            if (masterKey === phdKey) {
                return this.filter_onNewMasterKeyV0(masterKey, value);
            }
        }
        return this.filter_onNewMasterKeyV0(key, value);
    }

    keyHandler_SkippingGapV0(key: string, value: string): FilterReturnValue {
        const { gapCache, gapCached } = <GapCachingInfo_GapCached> this._gapCaching;
        if (key <= gapCached.lastKey) {
            return FILTER_SKIP;
        }
        this._gapCaching = {
            state: GapCachingState.UnknownGap,
            gapCache,
        };
        this.setState(<DelimiterMasterFilterState_SkippingVersionsV0> {
            id: DelimiterMasterFilterStateId.SkippingVersionsV0,
        });
        // Start a gap with weight=0 from the latest skippable key. This
        // allows extending the gap just skipped with a chained gap in case
        // other delete markers are seen after the existing gap is skipped.
        this._createBuildingGap(gapCached.lastKey, 0, gapCached.weight);

        return this.handleKey(key, value);
    }
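
// Illustrative sketch (not from the diff): when a cached gap of weight W
// has just been skipped, a new building gap is chained from its last key
// with weight 0 and a cumulative weight seeded with W, so the shared
// boundary key is not counted twice.
type ExampleGap = { firstKey: string, lastKey: string, weight: number };
function exampleChainGap(cached: ExampleGap): { gap: ExampleGap, gapWeight: number } {
    return {
        gap: { firstKey: cached.lastKey, lastKey: cached.lastKey, weight: 0 },
        gapWeight: cached.weight, // cachedWeight + startWeight(0)
    };
}
// exampleChainGap({ firstKey: 'a', lastKey: 'm', weight: 500 })
// => { gap: { firstKey: 'm', lastKey: 'm', weight: 0 }, gapWeight: 500 }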

    skippingBase(): string | undefined {
        switch (this.state.id) {
        case DelimiterMasterFilterStateId.SkippingVersionsV0:
            const { masterKey } = <DelimiterMasterFilterState_SkippingVersionsV0> this.state;
            return masterKey + inc(VID_SEP);

        case DelimiterMasterFilterStateId.SkippingGapV0:
            const { gapCached } = <GapCachingInfo_GapCached> this._gapCaching;
            return gapCached.lastKey;

        default:
            return super.skippingBase();
        }
    }

    result(): ResultObject {
        this._cutBuildingGap();
        return super.result();
    }

    _checkRefreshedBuildingParams(params: GapBuildingParams): GapBuildingParams {
        if (this._refreshedBuildingParams) {
            const newParams = this._refreshedBuildingParams;
            this._refreshedBuildingParams = null;
            return newParams;
        }
        return params;
    }

    /**
     * Save the gap being built if allowed (i.e. still within the
     * allocated exposure time window).
     *
     * @return {boolean} - true if the gap was saved, false if we are
     * outside the allocated exposure time window.
     */
    _saveBuildingGap(): boolean {
        const { gapCache, params, gap, gapWeight } =
            <GapBuildingInfo_Building> this._gapBuilding;
        const totalElapsed = Date.now() - params.initTimestamp;
        if (totalElapsed >= gapCache.exposureDelayMs) {
            this._gapBuilding = {
                state: GapBuildingState.Expired,
            };
            this._refreshedBuildingParams = null;
            return false;
        }
        const { firstKey, lastKey, weight } = gap;
        gapCache.setGap(firstKey, lastKey, weight);
        this._gapBuilding = {
            state: GapBuildingState.Building,
            gapCache,
            params: this._checkRefreshedBuildingParams(params),
            gap: {
                firstKey: gap.lastKey,
                lastKey: gap.lastKey,
                weight: 0,
            },
            gapWeight,
        };
        return true;
    }

    /**
     * Create a new gap to be extended afterwards
     *
     * @param {string} newKey - gap's first key
     * @param {number} startWeight - initial weight of the building gap (usually 0 or 1)
     * @param {number} [cachedWeight] - if continuing a cached gap, weight of the existing
     * cached portion
     * @return {undefined}
     */
    _createBuildingGap(newKey: string, startWeight: number, cachedWeight?: number): void {
        if (this._gapBuilding.state === GapBuildingState.NotBuilding) {
            const { gapCache, params } = <GapBuildingInfo_NotBuilding> this._gapBuilding;
            this._gapBuilding = {
                state: GapBuildingState.Building,
                gapCache,
                params: this._checkRefreshedBuildingParams(params),
                gap: {
                    firstKey: newKey,
                    lastKey: newKey,
                    weight: startWeight,
                },
                gapWeight: (cachedWeight || 0) + startWeight,
            };
        }
    }

    _updateBuildingGap(newKey: string): void {
        const gapBuilding = <GapBuildingInfo_Building> this._gapBuilding;
        const { params, gap } = gapBuilding;
        gap.lastKey = newKey;
        gap.weight += 1;
        gapBuilding.gapWeight += 1;
        // the GapCache API requires updating a gap regularly because it can only split
        // it once per update, by the known last key. In practice the default behavior
        // is to trigger an update after a number of keys that is half the maximum weight.
        // It is also useful for other listings to benefit from the cache sooner.
        if (gapBuilding.gapWeight >= params.minGapWeight &&
            gap.weight >= params.triggerSaveGapWeight) {
            this._saveBuildingGap();
        }
    }

    _cutBuildingGap(): void {
        if (this._gapBuilding.state === GapBuildingState.Building) {
            let gapBuilding = <GapBuildingInfo_Building> this._gapBuilding;
            let { gapCache, params, gap, gapWeight } = gapBuilding;
            // only set gaps that are significant enough in weight and
            // with a non-empty extension
            if (gapWeight >= params.minGapWeight && gap.weight > 0) {
                // we're done if we were not allowed to save the gap
                if (!this._saveBuildingGap()) {
                    return;
                }
                // params may have been refreshed, reload them
                gapBuilding = <GapBuildingInfo_Building> this._gapBuilding;
                params = gapBuilding.params;
            }
            this._gapBuilding = {
                state: GapBuildingState.NotBuilding,
                gapCache,
                params,
            };
        }
    }
}
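
A minimal sketch of the save trigger applied in _updateBuildingGap above, under assumed parameter values (the argument names and numbers are illustrative, not taken from the diff):

function exampleShouldSaveGap(
    gapWeight: number,            // cumulative weight, including chained cached portions
    unsavedWeight: number,        // weight of the portion not yet saved to the cache
    minGapWeight: number,         // e.g. 100
    triggerSaveGapWeight: number, // e.g. maxGapWeight / 2
): boolean {
    // mirrors the condition checked after each key is appended to the gap
    return gapWeight >= minGapWeight && unsavedWeight >= triggerSaveGapWeight;
}
// exampleShouldSaveGap(1500, 1000, 100, 1000) === true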

@@ -0,0 +1,202 @@
const { DelimiterVersions } = require('./delimiterVersions');
const { FILTER_END, FILTER_SKIP } = require('./tools');

const TRIM_METADATA_MIN_BLOB_SIZE = 10000;

/**
 * Handle object listing with parameters. This extends the base class DelimiterVersions
 * to return the raw non-current version objects.
 */
class DelimiterNonCurrent extends DelimiterVersions {
    /**
     * Delimiter listing of non-current versions.
     * @param {Object} parameters - listing parameters
     * @param {String} parameters.keyMarker - key marker
     * @param {String} parameters.versionIdMarker - version id marker
     * @param {String} parameters.beforeDate - limit the response to keys with stale date older than beforeDate.
     * The "stale date" is the date when a version becomes non-current.
     * @param {Number} parameters.maxScannedLifecycleListingEntries - max number of entries to be scanned
     * @param {String} parameters.excludedDataStoreName - exclude dataStoreName matches from the versions
     * @param {RequestLogger} logger - The logger of the request
     * @param {String} [vFormat] - versioning key format
     */
    constructor(parameters, logger, vFormat) {
        super(parameters, logger, vFormat);

        this.beforeDate = parameters.beforeDate;
        this.excludedDataStoreName = parameters.excludedDataStoreName;
        this.maxScannedLifecycleListingEntries = parameters.maxScannedLifecycleListingEntries;

        // internal state
        this.prevKey = null;
        this.staleDate = null;

        this.scannedKeys = 0;
    }

    getLastModified(value) {
        let lastModified;
        try {
            const v = JSON.parse(value);
            lastModified = v['last-modified'];
        } catch (e) {
            this.logger.warn('could not parse Object Metadata while listing',
                {
                    method: 'getLastModified',
                    err: e.toString(),
                });
        }
        return lastModified;
    }

    // Overwrite keyHandler_SkippingVersions to include the last version from the previous listing.
    // The creation (last-modified) date of this version will be the stale date for the following version.
    // eslint-disable-next-line camelcase
    keyHandler_SkippingVersions(key, versionId, value) {
        if (key === this.keyMarker) {
            // since the nonversioned key equals the marker, there is
            // necessarily a versionId in this key
            const _versionId = versionId;
            if (_versionId < this.versionIdMarker) {
                // skip all versions until marker
                return FILTER_SKIP;
            }
        }
        this.setState({
            id: 1 /* NotSkipping */,
        });
        return this.handleKey(key, versionId, value);
    }

    filter(obj) {
        if (this.maxScannedLifecycleListingEntries && this.scannedKeys >= this.maxScannedLifecycleListingEntries) {
            this.IsTruncated = true;
            this.logger.info('listing stopped due to reaching the maximum scanned entries limit',
                {
                    maxScannedLifecycleListingEntries: this.maxScannedLifecycleListingEntries,
                    scannedKeys: this.scannedKeys,
                });
            return FILTER_END;
        }
        ++this.scannedKeys;
        return super.filter(obj);
    }

    /**
     * NOTE: Each version of a specific key is sorted from the latest to the oldest
     * thanks to the way version ids are generated.
     * DESCRIPTION: Skip the version if it represents the master key, but keep its last-modified date in memory,
     * which will be the stale date of the following version.
     * The following version is pushed only:
     * - if the "stale date" (picked up from the previous version) is available (JSON.parse has not failed),
     * - if "beforeDate" is not specified, or if it is specified and the "stale date" is older,
     * - if "excludedDataStoreName" is not specified, or if it is specified and the data store name is different.
     * The in-memory "stale date" is then updated with the version's last-modified date to be used for
     * the following version.
     * The process stops and returns the available results if either:
     * - no more metadata key is left to be processed
     * - the listing reaches the maximum number of keys to be returned
     * - the internal timeout is reached
     * @param {String} key - The key to add
     * @param {String} versionId - The version id
     * @param {String} value - The value of the key
     * @return {undefined}
     */
    addVersion(key, versionId, value) {
        this.nextKeyMarker = key;
        this.nextVersionIdMarker = versionId;

        // Skip the version if it represents the current version, but keep its last-modified date,
        // which will be the stale date of the following version.
        const isCurrentVersion = key !== this.prevKey;
        if (isCurrentVersion) {
            this.staleDate = this.getLastModified(value);
            this.prevKey = key;
            return;
        }

        // The following version is pushed only:
        // - if the "stale date" (picked up from the previous version) is available (JSON.parse has not failed),
        // - if "beforeDate" is not specified, or if it is specified and the "stale date" is older,
        // - if "excludedDataStoreName" is not specified, or if it is specified and the data store name is different.
        let lastModified;
        if (this.staleDate && (!this.beforeDate || this.staleDate < this.beforeDate)) {
            const parsedValue = this._parse(value);
            // if parsing fails, skip the key.
            if (parsedValue) {
                const dataStoreName = parsedValue.dataStoreName;
                lastModified = parsedValue['last-modified'];
                if (!this.excludedDataStoreName || dataStoreName !== this.excludedDataStoreName) {
                    const s = this._stringify(parsedValue, this.staleDate);
                    // check that _stringify succeeds to only push objects with a defined staleDate.
                    if (s) {
                        this.Versions.push({ key, value: s });
                        ++this.keys;
                    }
                }
            }
        }

        // The in-memory "stale date" is then updated with the version's last-modified date to be used for
        // the following version.
        this.staleDate = lastModified || this.getLastModified(value);

        return;
    }

    /**
     * Parses the stringified entry's value and removes the location property if too large.
     * @param {string} s - stringified value
     * @return {object} p - undefined if parsing fails, otherwise the parsed value.
     */
    _parse(s) {
        let p;
        try {
            p = JSON.parse(s);
            if (s.length >= TRIM_METADATA_MIN_BLOB_SIZE) {
                delete p.location;
            }
        } catch (e) {
            this.logger.warn('Could not parse Object Metadata while listing', {
                method: 'DelimiterNonCurrent._parse',
                err: e.toString(),
            });
        }
        return p;
    }

    _stringify(parsedMD, staleDate) {
        const p = parsedMD;
        let s = undefined;
        p.staleDate = staleDate;
        try {
            s = JSON.stringify(p);
        } catch (e) {
            this.logger.warn('could not stringify Object Metadata while listing', {
                method: 'DelimiterNonCurrent._stringify',
                err: e.toString(),
            });
        }
        return s;
    }

    result() {
        const { Versions, IsTruncated, NextKeyMarker, NextVersionIdMarker } = super.result();

        const result = {
            Contents: Versions,
            IsTruncated,
        };

        if (NextKeyMarker) {
            result.NextKeyMarker = NextKeyMarker;
        }

        if (NextVersionIdMarker) {
            result.NextVersionIdMarker = NextVersionIdMarker;
        }

        return result;
    }
}
module.exports = { DelimiterNonCurrent };
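
A minimal sketch of the stale-date propagation implemented by addVersion above, assuming versions of one key arrive newest first (the dates and shapes are illustrative):

const exampleVersionsOfOneKey = [
    { lastModified: '2024-03-01T00:00:00Z' }, // current version: skipped, date remembered
    { lastModified: '2024-02-01T00:00:00Z' }, // became non-current on 2024-03-01
    { lastModified: '2024-01-01T00:00:00Z' }, // became non-current on 2024-02-01
];
let exampleStaleDate: string | null = null;
const examplePushed: { lastModified: string, staleDate: string }[] = [];
for (const v of exampleVersionsOfOneKey) {
    if (exampleStaleDate !== null) {
        // non-current version: its stale date is the previous version's creation date
        examplePushed.push({ ...v, staleDate: exampleStaleDate });
    }
    exampleStaleDate = v.lastModified;
}
// examplePushed[0].staleDate === '2024-03-01T00:00:00Z'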

@@ -0,0 +1,204 @@
const DelimiterVersions = require('./delimiterVersions').DelimiterVersions;
const { FILTER_END } = require('./tools');
const TRIM_METADATA_MIN_BLOB_SIZE = 10000;
/**
 * Handle object listing with parameters. This extends the base class DelimiterVersions
 * to return the orphan delete markers. Orphan delete markers are also
 * referred to as expired object delete markers.
 * They are delete markers with zero noncurrent versions.
 */
class DelimiterOrphanDeleteMarker extends DelimiterVersions {
    /**
     * Delimiter listing of orphan delete markers.
     * @param {Object} parameters - listing parameters
     * @param {String} parameters.beforeDate - limit the response to keys older than beforeDate
     * @param {Number} parameters.maxScannedLifecycleListingEntries - max number of entries to be scanned
     * @param {RequestLogger} logger - The logger of the request
     * @param {String} [vFormat] - versioning key format
     */
    constructor(parameters, logger, vFormat) {
        const {
            marker,
            maxKeys,
            prefix,
            beforeDate,
            maxScannedLifecycleListingEntries,
        } = parameters;

        const versionParams = {
            // The orphan delete marker logic uses the term 'marker' instead of 'keyMarker',
            // as the latter could suggest the presence of a 'versionIdMarker'.
            keyMarker: marker,
            maxKeys,
            prefix,
        };
        super(versionParams, logger, vFormat);

        this.maxScannedLifecycleListingEntries = maxScannedLifecycleListingEntries;
        this.beforeDate = beforeDate;
        // this.prevKeyName is used as a marker for the next listing when the current one reaches its entry limit.
        // We cannot rely on this.keyName, as it contains the name of the current key.
        // In the event of a listing interruption due to reaching the maximum scanned entries,
        // relying on this.keyName would cause the next listing to skip the current key because S3 starts
        // listing after the marker.
        this.prevKeyName = null;
        this.keyName = null;
        this.value = null;
        this.scannedKeys = 0;
    }

    _reachedMaxKeys() {
        if (this.keys >= this.maxKeys) {
            return true;
        }
        return false;
    }

    _addOrphan() {
        const parsedValue = this._parse(this.value);
        // if parsing fails, skip the key.
        if (parsedValue) {
            const lastModified = parsedValue['last-modified'];
            const isDeleteMarker = parsedValue.isDeleteMarker;
            // We then check if the orphan version is a delete marker and if it is older than the "beforeDate"
            if ((!this.beforeDate || (lastModified && lastModified < this.beforeDate)) && isDeleteMarker) {
                // Prefer returning untrimmed data rather than stopping the service in case of stringify failure.
                const s = this._stringify(parsedValue) || this.value;
                this.Versions.push({ key: this.keyName, value: s });
                this.nextKeyMarker = this.keyName;
                ++this.keys;
            }
        }
    }

    /**
     * Parses the stringified entry's value and removes the location property if too large.
     * @param {string} s - stringified value
     * @return {object} p - undefined if parsing fails, otherwise the parsed value.
     */
    _parse(s) {
        let p;
        try {
            p = JSON.parse(s);
            if (s.length >= TRIM_METADATA_MIN_BLOB_SIZE) {
                delete p.location;
            }
        } catch (e) {
            this.logger.warn('Could not parse Object Metadata while listing', {
                method: 'DelimiterOrphanDeleteMarker._parse',
                err: e.toString(),
            });
        }
        return p;
    }

    _stringify(value) {
        const p = value;
        let s = undefined;
        try {
            s = JSON.stringify(p);
        } catch (e) {
            this.logger.warn('could not stringify Object Metadata while listing',
                {
                    method: 'DelimiterOrphanDeleteMarker._stringify',
                    err: e.toString(),
                });
        }
        return s;
    }
    /**
     * The purpose of _isMaxScannedEntriesReached is to restrict the number of scanned entries,
     * thus controlling resource overhead (CPU...).
     * @return {boolean} isMaxScannedEntriesReached - true if the maximum limit on the number
     * of entries scanned has been reached, false otherwise.
     */
    _isMaxScannedEntriesReached() {
        return this.maxScannedLifecycleListingEntries && this.scannedKeys >= this.maxScannedLifecycleListingEntries;
    }

    filter(obj) {
        if (this._isMaxScannedEntriesReached()) {
            this.nextKeyMarker = this.prevKeyName;
            this.IsTruncated = true;
            this.logger.info('listing stopped due to reaching the maximum scanned entries limit',
                {
                    maxScannedLifecycleListingEntries: this.maxScannedLifecycleListingEntries,
                    scannedKeys: this.scannedKeys,
                });
            return FILTER_END;
        }
        ++this.scannedKeys;
        return super.filter(obj);
    }

    /**
     * NOTE: Each version of a specific key is sorted from the latest to the oldest
     * thanks to the way version ids are generated.
     * DESCRIPTION: For a given key, the latest version is kept in memory since it is the current version.
     * If the following version references a new key, it means that the previous one was an orphan version.
     * We then check if the orphan version is a delete marker and if it is older than the "beforeDate".
     * The process stops and returns the available results if either:
     * - no more metadata key is left to be processed
     * - the listing reaches the maximum number of keys to be returned
     * - the internal timeout is reached
     * NOTE: we cannot leverage MongoDB to list keys older than "beforeDate"
     * because then we would not be able to assess their orphanage.
     * @param {String} key - The object key.
     * @param {String} versionId - The object version id.
     * @param {String} value - The value of the key
     * @return {undefined}
     */
    addVersion(key, versionId, value) {
        // For a given key, the youngest version is kept in memory since it represents the current version.
        if (key !== this.keyName) {
            // If this.value is defined, it means that the <this.keyName, this.value> pair is "allowed" to be an orphan.
            if (this.value) {
                this._addOrphan();
            }
            this.prevKeyName = this.keyName;
            this.keyName = key;
            this.value = value;

            return;
        }

        // If the key is not the current version, we can skip it in the next listing
        // in the case where the current listing is interrupted due to reaching the maximum scanned entries.
        this.prevKeyName = key;
        this.keyName = key;
        this.value = null;

        return;
    }

    result() {
        // Only check for the remaining last orphan delete marker if the listing is not interrupted.
        // This will help avoid false positives.
        if (!this._isMaxScannedEntriesReached()) {
            // The following check makes sure the last orphan delete marker is not forgotten.
            if (this.keys < this.maxKeys) {
                if (this.value) {
                    this._addOrphan();
                }
            // The following makes sure that if maxKeys is reached, IsTruncated is set to true.
            // We moved the "IsTruncated" assignment out of _reachedMaxKeys to make sure we take into account the last entity
            // if the listing is truncated right before the last entity and the last entity is an orphan delete marker.
            } else {
                this.IsTruncated = this.maxKeys > 0;
            }
        }

        const result = {
            Contents: this.Versions,
            IsTruncated: this.IsTruncated,
        };

        if (this.IsTruncated) {
            result.NextMarker = this.nextKeyMarker;
        }

        return result;
    }
}

module.exports = { DelimiterOrphanDeleteMarker };
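
A minimal sketch of the one-key-lag orphan detection implemented above (entry shapes and values are illustrative, not from the module): the newest version of each key is held back, and it is emitted as an orphan only if no older version of the same key follows.

type ExampleEntry = { key: string, isDeleteMarker: boolean };
function exampleFindOrphans(entries: ExampleEntry[]): string[] {
    const orphans: string[] = [];
    let held: ExampleEntry | null = null;
    for (const e of entries) {
        if (!held || e.key !== held.key) {
            if (held && held.isDeleteMarker) {
                orphans.push(held.key); // previous key had a single version
            }
            held = e;
        } else {
            held = { key: e.key, isDeleteMarker: false }; // key has older versions
        }
    }
    if (held && held.isDeleteMarker) {
        orphans.push(held.key);
    }
    return orphans;
}
// exampleFindOrphans([
//     { key: 'a', isDeleteMarker: true },  // orphan: single delete marker
//     { key: 'b', isDeleteMarker: true },  // not an orphan: an older version follows
//     { key: 'b', isDeleteMarker: false },
// ]) => ['a']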

@@ -1,283 +0,0 @@
'use strict'; // eslint-disable-line strict

const Delimiter = require('./delimiter').Delimiter;
const Version = require('../../versioning/Version').Version;
const VSConst = require('../../versioning/constants').VersioningConstants;
const { inc, FILTER_END, FILTER_ACCEPT, FILTER_SKIP, SKIP_NONE } =
    require('./tools');

const VID_SEP = VSConst.VersionId.Separator;
const { DbPrefixes, BucketVersioningKeyFormat } = VSConst;

/**
 * Handle object listing with parameters
 *
 * @prop {String[]} CommonPrefixes - 'folders' defined by the delimiter
 * @prop {String[]} Contents - 'files' to list
 * @prop {Boolean} IsTruncated - truncated listing flag
 * @prop {String|undefined} NextMarker - marker per amazon format
 * @prop {Number} keys - count of listed keys
 * @prop {String|undefined} delimiter - separator per amazon format
 * @prop {String|undefined} prefix - prefix per amazon format
 * @prop {Number} maxKeys - number of keys to list
 */
class DelimiterVersions extends Delimiter {
    constructor(parameters, logger, vFormat) {
        super(parameters, logger, vFormat);
        // specific to version listing
        this.keyMarker = parameters.keyMarker;
        this.versionIdMarker = parameters.versionIdMarker;
        // internal state
        this.masterKey = undefined;
        this.masterVersionId = undefined;
        // listing results
        this.NextMarker = parameters.keyMarker;
        this.NextVersionIdMarker = undefined;
        this.inReplayPrefix = false;

        Object.assign(this, {
            [BucketVersioningKeyFormat.v0]: {
                genMDParams: this.genMDParamsV0,
                filter: this.filterV0,
                skipping: this.skippingV0,
            },
            [BucketVersioningKeyFormat.v1]: {
                genMDParams: this.genMDParamsV1,
                filter: this.filterV1,
                skipping: this.skippingV1,
            },
        }[this.vFormat]);
    }

    genMDParamsV0() {
        const params = {};
        if (this.parameters.prefix) {
            params.gte = this.parameters.prefix;
            params.lt = inc(this.parameters.prefix);
        }
        if (this.parameters.keyMarker) {
            if (params.gte && params.gte > this.parameters.keyMarker) {
                return params;
            }
            delete params.gte;
            if (this.parameters.versionIdMarker) {
                // versionIdMarker should always come with keyMarker
                // but may not be the other way around
                params.gt = this.parameters.keyMarker
                    + VID_SEP
                    + this.parameters.versionIdMarker;
            } else {
                params.gt = inc(this.parameters.keyMarker + VID_SEP);
            }
        }
        return params;
    }

    genMDParamsV1() {
        // return an array of two listing params sets to ask for
        // synchronized listing of M and V ranges
        const params = [{}, {}];
        if (this.parameters.prefix) {
            params[0].gte = DbPrefixes.Master + this.parameters.prefix;
            params[0].lt = DbPrefixes.Master + inc(this.parameters.prefix);
            params[1].gte = DbPrefixes.Version + this.parameters.prefix;
            params[1].lt = DbPrefixes.Version + inc(this.parameters.prefix);
        } else {
            params[0].gte = DbPrefixes.Master;
            params[0].lt = inc(DbPrefixes.Master); // stop after the last master key
            params[1].gte = DbPrefixes.Version;
            params[1].lt = inc(DbPrefixes.Version); // stop after the last version key
        }
        if (this.parameters.keyMarker) {
            if (params[1].gte <= DbPrefixes.Version + this.parameters.keyMarker) {
                delete params[0].gte;
                delete params[1].gte;
                params[0].gt = DbPrefixes.Master + inc(this.parameters.keyMarker + VID_SEP);
                if (this.parameters.versionIdMarker) {
                    // versionIdMarker should always come with keyMarker
                    // but may not be the other way around
                    params[1].gt = DbPrefixes.Version
                        + this.parameters.keyMarker
                        + VID_SEP
                        + this.parameters.versionIdMarker;
                } else {
                    params[1].gt = DbPrefixes.Version
                        + inc(this.parameters.keyMarker + VID_SEP);
                }
            }
        }
        return params;
    }
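
// Illustrative sketch (not from the diff): how the v0 params above
// translate listing markers into a LevelDB-style key range. VID_SEP is
// assumed to be '\0' here, and exampleIncLast() is a stand-in for the
// tools' inc() helper.
const EXAMPLE_VID_SEP = '\0';
function exampleIncLast(str: string): string {
    return str.slice(0, -1) + String.fromCharCode(str.charCodeAt(str.length - 1) + 1);
}
function exampleV0Range(keyMarker: string, versionIdMarker?: string): { gt: string } {
    if (versionIdMarker) {
        // resume right after the given version of the marker key
        return { gt: `${keyMarker}${EXAMPLE_VID_SEP}${versionIdMarker}` };
    }
    // otherwise skip all remaining versions of the marker key
    return { gt: exampleIncLast(`${keyMarker}${EXAMPLE_VID_SEP}`) };
}
// exampleV0Range('foo', 'v1') => { gt: 'foo\0v1' }
// exampleV0Range('foo')       => { gt: 'foo\x01' }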

    /**
     * Used to synchronize listing of M and V prefixes by object key
     *
     * @param {object} masterObj object listed from first range
     * returned by genMDParamsV1() (the master keys range)
     * @param {object} versionObj object listed from second range
     * returned by genMDParamsV1() (the version keys range)
     * @return {number} comparison result:
     *   * -1 if master key < version key
     *   * 1 if master key > version key
     */
    compareObjects(masterObj, versionObj) {
        const masterKey = masterObj.key.slice(DbPrefixes.Master.length);
        const versionKey = versionObj.key.slice(DbPrefixes.Version.length);
        return masterKey < versionKey ? -1 : 1;
    }

    /**
     * Add a (key, versionId, value) tuple to the listing.
     * Set the NextMarker to the current key.
     * Increment the keys counter.
     * @param {object} obj - the entry to add to the listing result
     * @param {String} obj.key - The key to add
     * @param {String} obj.versionId - versionId
     * @param {String} obj.value - The value of the key
     * @return {Boolean} - indicates if iteration should continue
     */
    addContents(obj) {
        if (this._reachedMaxKeys()) {
            return FILTER_END;
        }
        this.Contents.push({
            key: obj.key,
            value: this.trimMetadata(obj.value),
            versionId: obj.versionId,
        });
        this.NextMarker = obj.key;
        this.NextVersionIdMarker = obj.versionId;
        ++this.keys;
        return FILTER_ACCEPT;
    }

    /**
     * Filter to apply on each iteration if the bucket is in v0
     * versioning key format, based on:
     * - prefix
     * - delimiter
     * - maxKeys
     * The marker is being handled directly by levelDB
     * @param {Object} obj - The key and value of the element
     * @param {String} obj.key - The key of the element
     * @param {String} obj.value - The value of the element
     * @return {number} - indicates if iteration should continue
     */
    filterV0(obj) {
        if (obj.key.startsWith(DbPrefixes.Replay)) {
            this.inReplayPrefix = true;
            return FILTER_SKIP;
        }
        this.inReplayPrefix = false;

        if (Version.isPHD(obj.value)) {
            // return accept to avoid skipping the next values in range
            return FILTER_ACCEPT;
        }
        return this.filterCommon(obj.key, obj.value);
    }

    /**
     * Filter to apply on each iteration if the bucket is in v1
     * versioning key format, based on:
     * - prefix
     * - delimiter
     * - maxKeys
     * The marker is being handled directly by levelDB
     * @param {Object} obj - The key and value of the element
     * @param {String} obj.key - The key of the element
     * @param {String} obj.value - The value of the element
     * @return {number} - indicates if iteration should continue
     */
    filterV1(obj) {
        if (Version.isPHD(obj.value)) {
            // return accept to avoid skipping the next values in range
            return FILTER_ACCEPT;
        }
        // this function receives both M and V keys, but their prefix
        // length is the same so we can remove their prefix without
        // looking at the type of key
        return this.filterCommon(obj.key.slice(DbPrefixes.Master.length),
            obj.value);
    }

    filterCommon(key, value) {
        if (this.prefix && !key.startsWith(this.prefix)) {
            return FILTER_SKIP;
        }
        let nonversionedKey;
        let versionId = undefined;
        const versionIdIndex = key.indexOf(VID_SEP);
        if (versionIdIndex < 0) {
            nonversionedKey = key;
            this.masterKey = key;
            this.masterVersionId =
                Version.from(value).getVersionId() || 'null';
            versionId = this.masterVersionId;
        } else {
            nonversionedKey = key.slice(0, versionIdIndex);
            versionId = key.slice(versionIdIndex + 1);
            // skip a version key if it is the master version
            if (this.masterKey === nonversionedKey && this.masterVersionId === versionId) {
                return FILTER_SKIP;
            }
            this.masterKey = undefined;
            this.masterVersionId = undefined;
        }
        if (this.delimiter) {
            const baseIndex = this.prefix ? this.prefix.length : 0;
            const delimiterIndex = nonversionedKey.indexOf(this.delimiter, baseIndex);
            if (delimiterIndex >= 0) {
                return this.addCommonPrefix(nonversionedKey, delimiterIndex);
            }
        }
        return this.addContents({ key: nonversionedKey, value, versionId });
    }

    skippingV0() {
        if (this.inReplayPrefix) {
            return DbPrefixes.Replay;
        }
        if (this.NextMarker) {
            const index = this.NextMarker.lastIndexOf(this.delimiter);
            if (index === this.NextMarker.length - 1) {
                return this.NextMarker;
            }
        }
        return SKIP_NONE;
    }

    skippingV1() {
        const skipV0 = this.skippingV0();
        if (skipV0 === SKIP_NONE) {
            return SKIP_NONE;
        }
        // skip to the same object key in both M and V range listings
        return [DbPrefixes.Master + skipV0,
            DbPrefixes.Version + skipV0];
    }

    /**
     * Return an object containing all mandatory fields to use once the
     * iteration is done; doesn't show a NextMarker field if the output
     * isn't truncated
     * @return {Object} - following amazon format
     */
    result() {
        /* NextMarker is only provided when delimiter is used.
         * specified in v1 listing documentation
         * http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGET.html
         */
        return {
            CommonPrefixes: this.CommonPrefixes,
            Versions: this.Contents,
            IsTruncated: this.IsTruncated,
            NextKeyMarker: this.IsTruncated ? this.NextMarker : undefined,
            NextVersionIdMarker: this.IsTruncated ?
                this.NextVersionIdMarker : undefined,
            Delimiter: this.delimiter,
        };
    }
}

module.exports = { DelimiterVersions };

@@ -0,0 +1,535 @@
'use strict'; // eslint-disable-line strict

const Extension = require('./Extension').default;

import {
    FilterState,
    FilterReturnValue,
} from './delimiter';

const Version = require('../../versioning/Version').Version;
const VSConst = require('../../versioning/constants').VersioningConstants;
const { inc, FILTER_END, FILTER_ACCEPT, FILTER_SKIP, SKIP_NONE } =
    require('./tools');

const VID_SEP = VSConst.VersionId.Separator;
const { DbPrefixes, BucketVersioningKeyFormat } = VSConst;

export const enum DelimiterVersionsFilterStateId {
    NotSkipping = 1,
    SkippingPrefix = 2,
    SkippingVersions = 3,
};

export interface DelimiterVersionsFilterState_NotSkipping extends FilterState {
    id: DelimiterVersionsFilterStateId.NotSkipping,
};

export interface DelimiterVersionsFilterState_SkippingPrefix extends FilterState {
    id: DelimiterVersionsFilterStateId.SkippingPrefix,
    prefix: string;
};

export interface DelimiterVersionsFilterState_SkippingVersions extends FilterState {
    id: DelimiterVersionsFilterStateId.SkippingVersions,
    gt: string;
};

type KeyHandler = (key: string, versionId: string | undefined, value: string) => FilterReturnValue;

type ResultObject = {
    CommonPrefixes: string[],
    Versions: {
        key: string;
        value: string;
        versionId: string;
    }[];
    IsTruncated: boolean;
    Delimiter?: string;
    NextKeyMarker?: string;
    NextVersionIdMarker?: string;
};

type GenMDParamsItem = {
    gt?: string,
    gte?: string,
    lt?: string,
};

/**
 * Handle object listing with parameters
 *
 * @prop {String[]} CommonPrefixes - 'folders' defined by the delimiter
 * @prop {String[]} Contents - 'files' to list
 * @prop {Boolean} IsTruncated - truncated listing flag
 * @prop {String|undefined} NextMarker - marker per amazon format
 * @prop {Number} keys - count of listed keys
 * @prop {String|undefined} delimiter - separator per amazon format
 * @prop {String|undefined} prefix - prefix per amazon format
 * @prop {Number} maxKeys - number of keys to list
 */
export class DelimiterVersions extends Extension {

    state: FilterState;
    keyHandlers: { [id: number]: KeyHandler };

    constructor(parameters, logger, vFormat) {
        super(parameters, logger);
        // original listing parameters
        this.delimiter = parameters.delimiter;
        this.prefix = parameters.prefix;
        this.maxKeys = parameters.maxKeys || 1000;
        // specific to version listing
        this.keyMarker = parameters.keyMarker;
        this.versionIdMarker = parameters.versionIdMarker;
        // internal state
        this.masterKey = undefined;
        this.masterVersionId = undefined;
        this.nullKey = null;
        this.vFormat = vFormat || BucketVersioningKeyFormat.v0;
        // listing results
        this.CommonPrefixes = [];
        this.Versions = [];
        this.IsTruncated = false;
        this.nextKeyMarker = parameters.keyMarker;
        this.nextVersionIdMarker = undefined;

        this.keyHandlers = {};

        Object.assign(this, {
            [BucketVersioningKeyFormat.v0]: {
                genMDParams: this.genMDParamsV0,
                getObjectKey: this.getObjectKeyV0,
                skipping: this.skippingV0,
            },
            [BucketVersioningKeyFormat.v1]: {
                genMDParams: this.genMDParamsV1,
                getObjectKey: this.getObjectKeyV1,
                skipping: this.skippingV1,
            },
        }[this.vFormat]);

        if (this.vFormat === BucketVersioningKeyFormat.v0) {
            this.setKeyHandler(
                DelimiterVersionsFilterStateId.NotSkipping,
                this.keyHandler_NotSkippingV0.bind(this));
        } else {
            this.setKeyHandler(
                DelimiterVersionsFilterStateId.NotSkipping,
                this.keyHandler_NotSkippingV1.bind(this));
        }
        this.setKeyHandler(
            DelimiterVersionsFilterStateId.SkippingPrefix,
            this.keyHandler_SkippingPrefix.bind(this));

        this.setKeyHandler(
            DelimiterVersionsFilterStateId.SkippingVersions,
            this.keyHandler_SkippingVersions.bind(this));

        if (this.versionIdMarker) {
            this.state = <DelimiterVersionsFilterState_SkippingVersions> {
                id: DelimiterVersionsFilterStateId.SkippingVersions,
                gt: `${this.keyMarker}${VID_SEP}${this.versionIdMarker}`,
            };
        } else {
            this.state = <DelimiterVersionsFilterState_NotSkipping> {
                id: DelimiterVersionsFilterStateId.NotSkipping,
            };
        }
    }

    genMDParamsV0() {
        const params: GenMDParamsItem = {};
        if (this.prefix) {
            params.gte = this.prefix;
            params.lt = inc(this.prefix);
        }
        if (this.keyMarker && this.delimiter) {
            const commonPrefix = this.getCommonPrefix(this.keyMarker);
            if (commonPrefix) {
                const afterPrefix = inc(commonPrefix);
                if (!params.gte || afterPrefix > params.gte) {
                    params.gte = afterPrefix;
                }
            }
        }
        if (this.keyMarker && (!params.gte || this.keyMarker >= params.gte)) {
            delete params.gte;
            if (this.versionIdMarker) {
                // start from the beginning of versions so we can
                // check if there's a null key and fetch it
                // (afterwards, we can skip the rest of versions until
                // we reach versionIdMarker)
                params.gte = `${this.keyMarker}${VID_SEP}`;
            } else {
                params.gt = `${this.keyMarker}${inc(VID_SEP)}`;
            }
        }
        return params;
    }

    genMDParamsV1() {
        // return an array of two listing params sets to ask for
        // synchronized listing of M and V ranges
        const v0Params: GenMDParamsItem = this.genMDParamsV0();
        const mParams: GenMDParamsItem = {};
        const vParams: GenMDParamsItem = {};
        if (v0Params.gt) {
            mParams.gt = `${DbPrefixes.Master}${v0Params.gt}`;
            vParams.gt = `${DbPrefixes.Version}${v0Params.gt}`;
        } else if (v0Params.gte) {
            mParams.gte = `${DbPrefixes.Master}${v0Params.gte}`;
            vParams.gte = `${DbPrefixes.Version}${v0Params.gte}`;
        } else {
            mParams.gte = DbPrefixes.Master;
            vParams.gte = DbPrefixes.Version;
        }
        if (v0Params.lt) {
            mParams.lt = `${DbPrefixes.Master}${v0Params.lt}`;
            vParams.lt = `${DbPrefixes.Version}${v0Params.lt}`;
        } else {
            mParams.lt = inc(DbPrefixes.Master);
            vParams.lt = inc(DbPrefixes.Version);
        }
        return [mParams, vParams];
    }
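
// Illustrative sketch (not from the diff): genMDParamsV1 above derives the
// two synchronized ranges by prefixing the v0 range. The prefix strings
// below are assumptions standing in for DbPrefixes.Master and
// DbPrefixes.Version, and the fallback upper bound is simplified.
function exampleDeriveMVParams(v0: { gt?: string, gte?: string, lt?: string }) {
    const M = '\x7fM'; // assumed stand-in for DbPrefixes.Master
    const V = '\x7fV'; // assumed stand-in for DbPrefixes.Version
    const withPrefix = (p: string) => {
        const out: { gt?: string, gte?: string, lt?: string } = {};
        if (v0.gt) {
            out.gt = p + v0.gt;
        } else {
            out.gte = p + (v0.gte || ''); // no lower bound: start at the prefix itself
        }
        out.lt = v0.lt ? p + v0.lt : p + '\xff'; // assumed bound past the whole prefix
        return out;
    };
    return [withPrefix(M), withPrefix(V)];
}
// exampleDeriveMVParams({ gte: 'photos/', lt: 'photos0' })
// => [{ gte: '\x7fMphotos/', lt: '\x7fMphotos0' },
//     { gte: '\x7fVphotos/', lt: '\x7fVphotos0' }]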

    /**
     * check if the max keys count has been reached and set the
     * final state of the result if it is the case
     * @return {Boolean} - indicates if the iteration has to stop
     */
    _reachedMaxKeys(): boolean {
        if (this.keys >= this.maxKeys) {
            // In cases of maxKeys <= 0 -> IsTruncated = false
            this.IsTruncated = this.maxKeys > 0;
            return true;
        }
        return false;
    }

    /**
     * Used to synchronize listing of M and V prefixes by object key
     *
     * @param {object} masterObj object listed from first range
     * returned by genMDParamsV1() (the master keys range)
     * @param {object} versionObj object listed from second range
     * returned by genMDParamsV1() (the version keys range)
     * @return {number} comparison result:
     *   * -1 if master key < version key
     *   * 1 if master key > version key
     */
    compareObjects(masterObj, versionObj) {
        const masterKey = masterObj.key.slice(DbPrefixes.Master.length);
        const versionKey = versionObj.key.slice(DbPrefixes.Version.length);
        return masterKey < versionKey ? -1 : 1;
    }

    /**
     * Parse a listing key into its nonversioned key and version ID components
     *
     * @param {string} fullKey - full listing key
     * @return {object} obj
     * @return {string} obj.key - nonversioned part of key
     * @return {string} [obj.versionId] - version ID in the key
     */
    parseKey(fullKey: string): { key: string, versionId?: string } {
        const versionIdIndex = fullKey.indexOf(VID_SEP);
        if (versionIdIndex === -1) {
            return { key: fullKey };
        }
        const nonversionedKey: string = fullKey.slice(0, versionIdIndex);
        const versionId: string = fullKey.slice(versionIdIndex + 1);
        return { key: nonversionedKey, versionId };
    }
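
    // Illustrative usage (not from the diff), assuming VID_SEP is '\0':
    //   parseKey('photos/cat.jpg')       => { key: 'photos/cat.jpg' }
    //   parseKey('photos/cat.jpg\0v123') => { key: 'photos/cat.jpg', versionId: 'v123' }
    //   parseKey('photos/cat.jpg\0')     => { key: 'photos/cat.jpg', versionId: '' } // null key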

    /**
     * Include a key in the listing output, in the Versions or CommonPrefix result
     *
     * @param {string} key - key (without version ID)
     * @param {string} versionId - version ID
     * @param {string} value - metadata value
     * @return {undefined}
     */
    addKey(key: string, versionId: string, value: string) {
        // add the subprefix to the common prefixes if the key has the delimiter
        const commonPrefix = this.getCommonPrefix(key);
        if (commonPrefix) {
            this.addCommonPrefix(commonPrefix);
            // transition into SkippingPrefix state to skip all following keys
            // while they start with the same prefix
            this.setState(<DelimiterVersionsFilterState_SkippingPrefix> {
                id: DelimiterVersionsFilterStateId.SkippingPrefix,
                prefix: commonPrefix,
            });
        } else {
            this.addVersion(key, versionId, value);
        }
    }

    /**
     * Add a (key, versionId, value) tuple to the listing.
     * Set the NextMarker to the current key.
     * Increment the keys counter.
     * @param {String} key - The key to add
     * @param {String} versionId - versionId
     * @param {String} value - The value of the key
     * @return {undefined}
     */
    addVersion(key: string, versionId: string, value: string) {
        this.Versions.push({
            key,
            versionId,
            value: this.trimMetadata(value),
        });
        this.nextKeyMarker = key;
        this.nextVersionIdMarker = versionId;
        ++this.keys;
    }

    getCommonPrefix(key: string): string | undefined {
        if (!this.delimiter) {
            return undefined;
        }
        const baseIndex = this.prefix ? this.prefix.length : 0;
        const delimiterIndex = key.indexOf(this.delimiter, baseIndex);
        if (delimiterIndex === -1) {
            return undefined;
        }
        return key.substring(0, delimiterIndex + this.delimiter.length);
    }

    /**
     * Add a Common Prefix in the list
     * @param {String} commonPrefix - common prefix to add
     * @return {undefined}
     */
    addCommonPrefix(commonPrefix: string): void {
        // add the new prefix to the list
        this.CommonPrefixes.push(commonPrefix);
        ++this.keys;
        this.nextKeyMarker = commonPrefix;
        this.nextVersionIdMarker = undefined;
    }

    /**
     * Cache the current null key, to save it for outputting it later at
     * the correct position
     *
     * @param {String} key - nonversioned key of the null key
     * @param {String} versionId - real version ID of the null key
     * @param {String} value - value of the null key
     * @return {undefined}
     */
    cacheNullKey(key: string, versionId: string, value: string): void {
        this.nullKey = { key, versionId, value };
    }

    getObjectKeyV0(obj: { key: string }): string {
        return obj.key;
    }

    getObjectKeyV1(obj: { key: string }): string {
        return obj.key.slice(DbPrefixes.Master.length);
    }

    /**
     * Filter to apply on each iteration, based on:
     * - prefix
     * - delimiter
     * - maxKeys
     * The marker is being handled directly by levelDB
     * @param {Object} obj - The key and value of the element
     * @param {String} obj.key - The key of the element
     * @param {String} obj.value - The value of the element
     * @return {number} - indicates if iteration should continue
     */
    filter(obj: { key: string, value: string }): FilterReturnValue {
        const key = this.getObjectKey(obj);
        const value = obj.value;

        const { key: nonversionedKey, versionId: keyVersionId } = this.parseKey(key);
        if (this.nullKey) {
            if (this.nullKey.key !== nonversionedKey
                || this.nullKey.versionId < <string> keyVersionId) {
                this.handleKey(
                    this.nullKey.key, this.nullKey.versionId, this.nullKey.value);
                this.nullKey = null;
            }
        }
        if (keyVersionId === '') {
            // null key
            this.cacheNullKey(nonversionedKey, Version.from(value).getVersionId(), value);
            if (this.state.id === DelimiterVersionsFilterStateId.SkippingVersions) {
                return FILTER_SKIP;
            }
            return FILTER_ACCEPT;
        }
        return this.handleKey(nonversionedKey, keyVersionId, value);
    }
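
    // Illustrative sketch (not from the diff): the cached null key is
    // re-inserted once its real version ID sorts before the version ID of
    // the key being processed (a smaller version ID string is assumed to
    // denote a more recent version):
    //
    //   cacheNullKey('doc', 'v200', value);       // null key, real version ID 'v200'
    //   filter({ key: 'doc\0v100', value: ... }); // 'v200' < 'v100' is false: stays cached
    //   filter({ key: 'doc\0v300', value: ... }); // 'v200' < 'v300': null key emitted first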

    setState(state: FilterState): void {
        this.state = state;
    }

    setKeyHandler(stateId: number, keyHandler: KeyHandler): void {
        this.keyHandlers[stateId] = keyHandler;
    }

    handleKey(key: string, versionId: string | undefined, value: string): FilterReturnValue {
        return this.keyHandlers[this.state.id](key, versionId, value);
    }

    keyHandler_NotSkippingV0(key: string, versionId: string | undefined, value: string): FilterReturnValue {
        if (key.startsWith(DbPrefixes.Replay)) {
            // skip internal replay prefix entirely
            this.setState(<DelimiterVersionsFilterState_SkippingPrefix> {
                id: DelimiterVersionsFilterStateId.SkippingPrefix,
                prefix: DbPrefixes.Replay,
            });
            return FILTER_SKIP;
        }
        if (Version.isPHD(value)) {
            return FILTER_ACCEPT;
        }
        return this.filter_onNewKey(key, versionId, value);
    }

    keyHandler_NotSkippingV1(key: string, versionId: string | undefined, value: string): FilterReturnValue {
        // NOTE: this check on PHD is only useful for Artesca; S3C
        // does not use PHDs in V1 format
        if (Version.isPHD(value)) {
            return FILTER_ACCEPT;
        }
        return this.filter_onNewKey(key, versionId, value);
    }

    filter_onNewKey(key: string, versionId: string | undefined, value: string): FilterReturnValue {
        if (this._reachedMaxKeys()) {
            return FILTER_END;
        }
        if (versionId === undefined) {
            this.masterKey = key;
            this.masterVersionId = Version.from(value).getVersionId() || 'null';
            this.addKey(this.masterKey, this.masterVersionId, value);
        } else {
            if (this.masterKey === key && this.masterVersionId === versionId) {
                // do not add a version key if it is the master version
                return FILTER_ACCEPT;
            }
            this.addKey(key, versionId, value);
        }
        return FILTER_ACCEPT;
    }

    keyHandler_SkippingPrefix(key: string, versionId: string | undefined, value: string): FilterReturnValue {
        const { prefix } = <DelimiterVersionsFilterState_SkippingPrefix> this.state;
        if (key.startsWith(prefix)) {
            return FILTER_SKIP;
        }
        this.setState(<DelimiterVersionsFilterState_NotSkipping> {
            id: DelimiterVersionsFilterStateId.NotSkipping,
        });
        return this.handleKey(key, versionId, value);
    }

    keyHandler_SkippingVersions(key: string, versionId: string | undefined, value: string): FilterReturnValue {
        if (key === this.keyMarker) {
            // since the nonversioned key equals the marker, there is
            // necessarily a versionId in this key
            const _versionId = <string> versionId;
            if (_versionId < this.versionIdMarker) {
                // skip all versions until marker
                return FILTER_SKIP;
            }
            if (_versionId === this.versionIdMarker) {
                // nothing left to skip, so return ACCEPT, but don't add this version
                return FILTER_ACCEPT;
            }
        }
        this.setState(<DelimiterVersionsFilterState_NotSkipping> {
            id: DelimiterVersionsFilterStateId.NotSkipping,
        });
        return this.handleKey(key, versionId, value);
    }
|
||||
|
||||
skippingBase(): string | undefined {
|
||||
switch (this.state.id) {
|
||||
case DelimiterVersionsFilterStateId.SkippingPrefix:
|
||||
const { prefix } = <DelimiterVersionsFilterState_SkippingPrefix> this.state;
|
||||
return inc(prefix);
|
||||
|
||||
case DelimiterVersionsFilterStateId.SkippingVersions:
|
||||
const { gt } = <DelimiterVersionsFilterState_SkippingVersions> this.state;
|
||||
// the contract of skipping() is to return the first key
|
||||
// that can be skipped to, so adding a null byte to skip
|
||||
// over the existing versioned key set in 'gt'
|
||||
return `${gt}\0`;
|
||||
|
||||
default:
|
||||
return SKIP_NONE;
|
||||
}
|
||||
}
|
||||
|
||||
skippingV0() {
|
||||
return this.skippingBase();
|
||||
}
|
||||
|
||||
skippingV1() {
|
||||
const skipTo = this.skippingBase();
|
||||
if (skipTo === SKIP_NONE) {
|
||||
return SKIP_NONE;
|
||||
}
|
||||
// skip to the same object key in both M and V range listings
|
||||
return [
|
||||
`${DbPrefixes.Master}${skipTo}`,
|
||||
`${DbPrefixes.Version}${skipTo}`,
|
||||
];
|
||||
}
|
||||
|
||||
/**
|
||||
* Return an object containing all mandatory fields to use once the
|
||||
* iteration is done, doesn't show a NextMarker field if the output
|
||||
* isn't truncated
|
||||
* @return {Object} - following amazon format
|
||||
*/
|
||||
result() {
|
||||
// Add the last null key if still in cache (when it is the
|
||||
// last version of the last key)
|
||||
//
|
||||
// NOTE: _reachedMaxKeys sets IsTruncated to true when it
|
||||
// returns true. Here we want this because either:
|
||||
//
|
||||
// - we did not reach the max keys yet so the result is not
|
||||
// - truncated, and there is still room for the null key in
|
||||
// - the results
|
||||
//
|
||||
// - OR we reached it already while having to process a new
|
||||
// key (so the result is truncated even without the null key)
|
||||
//
|
||||
// - OR we are *just* below the limit but the null key to add
|
||||
// does not fit, so we know the result is now truncated
|
||||
// because there remains the null key to be output.
|
||||
//
|
||||
if (this.nullKey) {
|
||||
this.handleKey(this.nullKey.key, this.nullKey.versionId, this.nullKey.value);
|
||||
}
|
||||
const result: ResultObject = {
|
||||
CommonPrefixes: this.CommonPrefixes,
|
||||
Versions: this.Versions,
|
||||
IsTruncated: this.IsTruncated,
|
||||
};
|
||||
if (this.delimiter) {
|
||||
result.Delimiter = this.delimiter;
|
||||
}
|
||||
if (this.IsTruncated) {
|
||||
result.NextKeyMarker = this.nextKeyMarker;
|
||||
if (this.nextVersionIdMarker) {
|
||||
result.NextVersionIdMarker = this.nextVersionIdMarker;
|
||||
}
|
||||
};
|
||||
return result;
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { DelimiterVersions };
|
|
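A minimal sketch of how a listing backend can drive this extension. The import paths, the `(params, logger, vFormat)` constructor and the exact FILTER_* constant names are assumptions for illustration, not the repository's actual wiring:

import { DelimiterVersions } from './delimiterVersions';
import { FILTER_END } from './tools'; // assumed location of the FILTER_* constants

function listVersions(entries: Array<{ key: string, value: string }>, params: any, logger: any) {
    const extension = new DelimiterVersions(params, logger, 'v0');
    for (const entry of entries) {
        if (extension.filter(entry) === FILTER_END) {
            break; // max keys reached; result() will report IsTruncated
        }
        // on FILTER_SKIP, a real backend restarts its range query at
        // extension.skipping() instead of scanning entry by entry
    }
    return extension.result();
}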
@@ -6,4 +6,7 @@ module.exports = {
     DelimiterMaster: require('./delimiterMaster')
         .DelimiterMaster,
     MPU: require('./MPU').MultipartUploads,
+    DelimiterCurrent: require('./delimiterCurrent').DelimiterCurrent,
+    DelimiterNonCurrent: require('./delimiterNonCurrent').DelimiterNonCurrent,
+    DelimiterOrphanDeleteMarker: require('./delimiterOrphanDeleteMarker').DelimiterOrphanDeleteMarker,
 };
@@ -52,21 +52,21 @@ class Skip {
         assert(this.skipRangeCb);

         const filteringResult = this.extension.filter(entry);
-        const skippingRange = this.extension.skipping();
+        const skipTo = this.extension.skipping();

         if (filteringResult === FILTER_END) {
             this.listingEndCb();
         } else if (filteringResult === FILTER_SKIP
-            && skippingRange !== SKIP_NONE) {
+            && skipTo !== SKIP_NONE) {
             if (++this.streakLength >= MAX_STREAK_LENGTH) {
                 let newRange;
-                if (Array.isArray(skippingRange)) {
+                if (Array.isArray(skipTo)) {
                     newRange = [];
-                    for (let i = 0; i < skippingRange.length; ++i) {
-                        newRange.push(this._inc(skippingRange[i]));
+                    for (let i = 0; i < skipTo.length; ++i) {
+                        newRange.push(skipTo[i]);
                     }
                 } else {
-                    newRange = this._inc(skippingRange);
+                    newRange = skipTo;
                 }
                 /* Avoid looping on the same range again and again. */
                 if (newRange === this.gteParams) {

@@ -79,16 +79,6 @@ class Skip {
             this.streakLength = 0;
         }
     }
-
-    _inc(str) {
-        if (!str) {
-            return str;
-        }
-        const lastCharValue = str.charCodeAt(str.length - 1);
-        const lastCharNewValue = String.fromCharCode(lastCharValue + 1);
-
-        return `${str.slice(0, str.length - 1)}${lastCharNewValue}`;
-    }
 }
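Why `_inc` could be dropped: the extension's `skipping()` now already returns the first key past the skipped range (e.g. `inc(prefix)` in `skippingBase()` above), so Skip no longer has to bump the key itself. A hypothetical equivalent of the removed helper, for illustration:

function inc(str: string): string {
    // bump the last character so a range query resumes strictly
    // after every key sharing the given prefix
    return str
        ? str.slice(0, -1) + String.fromCharCode(str.charCodeAt(str.length - 1) + 1)
        : str;
}

console.log(inc('photos/')); // 'photos0' - jumps past every 'photos/...' key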
@@ -14,7 +14,7 @@ function vaultSignatureCb(
     err: Error | null,
     authInfo: { message: { body: any } },
     log: Logger,
-    callback: (err: Error | null, data?: any, results?: any, params?: any) => void,
+    callback: (err: Error | null, data?: any, results?: any, params?: any, infos?: any) => void,
     streamingV4Params?: any
 ) {
     // vaultclient API guarantees that it returns:

@@ -38,7 +38,9 @@ function vaultSignatureCb(
     }
     // @ts-ignore
     log.addDefaultFields(auditLog);
-    return callback(null, userInfo, authorizationResults, streamingV4Params);
+    return callback(null, userInfo, authorizationResults, streamingV4Params, {
+        accountQuota: info.accountQuota || {},
+    });
 }

 export type AuthV4RequestParams = {

@@ -384,4 +386,19 @@ export default class Vault {
             return callback(null, respBody);
         });
     }
+
+    report(log: Logger, callback: (err: Error | null, data?: any) => void) {
+        // call the report function of the client
+        if (!this.client.report) {
+            return callback(null, {});
+        }
+        // @ts-ignore
+        return this.client.report(log.getSerializedUids(), (err: Error | null, obj?: any) => {
+            if (err) {
+                log.debug(`error from ${this.implName}`, { error: err });
+                return callback(err);
+            }
+            return callback(null, obj);
+        });
+    }
 }
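A hedged usage sketch of the new `report()` method; `vault` and `log` stand in for real instances, and the fallback to an empty object when the client lacks `report` comes straight from the code above:

vault.report(log, (err, data) => {
    if (err) {
        return log.error('vault report failed', { error: err });
    }
    return log.info('vault report', { data });
});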
@@ -163,6 +163,20 @@ function doAuth(
     return cb(errors.InternalError);
 }

+/**
+ * This function will generate a version 4 content-md5 header
+ * It looks at the request path to determine what kind of header encoding is required
+ *
+ * @param path - the request path
+ * @param payload - the request payload to hash
+ */
+function generateContentMD5Header(
+    path: string,
+    payload: string,
+) {
+    const encoding = path && path.startsWith('/_/backbeat/') ? 'hex' : 'base64';
+    return crypto.createHash('md5').update(payload, 'binary').digest(encoding);
+}
 /**
  * This function will generate a version 4 header
  *

@@ -175,6 +189,7 @@ function doAuth(
  * @param [proxyPath] - path that gets proxied by reverse proxy
  * @param [sessionToken] - security token if the access/secret keys
  * are temporary credentials from STS
+ * @param [payload] - body of the request if any
  */
 function generateV4Headers(
     request: any,

@@ -182,8 +197,9 @@ function generateV4Headers(
     accessKey: string,
     secretKeyValue: string,
     awsService: string,
-    proxyPath: string,
-    sessionToken: string
+    proxyPath?: string,
+    sessionToken?: string,
+    payload?: string,
 ) {
     Object.assign(request, { headers: {} });
     const amzDate = convertUTCtoISO8601(Date.now());

@@ -196,7 +212,7 @@ function generateV4Headers(
     const timestamp = amzDate;
     const algorithm = 'AWS4-HMAC-SHA256';

-    let payload = '';
+    payload = payload || '';
     if (request.method === 'POST') {
         payload = queryString.stringify(data, undefined, undefined, {
             encodeURIComponent,

@@ -207,6 +223,7 @@ function generateV4Headers(
     request.setHeader('host', request._headers.host);
     request.setHeader('x-amz-date', amzDate);
     request.setHeader('x-amz-content-sha256', payloadChecksum);
+    request.setHeader('content-md5', generateContentMD5Header(request.path, payload));

     if (sessionToken) {
         request.setHeader('x-amz-security-token', sessionToken);

@@ -217,6 +234,7 @@ function generateV4Headers(
         .filter(headerName =>
             headerName.startsWith('x-amz-')
             || headerName.startsWith('x-scal-')
+            || headerName === 'content-md5'
             || headerName === 'host',
         ).sort().join(';');
     const params = { request, signedHeaders, payloadChecksum,
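A small standalone sketch of the path-dependent digest selection introduced above: backbeat routes get a hex digest, everything else gets the standard base64 Content-MD5. The paths below are examples only:

import * as crypto from 'crypto';

function md5For(path: string, payload: string): string {
    const encoding = path.startsWith('/_/backbeat/') ? 'hex' : 'base64';
    return crypto.createHash('md5').update(payload, 'binary').digest(encoding);
}

md5For('/_/backbeat/metadata', '{}'); // hex digest
md5For('/bucket/key', '{}');          // base64 digest, as AWS expects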
@@ -133,23 +133,37 @@ export default class ChainBackend extends BaseBackend {
                 return;
             }

-            resp.message.body.forEach(policy => {
-                const key = (policy.arn || '') + (policy.versionId || '');
+            const check = (policy) => {
+                const key = (policy.arn || '') + (policy.versionId || '') + (policy.action || '');
                 if (!policyMap[key] || !policyMap[key].isAllowed) {
                     policyMap[key] = policy;
                 }
                 // else is duplicate policy
-            });
+            };
+
+            resp.message.body.forEach(policy => {
+                if (Array.isArray(policy)) {
+                    policy.forEach(authResult => check(authResult));
+                } else {
+                    check(policy);
+                }
+            });
         });

         return Object.keys(policyMap).map(key => {
             const policyRes: any = { isAllowed: policyMap[key].isAllowed };
             if (policyMap[key].arn !== '') {
                 policyRes.arn = policyMap[key].arn;
             }
             if (policyMap[key].versionId) {
                 policyRes.versionId = policyMap[key].versionId;
             }
             if (policyMap[key].isImplicit !== undefined) {
                 policyRes.isImplicit = policyMap[key].isImplicit;
             }
+            if (policyMap[key].action) {
+                policyRes.action = policyMap[key].action;
+            }
             return policyRes;
         });
     }

@@ -198,4 +212,22 @@ export default class ChainBackend extends BaseBackend {
             return callback(null, res);
         });
     }
+
+    report(reqUid: string, callback: any) {
+        this._forEachClient((client, done) =>
+            client.report(reqUid, done),
+        (err, res) => {
+            if (err) {
+                return callback(err);
+            }
+            const mergedRes = res.reduce((acc, val) => {
+                Object.keys(val).forEach(k => {
+                    acc[k] = val[k];
+                });
+                return acc;
+            }, {});
+
+            return callback(null, mergedRes);
+        });
+    }
 }
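A sketch of the deduplication rule introduced above: results are now keyed on (arn, versionId, action), and an allowing entry wins over an implicit deny for the same key. The policy objects below are illustrative shapes, not an actual vault payload:

type PolicyResult = { isAllowed: boolean, arn?: string, versionId?: string, action?: string };

const policyMap: { [key: string]: PolicyResult } = {};
const check = (policy: PolicyResult) => {
    const key = (policy.arn || '') + (policy.versionId || '') + (policy.action || '');
    if (!policyMap[key] || !policyMap[key].isAllowed) {
        policyMap[key] = policy;
    }
};

[
    { arn: 'arn:aws:s3:::b/k', action: 's3:GetObject', isAllowed: false },
    { arn: 'arn:aws:s3:::b/k', action: 's3:GetObject', isAllowed: true },
].forEach(check);
// policyMap keeps the allowing entry for this (arn, action) pair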
@@ -161,6 +161,10 @@ class InMemoryBackend extends BaseBackend {
         };
         return cb(null, vaultReturnObject);
     }
+
+    report(log: Logger, callback: any) {
+        return callback(null, {});
+    }
 }
@@ -35,15 +35,16 @@ export default function awsURIencode(
     encodeSlash?: boolean,
     noEncodeStar?: boolean
 ) {
-    const encSlash = encodeSlash === undefined ? true : encodeSlash;
-    let encoded = '';
     /**
      * Duplicate query params are not supported by AWS S3 APIs. These params
      * are parsed as Arrays by Node.js HTTP parser which breaks this method
      */
     if (typeof input !== 'string') {
-        return encoded;
+        return '';
     }
+    let encoded = "";
+    const slash = encodeSlash === undefined || encodeSlash ? '%2F' : '/';
+    const star = noEncodeStar !== undefined && noEncodeStar ? '*' : '%2A';
     for (let i = 0; i < input.length; i++) {
         let ch = input.charAt(i);
         if ((ch >= 'A' && ch <= 'Z') ||

@@ -55,9 +56,9 @@ export default function awsURIencode(
         } else if (ch === ' ') {
             encoded = encoded.concat('%20');
         } else if (ch === '/') {
-            encoded = encoded.concat(encSlash ? '%2F' : ch);
+            encoded = encoded.concat(slash);
         } else if (ch === '*') {
-            encoded = encoded.concat(noEncodeStar ? '*' : '%2A');
+            encoded = encoded.concat(star);
         } else {
             if (ch >= '\uD800' && ch <= '\uDBFF') {
                 // If this character is a high surrogate peek the next character
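Hedged examples of the flag behaviour above (signature-style URI encoding); the expected outputs follow directly from the slash/star handling in the function:

awsURIencode('a b/c');               // 'a%20b%2Fc' - slashes encoded by default
awsURIencode('a b/c', false);        // 'a%20b/c'   - keep slashes for canonical paths
awsURIencode('report*', true, true); // 'report*'   - leave '*' as-is when requested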
@@ -0,0 +1,569 @@
import cluster, { Worker } from 'cluster';
import * as werelogs from 'werelogs';

import { default as errors } from '../../lib/errors';

const rpcLogger = new werelogs.Logger('ClusterRPC');

/**
 * Remote procedure calls support between cluster workers.
 *
 * When using the cluster module, new processes are forked and are
 * dispatched workloads, usually HTTP requests. The ClusterRPC module
 * implements an RPC system to send commands to all cluster worker
 * processes at once from any particular worker, and retrieve their
 * individual command results, like a distributed map operation.
 *
 * The existing nodejs cluster IPC channel is set up from the primary
 * to each worker, but not between workers, so there has to be a hop
 * through the primary.
 *
 * How a command is treated:
 *
 * - a worker sends a command message to the primary
 *
 * - the primary then forwards that command to each existing worker
 *   (including the requestor)
 *
 * - each worker then executes the command and returns a result or an
 *   error
 *
 * - the primary gathers all workers results into an array
 *
 * - finally, the primary dispatches the results array to the original
 *   requesting worker
 *
 *
 * Limitations:
 *
 * - The command payload must be serializable, which means that:
 *   - it should not contain circular references
 *   - it should be of a reasonable size to be sent in a single RPC message
 *
 * - The "toWorkers" parameter of value "*" targets the set of workers
 *   that are available at the time the command is dispatched. Any new
 *   worker spawned after the command has been dispatched for
 *   processing, but before the command completes, does not execute
 *   the command and hence is not part of the results array.
 *
 *
 * To set it up:
 *
 * - On the primary:
 *   if (cluster.isPrimary) {
 *       setupRPCPrimary();
 *   }
 *
 * - On the workers:
 *   if (!cluster.isPrimary) {
 *       setupRPCWorker({
 *           handler1: (payload: object, uids: string, callback: HandlerCallback) => void,
 *           handler2: ...
 *       });
 *   }
 *   Handler functions will be passed the command payload, request
 *   serialized uids, and must call the callback when the worker is done
 *   processing the command:
 *   callback(error: Error | null | undefined, result?: any)
 *
 * When this setup is done, any worker can start sending commands by calling
 * the async function sendWorkerCommand().
 */

// exported types

export type ResultObject = {
    error: Error | null;
    result: any;
};

/**
 * saved Promise for sendWorkerCommand
 */
export type CommandPromise = {
    resolve: (results?: ResultObject[]) => void;
    reject: (error: Error) => void;
    timeout: NodeJS.Timeout | null;
};
export type HandlerCallback = (error: (Error & { code?: number }) | null | undefined, result?: any) => void;
export type HandlerFunction = (payload: object, uids: string, callback: HandlerCallback) => void;
export type HandlersMap = {
    [index: string]: HandlerFunction;
};
export type PrimaryHandlerFunction = (worker: Worker, payload: object, uids: string, callback: HandlerCallback) => void;
export type PrimaryHandlersMap = Record<string, PrimaryHandlerFunction>;

// private types

type RPCMessage<T extends string, P> = {
    type: T;
    uids: string;
    payload: P;
};

type RPCCommandMessage = RPCMessage<'cluster-rpc:command', any> & {
    toWorkers: string;
    toHandler: string;
};

type MarshalledResultObject = {
    error: string | null;
    errorCode?: number;
    result: any;
};

type RPCCommandResultMessage = RPCMessage<'cluster-rpc:commandResult', MarshalledResultObject>;

type RPCCommandResultsMessage = RPCMessage<'cluster-rpc:commandResults', {
    results: MarshalledResultObject[];
}>;

type RPCCommandErrorMessage = RPCMessage<'cluster-rpc:commandError', {
    error: string;
}>;

interface RPCSetupOptions {
    /**
     * As werelogs is not a peerDependency, arsenal and a parent project
     * might have their own separate versions duplicated in dependencies.
     * The configs are therefore not shared.
     * Use this to propagate werelogs config to arsenal's ClusterRPC.
     */
    werelogsConfig?: Parameters<typeof werelogs.configure>[0];
};

/**
 * In primary: store worker IDs that are waiting to be dispatched
 * their command's results, as a mapping.
 */
const uidsToWorkerId: {
    [index: string]: number;
} = {};


/**
 * In primary: store worker responses for commands in progress as a
 * mapping.
 *
 * Result objects are 'null' while the worker is still processing the
 * command. When a worker finishes processing it stores the result as:
 *     {
 *         error: string | null,
 *         result: any
 *     }
 */
const uidsToCommandResults: {
    [index: string]: {
        [index: number]: MarshalledResultObject | null;
    };
} = {};

/**
 * In workers: store promise callbacks for commands waiting to be
 * dispatched, as a mapping.
 */
const uidsToCommandPromise: {
    [index: string]: CommandPromise;
} = {};


function _isRpcMessage(message) {
    return (message !== null &&
        typeof message === 'object' &&
        typeof message.type === 'string' &&
        message.type.startsWith('cluster-rpc:'));
}

/**
 * Setup cluster RPC system on the primary
 *
 * @param {object} [handlers] - mapping of handler names to handler functions
 *   handler function:
 *     `handler({Worker} worker, {object} payload, {string} uids, {function} callback)`
 *   handler callback must be called when worker is done with the command:
 *     `callback({Error|null} error, {any} [result])`
 * @return {undefined}
 */
export function setupRPCPrimary(handlers?: PrimaryHandlersMap, options?: RPCSetupOptions) {
    if (options?.werelogsConfig) {
        werelogs.configure(options.werelogsConfig);
    }
    cluster.on('message', (worker, message) => {
        if (_isRpcMessage(message)) {
            _handlePrimaryMessage(worker, message, handlers);
        }
    });
}

/**
 * Setup RPCs on a cluster worker process
 *
 * @param {object} handlers - mapping of handler names to handler functions
 *   handler function:
 *     handler({object} payload, {string} uids, {function} callback)
 *   handler callback must be called when worker is done with the command:
 *     callback({Error|null} error, {any} [result])
 * @return {undefined}
 */
export function setupRPCWorker(handlers: HandlersMap, options?: RPCSetupOptions) {
    if (!process.send) {
        throw new Error('fatal: cannot setup cluster RPC: "process.send" is not available');
    }
    if (options?.werelogsConfig) {
        werelogs.configure(options.werelogsConfig);
    }
    process.on('message', (message: RPCCommandMessage | RPCCommandResultsMessage) => {
        if (_isRpcMessage(message)) {
            _handleWorkerMessage(message, handlers);
        }
    });
}

/**
 * Send a command for workers to execute in parallel, and wait for results
 *
 * @param {string} toWorkers - which workers should execute the command
 *   Currently the supported values are:
 *   - "*", meaning all workers will execute the command
 *   - "PRIMARY", meaning the primary process will execute the command
 * @param {string} toHandler - name of the handler that will execute the
 *   command in workers, as declared in the setupRPCWorker() parameter object
 * @param {string} uids - unique identifier of the command, must be
 *   unique across all commands in progress
 * @param {object} payload - message payload, sent as-is to the handler
 * @param {number} [timeoutMs=60000] - timeout the command with a
 *   "RequestTimeout" error after this number of milliseconds - set to 0
 *   to disable timeouts (the command may then hang forever)
 * @returns {Promise}
 */
export async function sendWorkerCommand(
    toWorkers: string,
    toHandler: string,
    uids: string,
    payload: object,
    timeoutMs: number = 60000
) {
    if (typeof uids !== 'string') {
        rpcLogger.error('missing or invalid "uids" field', { uids });
        throw errors.MissingParameter;
    }
    if (uidsToCommandPromise[uids] !== undefined) {
        rpcLogger.error('a command is already in progress with same uids', { uids });
        throw errors.OperationAborted;
    }
    rpcLogger.info('sending command', { toWorkers, toHandler, uids, payload });
    return new Promise((resolve, reject) => {
        let timeout: NodeJS.Timeout | null = null;
        if (timeoutMs) {
            timeout = setTimeout(() => {
                delete uidsToCommandPromise[uids];
                reject(errors.RequestTimeout);
            }, timeoutMs);
        }
        uidsToCommandPromise[uids] = { resolve, reject, timeout };
        const message: RPCCommandMessage = {
            type: 'cluster-rpc:command',
            toWorkers,
            toHandler,
            uids,
            payload,
        };
        return process.send?.(message);
    });
}

/**
 * Get the number of commands in flight
 * @returns {number}
 */
export function getPendingCommandsCount() {
    return Object.keys(uidsToCommandPromise).length;
}


function _dispatchCommandResultsToWorker(
    worker: Worker,
    uids: string,
    resultsArray: MarshalledResultObject[]
): void {
    const message: RPCCommandResultsMessage = {
        type: 'cluster-rpc:commandResults',
        uids,
        payload: {
            results: resultsArray,
        },
    };
    worker.send(message);
}

function _dispatchCommandErrorToWorker(
    worker: Worker,
    uids: string,
    error: Error,
): void {
    const message: RPCCommandErrorMessage = {
        type: 'cluster-rpc:commandError',
        uids,
        payload: {
            error: error.message,
        },
    };
    worker.send(message);
}

function _sendPrimaryCommandResult(
    worker: Worker,
    uids: string,
    error: (Error & { code?: number }) | null | undefined,
    result?: any
): void {
    const message: RPCCommandResultsMessage = {
        type: 'cluster-rpc:commandResults',
        uids,
        payload: {
            results: [{ error: error?.message || null, errorCode: error?.code, result }],
        },
    };
    worker.send?.(message);
}

function _handlePrimaryCommandMessage(
    fromWorker: Worker,
    logger: any,
    message: RPCCommandMessage,
    handlers?: PrimaryHandlersMap
): void {
    const { toWorkers, toHandler, uids, payload } = message;
    if (toWorkers === '*') {
        if (uidsToWorkerId[uids] !== undefined) {
            logger.warn('new command already has a waiting worker with same uids', {
                uids, workerId: uidsToWorkerId[uids],
            });
            return undefined;
        }
        const commandResults = {};
        for (const workerId of Object.keys(cluster.workers || {})) {
            commandResults[workerId] = null;
        }
        uidsToWorkerId[uids] = fromWorker?.id;
        uidsToCommandResults[uids] = commandResults;

        for (const [workerId, worker] of Object.entries(cluster.workers || {})) {
            logger.debug('sending command message to worker', {
                workerId, toHandler, payload,
            });
            if (worker) {
                worker.send(message);
            }
        }
    } else if (toWorkers === 'PRIMARY') {
        const { toHandler, uids, payload } = message;
        const cb: HandlerCallback = (err, result) => _sendPrimaryCommandResult(fromWorker, uids, err, result);

        if (toHandler in (handlers || {})) {
            return handlers![toHandler](fromWorker, payload, uids, cb);
        }
        logger.error('no such handler in "toHandler" field from worker command message', {
            toHandler,
        });
        return cb(errors.NotImplemented);
    } else {
        logger.error('unsupported "toWorkers" field from worker command message', {
            toWorkers,
        });
        if (fromWorker) {
            _dispatchCommandErrorToWorker(fromWorker, uids, errors.NotImplemented);
        }
    }
}

function _handlePrimaryCommandResultMessage(
    fromWorkerId: number,
    logger: any,
    message: RPCCommandResultMessage
): void {
    const { uids, payload } = message;
    const commandResults = uidsToCommandResults[uids];
    if (!commandResults) {
        logger.warn('received command response message from worker for command not in flight', {
            workerId: fromWorkerId,
            uids,
        });
        return undefined;
    }
    if (commandResults[fromWorkerId] === undefined) {
        logger.warn('received command response message with unexpected worker ID', {
            workerId: fromWorkerId,
            uids,
        });
        return undefined;
    }
    if (commandResults[fromWorkerId] !== null) {
        logger.warn('ignoring duplicate command response from worker', {
            workerId: fromWorkerId,
            uids,
        });
        return undefined;
    }
    commandResults[fromWorkerId] = payload;
    const commandResultsArray = Object.values(commandResults);
    if (commandResultsArray.every(response => response !== null)) {
        logger.debug('all workers responded to command', { uids });
        const completeCommandResultsArray = <MarshalledResultObject[]> commandResultsArray;
        const toWorkerId = uidsToWorkerId[uids];
        const toWorker = cluster.workers?.[toWorkerId];

        delete uidsToCommandResults[uids];
        delete uidsToWorkerId[uids];

        if (!toWorker) {
            logger.warn('worker shut down while its command was executing', {
                workerId: toWorkerId, uids,
            });
            return undefined;
        }
        // send back response to original worker
        _dispatchCommandResultsToWorker(toWorker, uids, completeCommandResultsArray);
    }
}

function _handlePrimaryMessage(
    fromWorker: Worker,
    message: RPCCommandMessage | RPCCommandResultMessage,
    handlers?: PrimaryHandlersMap
): void {
    const { type: messageType, uids } = message;
    const logger = rpcLogger.newRequestLoggerFromSerializedUids(uids);
    logger.debug('primary received message from worker', {
        workerId: fromWorker?.id, rpcMessage: message,
    });
    if (messageType === 'cluster-rpc:command') {
        return _handlePrimaryCommandMessage(fromWorker, logger, message, handlers);
    }
    if (messageType === 'cluster-rpc:commandResult') {
        return _handlePrimaryCommandResultMessage(fromWorker?.id, logger, message);
    }
    logger.error('unsupported message type', {
        workerId: fromWorker?.id, messageType, uids,
    });
    return undefined;
}

function _sendWorkerCommandResult(
    uids: string,
    error: Error | null | undefined,
    result?: any
): void {
    const message: RPCCommandResultMessage = {
        type: 'cluster-rpc:commandResult',
        uids,
        payload: {
            error: error ? error.message : null,
            result,
        },
    };
    process.send?.(message);
}

function _handleWorkerCommandMessage(
    logger: any,
    message: RPCCommandMessage,
    handlers: HandlersMap
): void {
    const { toHandler, uids, payload } = message;
    const cb: HandlerCallback = (err, result) => _sendWorkerCommandResult(uids, err, result);

    if (toHandler in handlers) {
        return handlers[toHandler](payload, uids, cb);
    }
    logger.error('no such handler in "toHandler" field from worker command message', {
        toHandler,
    });
    return cb(errors.NotImplemented);
}

function _handleWorkerCommandResultsMessage(
    logger: any,
    message: RPCCommandResultsMessage,
): void {
    const { uids, payload } = message;
    const { results } = payload;
    const commandPromise: CommandPromise = uidsToCommandPromise[uids];
    if (commandPromise === undefined) {
        logger.error('missing promise for command results', { uids, payload });
        return undefined;
    }
    if (commandPromise.timeout) {
        clearTimeout(commandPromise.timeout);
    }
    delete uidsToCommandPromise[uids];
    const unmarshalledResults = results.map(workerResult => {
        let workerError: Error | null = null;
        if (workerResult.error) {
            if (workerResult.error in errors) {
                workerError = errors[workerResult.error];
            } else {
                workerError = new Error(workerResult.error);
            }
        }
        if (workerError && workerResult.errorCode) {
            (workerError as Error & { code: number }).code = workerResult.errorCode;
        }
        const unmarshalledResult: ResultObject = {
            error: workerError,
            result: workerResult.result,
        };
        return unmarshalledResult;
    });
    return commandPromise.resolve(unmarshalledResults);
}

function _handleWorkerCommandErrorMessage(
    logger: any,
    message: RPCCommandErrorMessage,
): void {
    const { uids, payload } = message;
    const { error } = payload;
    const commandPromise: CommandPromise = uidsToCommandPromise[uids];
    if (commandPromise === undefined) {
        logger.error('missing promise for command results', { uids, payload });
        return undefined;
    }
    if (commandPromise.timeout) {
        clearTimeout(commandPromise.timeout);
    }
    delete uidsToCommandPromise[uids];
    let commandError: Error | null = null;
    if (error in errors) {
        commandError = errors[error];
    } else {
        commandError = new Error(error);
    }
    return commandPromise.reject(<Error> commandError);
}

function _handleWorkerMessage(
    message: RPCCommandMessage | RPCCommandResultsMessage | RPCCommandErrorMessage,
    handlers: HandlersMap
): void {
    const { type: messageType, uids } = message;
    const workerId = cluster.worker?.id;
    const logger = rpcLogger.newRequestLoggerFromSerializedUids(uids);
    logger.debug('worker received message from primary', {
        workerId, rpcMessage: message,
    });
    if (messageType === 'cluster-rpc:command') {
        return _handleWorkerCommandMessage(logger, message, handlers);
    }
    if (messageType === 'cluster-rpc:commandResults') {
        return _handleWorkerCommandResultsMessage(logger, message);
    }
    if (messageType === 'cluster-rpc:commandError') {
        return _handleWorkerCommandErrorMessage(logger, message);
    }
    logger.error('unsupported message type', {
        workerId, messageType,
    });
    return undefined;
}
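A worked end-to-end sketch following the module's own doc comment: the primary relays, every worker answers, the caller gets one result per live worker. The import path and HTTP wiring are assumptions for the sketch:

import cluster from 'cluster';
import * as http from 'http';
import { setupRPCPrimary, setupRPCWorker, sendWorkerCommand } from './ClusterRPC';

if (cluster.isPrimary) {
    setupRPCPrimary();
    cluster.fork();
    cluster.fork();
} else {
    setupRPCWorker({
        // each worker answers with its own process id
        getPid: (payload, uids, callback) => callback(null, process.pid),
    });
    http.createServer(async (req, res) => {
        // "uids" must be unique per in-flight command; a timestamp plus
        // random suffix is good enough for a sketch, not for production
        const uids = `getPid-${Date.now()}-${Math.random()}`;
        const results: any = await sendWorkerCommand('*', 'getPid', uids, {});
        res.end(JSON.stringify((results || []).map((r: any) => r.result)));
    }).listen(8000);
}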
@@ -35,7 +35,13 @@ export const emptyFileMd5 = 'd41d8cd98f00b204e9800998ecf8427e';
 // Version 4 add the Creation-Time and Content-Language attributes,
 // and add support for x-ms-meta-* headers in UserMetadata
 // Version 5 adds the azureInfo structure
-export const mdModelVersion = 5;
+// Version 6 adds a "deleted" flag that is updated to true before
+// the object gets deleted. This is done to keep object metadata in the
+// oplog when deleting the object, as oplog deletion events don't contain
+// any metadata of the object.
+// Version 6 also adds the "isPHD" flag that is used to indicate that the master
+// object is a placeholder and is not up to date.
+export const mdModelVersion = 6;
 /*
  * Splitter is used to build the object name for the overview of a
  * multipart upload and to build the object names for each part of a

@@ -131,6 +137,14 @@ export const supportedNotificationEvents = new Set([
     's3:ObjectTagging:Put',
     's3:ObjectTagging:Delete',
     's3:ObjectAcl:Put',
+    's3:ObjectRestore:*',
+    's3:ObjectRestore:Post',
+    's3:ObjectRestore:Completed',
+    's3:ObjectRestore:Delete',
+    's3:LifecycleTransition',
+    's3:LifecycleExpiration:*',
+    's3:LifecycleExpiration:DeleteMarkerCreated',
+    's3:LifecycleExpiration:Delete',
 ]);
 export const notificationArnPrefix = 'arn:scality:bucketnotif';
 // HTTP server keep-alive timeout is set to a higher value than

@@ -157,3 +171,7 @@ export const maxCachedBuckets = process.env.METADATA_MAX_CACHED_BUCKETS ?
     Number(process.env.METADATA_MAX_CACHED_BUCKETS) : 1000;

 export const validRestoreObjectTiers = new Set(['Expedited', 'Standard', 'Bulk']);
 export const maxBatchingConcurrentOperations = 5;
+
+/** For policy resource arn check we allow empty account ID to not break compatibility */
+export const policyArnAllowedEmptyAccountId = ['utapi', 'scuba'];
@@ -148,7 +148,7 @@ export class IndexTransaction {
                 'missing condition for conditional put'
             );
         }
-        if (typeof condition.notExists !== 'string') {
+        if (typeof condition.notExists !== 'string' && typeof condition.exists !== 'string') {
             throw propError(
                 'unsupportedConditionalOperation',
                 'missing key or supported condition'
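Illustrative shapes of the two condition forms the relaxed check now accepts; the surrounding transaction usage is a sketch, not exact repository calls:

transaction.addCondition({ notExists: 'objectKey' }); // apply only if the key is absent
transaction.addCondition({ exists: 'objectKey' });    // apply only if the key is present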
@@ -42,7 +42,7 @@ export const BucketAlreadyOwnedByYou: ErrorFormat = {
     code: 409,

     description:
-        'Your previous request to create the named bucket succeeded and you already own it. You get this error in all AWS regions except US Standard, us-east-1. In us-east-1 region, you will get 200 OK, but it is no-op (if bucket exists S3 will not do anything).',
+        'A bucket with this name exists and is already owned by you',
 };

 export const BucketNotEmpty: ErrorFormat = {

@@ -365,6 +365,11 @@ export const NoSuchWebsiteConfiguration: ErrorFormat = {
     description: 'The specified bucket does not have a website configuration',
 };

+export const NoSuchTagSet: ErrorFormat = {
+    code: 404,
+    description: 'The TagSet does not exist',
+};
+
 export const NoSuchUpload: ErrorFormat = {
     code: 404,
     description:

@@ -685,6 +690,11 @@ export const ReportNotPresent: ErrorFormat = {
         'The request was rejected because the credential report does not exist. To generate a credential report, use GenerateCredentialReport.',
 };

+export const Found: ErrorFormat = {
+    code: 302,
+    description: 'Resource Found'
+};
+
 // ------------- Special non-AWS S3 errors -------------

 export const MPUinProgress: ErrorFormat = {

@@ -1032,3 +1042,15 @@ export const AuthMethodNotImplemented: ErrorFormat = {
     description: 'AuthMethodNotImplemented',
     code: 501,
 };

+// --------------------- quotaErrors ---------------------
+
+export const NoSuchQuota: ErrorFormat = {
+    code: 404,
+    description: 'The specified resource does not have a quota.',
+};
+
+export const QuotaExceeded: ErrorFormat = {
+    code: 429,
+    description: 'The quota set for the resource is exceeded.',
+};
@@ -1,26 +1,19 @@
 import promClient from 'prom-client';

-const collectDefaultMetricsIntervalMs =
-    process.env.COLLECT_DEFAULT_METRICS_INTERVAL_MS !== undefined ?
-        Number.parseInt(process.env.COLLECT_DEFAULT_METRICS_INTERVAL_MS, 10) :
-        10000;
-
-promClient.collectDefaultMetrics({ timeout: collectDefaultMetricsIntervalMs });
-
 export default class ZenkoMetrics {
-    static createCounter(params: promClient.CounterConfiguration) {
+    static createCounter(params: promClient.CounterConfiguration<string>) {
         return new promClient.Counter(params);
     }

-    static createGauge(params: promClient.GaugeConfiguration) {
+    static createGauge(params: promClient.GaugeConfiguration<string>) {
         return new promClient.Gauge(params);
     }

-    static createHistogram(params: promClient.HistogramConfiguration) {
+    static createHistogram(params: promClient.HistogramConfiguration<string>) {
         return new promClient.Histogram(params);
     }

-    static createSummary(params: promClient.SummaryConfiguration) {
+    static createSummary(params: promClient.SummaryConfiguration<string>) {
         return new promClient.Summary(params);
     }

@@ -28,11 +21,15 @@ export default class ZenkoMetrics {
         return promClient.register.getSingleMetric(name);
     }

-    static asPrometheus() {
+    static async asPrometheus() {
         return promClient.register.metrics();
     }

    static asPrometheusContentType() {
        return promClient.register.contentType;
    }
+
+    static collectDefaultMetrics() {
+        return promClient.collectDefaultMetrics();
+    }
 }
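Since collecting default metrics is no longer triggered at import time and `asPrometheus()` is now async, a metrics endpoint must opt in and await the registry. A hedged sketch (import path and port are assumptions):

import * as http from 'http';
import ZenkoMetrics from './ZenkoMetrics';

ZenkoMetrics.collectDefaultMetrics(); // no longer automatic at import time

http.createServer(async (req, res) => {
    if (req.url === '/metrics') {
        res.setHeader('content-type', ZenkoMetrics.asPrometheusContentType());
        res.end(await ZenkoMetrics.asPrometheus());
    } else {
        res.statusCode = 404;
        res.end();
    }
}).listen(9090);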
@@ -1,3 +1,5 @@
+import { RequestLogger } from 'werelogs';
+
 import { legacyLocations } from '../constants';
 import escapeForXml from '../s3middleware/escapeForXml';
@@ -8,11 +8,12 @@ import ObjectLockConfiguration from './ObjectLockConfiguration';
 import BucketPolicy from './BucketPolicy';
 import NotificationConfiguration from './NotificationConfiguration';
 import { ACL as OACL } from './ObjectMD';
+import { areTagsValid, BucketTag } from '../s3middleware/tagging';

 // WHEN UPDATING THIS NUMBER, UPDATE BucketInfoModelVersion.md CHANGELOG
 // BucketInfoModelVersion.md can be found in documentation/ at the root
 // of this repository
-const modelVersion = 14;
+const modelVersion = 16;

 export type CORS = {
     id: string;

@@ -36,6 +37,41 @@ export type VersioningConfiguration = {
     MfaDelete: any;
 };

+export type VeeamSOSApi = {
+    SystemInfo?: {
+        ProtocolVersion: string,
+        ModelName: string,
+        ProtocolCapabilities: {
+            CapacityInfo: boolean,
+            UploadSessions: boolean,
+            IAMSTS?: boolean,
+        },
+        APIEndpoints?: {
+            IAMEndpoint: string,
+            STSEndpoint: string,
+        },
+        SystemRecommendations?: {
+            S3ConcurrentTaskLimit: number,
+            S3MultiObjectDelete: number,
+            StorageCurrentTasksLimit: number,
+            KbBlockSize: number,
+        }
+        LastModified?: string,
+    },
+    CapacityInfo?: {
+        Capacity: number,
+        Available: number,
+        Used: number,
+        LastModified?: string,
+    },
+};
+
+// Capabilities contains all specifics from external products supported by
+// our S3 implementation, at bucket level
+export type Capabilities = {
+    VeeamSOSApi?: VeeamSOSApi,
+};
+
 export type ACL = OACL & { WRITE: string[] }

 export default class BucketInfo {

@@ -59,11 +95,13 @@ export default class BucketInfo {
     _objectLockEnabled?: boolean;
     _objectLockConfiguration?: any;
     _notificationConfiguration?: any;
-    _tags?: { key: string; value: string }[] | null;
+    _tags?: Array<BucketTag>;
     _readLocationConstraint: string | null;
     _isNFS: boolean | null;
     _azureInfo: any | null;
     _ingestion: { status: 'enabled' | 'disabled' } | null;
+    _capabilities?: Capabilities;
+    _quotaMax: number | 0;

     /**
      * Represents all bucket information.

@@ -118,6 +156,9 @@ export default class BucketInfo {
      * @param [objectLockEnabled] - true when object lock enabled
      * @param [objectLockConfiguration] - object lock configuration
      * @param [notificationConfiguration] - bucket notification configuration
+     * @param [tags] - bucket tag set
+     * @param [capabilities] - capabilities for the bucket
+     * @param quotaMax - bucket quota
      */
     constructor(
         name: string,

@@ -144,6 +185,9 @@ export default class BucketInfo {
         objectLockEnabled?: boolean,
         objectLockConfiguration?: any,
         notificationConfiguration?: any,
+        tags?: Array<BucketTag> | [],
+        capabilities?: Capabilities,
+        quotaMax?: number | 0,
     ) {
         assert.strictEqual(typeof name, 'string');
         assert.strictEqual(typeof owner, 'string');

@@ -240,6 +284,15 @@ export default class BucketInfo {
             READ_ACP: [],
         };

+        if (tags === undefined) {
+            tags = [] as BucketTag[];
+        }
+        assert.strictEqual(areTagsValid(tags), true);
+        if (quotaMax) {
+            assert.strictEqual(typeof quotaMax, 'number');
+            assert(quotaMax >= 0, 'Quota cannot be negative');
+        }
+
         // IF UPDATING PROPERTIES, INCREMENT MODELVERSION NUMBER ABOVE
         this._acl = aclInstance;
         this._name = name;

@@ -265,6 +318,9 @@ export default class BucketInfo {
         this._objectLockEnabled = objectLockEnabled || false;
         this._objectLockConfiguration = objectLockConfiguration || null;
         this._notificationConfiguration = notificationConfiguration || null;
+        this._tags = tags;
+        this._capabilities = capabilities || undefined;
+        this._quotaMax = quotaMax || 0;
         return this;
     }

@@ -298,6 +354,9 @@ export default class BucketInfo {
             objectLockEnabled: this._objectLockEnabled,
             objectLockConfiguration: this._objectLockConfiguration,
             notificationConfiguration: this._notificationConfiguration,
+            tags: this._tags,
+            capabilities: this._capabilities,
+            quotaMax: this._quotaMax,
         };
         const final = this._websiteConfiguration
             ? {

@@ -323,7 +382,8 @@ export default class BucketInfo {
             obj.cors, obj.replicationConfiguration, obj.lifecycleConfiguration,
             obj.bucketPolicy, obj.uid, obj.readLocationConstraint, obj.isNFS,
             obj.ingestion, obj.azureInfo, obj.objectLockEnabled,
-            obj.objectLockConfiguration, obj.notificationConfiguration);
+            obj.objectLockConfiguration, obj.notificationConfiguration, obj.tags,
+            obj.capabilities, obj.quotaMax);
     }

     /**

@@ -350,7 +410,8 @@ export default class BucketInfo {
             data._bucketPolicy, data._uid, data._readLocationConstraint,
             data._isNFS, data._ingestion, data._azureInfo,
             data._objectLockEnabled, data._objectLockConfiguration,
-            data._notificationConfiguration);
+            data._notificationConfiguration, data._tags, data._capabilities,
+            data._quotaMax);
     }

     /**

@@ -841,4 +902,69 @@ export default class BucketInfo {
         this._objectLockEnabled = enabled;
         return this;
     }
+
+    /**
+     * Get the value of bucket tags
+     * @return - Array of bucket tags
+     */
+    getTags() {
+        return this._tags;
+    }
+
+    /**
+     * Set bucket tags
+     * @return - bucket info instance
+     */
+    setTags(tags: Array<BucketTag>) {
+        this._tags = tags;
+        return this;
+    }
+
+    /**
+     * Get the value of bucket capabilities
+     * @return - capabilities of the bucket
+     */
+    getCapabilities() {
+        return this._capabilities;
+    }
+
+    /**
+     * Get a specific bucket capability
+     *
+     * @param [capability] - if provided, will return a specific capability
+     * @return - capability of the bucket
+     */
+    getCapability(capability: string): VeeamSOSApi | undefined {
+        if (capability && this._capabilities && this._capabilities[capability]) {
+            return this._capabilities[capability];
+        }
+        return undefined;
+    }
+
+    /**
+     * Set bucket capabilities
+     * @return - bucket info instance
+     */
+    setCapabilities(capabilities: Capabilities) {
+        this._capabilities = capabilities;
+        return this;
+    }
+
+    /**
+     * Get the bucket quota information
+     * @return quotaMax
+     */
+    getQuota() {
+        return this._quotaMax;
+    }
+
+    /**
+     * Set bucket quota
+     * @param quota - quota to be set
+     * @return - bucket quota info
+     */
+    setQuota(quota: number) {
+        this._quotaMax = quota || 0;
+        return this;
+    }
 }
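A hedged sketch of the new accessors; `bucket` is an existing BucketInfo instance and the capability payload just mirrors the VeeamSOSApi type above:

bucket.setQuota(5 * 1024 * 1024 * 1024); // 5 GiB; setQuota(0) disables the quota
bucket.getQuota();                       // 5368709120

bucket.setCapabilities({
    VeeamSOSApi: {
        SystemInfo: {
            ProtocolVersion: '1.0',
            ModelName: 'Example',
            ProtocolCapabilities: { CapacityInfo: true, UploadSessions: false },
        },
    },
});
bucket.getCapability('VeeamSOSApi'); // returns the object set above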
@@ -666,13 +666,38 @@ export default class LifecycleConfiguration {
      * @return Returns an error object or `null`
      */
     _checkDate(date: string) {
-        const isoRegex = new RegExp('^(-?(?:[1-9][0-9]*)?[0-9]{4})-' +
-            '(1[0-2]|0[1-9])-(3[01]|0[1-9]|[12][0-9])T(2[0-3]|[01][0-9])' +
-            ':([0-5][0-9]):([0-5][0-9])(.[0-9]+)?(Z)?$');
-        if (!isoRegex.test(date)) {
+        const isoRegex = new RegExp(
+            "^(-?(?:[1-9][0-9]*)?[0-9]{4})" + // Year
+            "-(1[0-2]|0[1-9])" + // Month
+            "-(3[01]|0[1-9]|[12][0-9])" + // Day
+            "T(2[0-3]|[01][0-9])" + // Hour
+            ":([0-5][0-9])" + // Minute
+            ":([0-5][0-9])" + // Second
+            "(\\.[0-9]+)?" + // Fractional second
+            "(Z|[+-][01][0-9]:[0-5][0-9])?$", // Timezone
+            "g"
+        );
+        const matches = [...date.matchAll(isoRegex)];
+        if (matches.length !== 1) {
             const msg = 'Date must be in ISO 8601 format';
             return errors.InvalidArgument.customizeDescription(msg);
         }
+        // Check for a timezone in the last match group. If none, add a Z to indicate UTC.
+        if (!matches[0][matches[0].length - 1]) {
+            date += 'Z';
+        }
+        const dateObj = new Date(date);
+        if (Number.isNaN(dateObj.getTime())) {
+            const msg = 'Date is not a valid date';
+            return errors.InvalidArgument.customizeDescription(msg);
+        }
+        if (dateObj.getUTCHours() !== 0
+            || dateObj.getUTCMinutes() !== 0
+            || dateObj.getUTCSeconds() !== 0
+            || dateObj.getUTCMilliseconds() !== 0) {
+            const msg = '\'Date\' must be at midnight GMT';
+            return errors.InvalidArgument.customizeDescription(msg);
+        }
         return null;
     }

@@ -844,6 +869,7 @@ export default class LifecycleConfiguration {
      *          days: <value>,
      *          date: <value>,
      *          deleteMarker: <value>
+     *          newerNoncurrentVersions: <value>,
      *      },
      *  ],
      * }

@@ -856,7 +882,8 @@ export default class LifecycleConfiguration {
         actionName: string;
         days?: number;
         date?: number;
-        deleteMarker?: boolean
+        deleteMarker?: boolean;
+        newerNoncurrentVersions?: number
     }[];
 } = {
     propName: 'actions',

@@ -885,8 +912,14 @@ export default class LifecycleConfiguration {
         if (action.error) {
             actionsObj.error = action.error;
         } else {
-            const actionTimes = ['days', 'date', 'deleteMarker',
-                'transition', 'nonCurrentVersionTransition'];
+            const actionTimes = [
+                'days',
+                'date',
+                'deleteMarker',
+                'transition',
+                'nonCurrentVersionTransition',
+                'newerNoncurrentVersions',
+            ];
             actionTimes.forEach(t => {
                 if (action[t]) {
                     // eslint-disable-next-line no-param-reassign

@@ -1032,6 +1065,7 @@ export default class LifecycleConfiguration {
      * nvExpObj = {
      *     error: <error>,
      *     days: <value>,
+     *     newerNoncurrentVersions: <value>,
      * }
      */
     _parseNoncurrentVersionExpiration(rule: any) {

@@ -1042,14 +1076,41 @@ export default class LifecycleConfiguration {
                 'NoncurrentDays');
             return { error };
         }

+        const actionParams: {
+            error?: ArsenalError;
+            days: number;
+            newerNoncurrentVersions: number;
+        } = {
+            days: 0,
+            newerNoncurrentVersions: 0,
+        };
+
         const daysInt = parseInt(subNVExp.NoncurrentDays[0], 10);
         if (daysInt < 1) {
             const msg = 'NoncurrentDays is not a positive integer';
             const error = errors.InvalidArgument.customizeDescription(msg);
             return { error };
-        }
-        return { days: daysInt };
+        } else {
+            actionParams.days = daysInt;
+        }
+
+        if (subNVExp.NewerNoncurrentVersions) {
+            const newerVersionsInt = parseInt(subNVExp.NewerNoncurrentVersions[0], 10);
+
+            if (Number.isNaN(newerVersionsInt) || newerVersionsInt < 1) {
+                const msg = 'NewerNoncurrentVersions is not a positive integer';
+                const error = errors.InvalidArgument.customizeDescription(msg);
+                return { error };
+            }
+
+            actionParams.newerNoncurrentVersions = newerVersionsInt;
+        } else {
+            actionParams.newerNoncurrentVersions = 0;
+        }
+
+        return actionParams;
     }

@@ -1112,6 +1173,10 @@ export default class LifecycleConfiguration {
                     assert.strictEqual(typeof t.storageClass, 'string');
                 });
             }
+
+            if (a.newerNoncurrentVersions) {
+                assert.strictEqual(typeof a.newerNoncurrentVersions, 'number');
+            }
         });
     });
 }

@@ -1161,15 +1226,24 @@ export default class LifecycleConfiguration {
     }

     const Actions = actions.map(action => {
-        const { actionName, days, date, deleteMarker,
-            nonCurrentVersionTransition, transition } = action;
+        const {
+            actionName,
+            days,
+            date,
+            deleteMarker,
+            nonCurrentVersionTransition,
+            transition,
+            newerNoncurrentVersions,
+        } = action;
         let Action: any;
         if (actionName === 'AbortIncompleteMultipartUpload') {
             Action = `<${actionName}><DaysAfterInitiation>${days}` +
                 `</DaysAfterInitiation></${actionName}>`;
         } else if (actionName === 'NoncurrentVersionExpiration') {
-            Action = `<${actionName}><NoncurrentDays>${days}` +
-                `</NoncurrentDays></${actionName}>`;
+            const Days = `<NoncurrentDays>${days}</NoncurrentDays>`;
+            const NewerVersions = newerNoncurrentVersions ?
+                `<NewerNoncurrentVersions>${newerNoncurrentVersions}</NewerNoncurrentVersions>` : '';
+            Action = `<${actionName}>${Days}${NewerVersions}</${actionName}>`;
         } else if (actionName === 'Expiration') {
             const Days = days ? `<Days>${days}</Days>` : '';
             const Date = date ? `<Date>${date}</Date>` : '';

@@ -1246,13 +1320,18 @@ export default class LifecycleConfiguration {
     }

     actions.forEach(action => {
-        const { actionName, days, date, deleteMarker } = action;
+        const { actionName, days, date, deleteMarker, newerNoncurrentVersions } = action;
         if (actionName === 'AbortIncompleteMultipartUpload') {
             entry.addAbortMPU(days!);
             return;
         }
         if (actionName === 'NoncurrentVersionExpiration') {
-            entry.addNCVExpiration(days!);
+            entry.addNCVExpiration('NoncurrentDays', days!);
+
+            if (newerNoncurrentVersions) {
+                entry.addNCVExpiration('NewerNoncurrentVersions', newerNoncurrentVersions!);
+            }
+
             return;
         }
         if (actionName === 'Expiration') {

@@ -1289,6 +1368,7 @@ export type Rule = {
     days?: number;
     date?: number;
     deleteMarker?: boolean;
+    newerNoncurrentVersions?: number;
     nonCurrentVersionTransition?: {
         noncurrentDays: number;
         storageClass: string;
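A behaviour sketch of the reworked `_checkDate` validation (calls shown directly on the private method for illustration): the value must be ISO 8601, carry an explicit or implied UTC offset, and fall exactly at midnight GMT:

config._checkDate('2024-01-01T00:00:00Z');      // null (valid)
config._checkDate('2024-01-01T00:00:00+01:00'); // error: 23:00 GMT the previous day
config._checkDate('2024-01-01T12:30:00Z');      // error: must be at midnight GMT
config._checkDate('01/01/2024');                // error: not ISO 8601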
@@ -10,6 +10,10 @@ export type Expiration = {
Date?: number | boolean;
Days?: number | boolean;
};
export type NoncurrentExpiration = {
NoncurrentDays: number | null;
NewerNoncurrentVersions: number | null;
};

/**
* @class LifecycleRule

@@ -21,9 +25,10 @@ export default class LifecycleRule {
status: Status;
tags: Tags;
expiration?: Expiration;
ncvExpiration?: { NoncurrentDays: number };
ncvExpiration?: NoncurrentExpiration;
abortMPU?: { DaysAfterInitiation: number };
transitions?: any[];
ncvTransitions?: any[];
prefix?: string;

constructor(id: string, status: Status) {

@@ -38,9 +43,10 @@ export default class LifecycleRule {
ID: string;
Status: Status;
Expiration?: Expiration;
NoncurrentVersionExpiration?: { NoncurrentDays: number };
NoncurrentVersionExpiration?: NoncurrentExpiration;
AbortIncompleteMultipartUpload?: { DaysAfterInitiation: number };
Transitions?: any[];
NoncurrentVersionTransitions?: any[];
Filter?: Filter;
Prefix?: '';
} = { ID: this.id, Status: this.status };

@@ -49,7 +55,7 @@ export default class LifecycleRule {
rule.Expiration = this.expiration;
}
if (this.ncvExpiration) {
rule.NoncurrentVersionExpiration = this.ncvExpiration;
rule.NoncurrentVersionExpiration = this.ncvExpiration
}
if (this.abortMPU) {
rule.AbortIncompleteMultipartUpload = this.abortMPU;

@@ -57,6 +63,9 @@ export default class LifecycleRule {
if (this.transitions) {
rule.Transitions = this.transitions;
}
if (this.ncvTransitions) {
rule.NoncurrentVersionTransitions = this.ncvTransitions;
}

const filter = this.buildFilter();

@@ -136,15 +145,24 @@ export default class LifecycleRule {

/**
* NoncurrentVersionExpiration
* @param days - NoncurrentDays
* @param prop - Property must be defined in `validProps`
* @param value - integer for `NoncurrentDays` and `NewerNoncurrentVersions`
*/
addNCVExpiration(days: number) {
this.ncvExpiration = { NoncurrentDays: days };
addNCVExpiration(prop: 'NoncurrentDays' | 'NewerNoncurrentVersions', value: number): this;
addNCVExpiration(prop: string, value: number) {
const validProps = ['NoncurrentDays', 'NewerNoncurrentVersions'];
if (validProps.includes(prop)) {
this.ncvExpiration = this.ncvExpiration || {
NoncurrentDays: null,
NewerNoncurrentVersions: null,
};
this.ncvExpiration[prop] = value;
}
return this;
}

/**
* AbortIncompleteMultipartUpload
* abortincompletemultipartupload
* @param days - DaysAfterInitiation
*/
addAbortMPU(days: number) {

@@ -160,4 +178,13 @@ export default class LifecycleRule {
this.transitions = transitions;
return this;
}

/**
* NonCurrentVersionTransitions
* @param nvcTransitions - NonCurrentVersionTransitions
*/
addNCVTransitions(nvcTransitions) {
this.ncvTransitions = nvcTransitions;
return this;
}
}
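A quick usage sketch of the reworked builder. The constructor signature and the `addNCVExpiration` overload are as shown above; the import path and the 'Enabled' status value are assumptions:

    import LifecycleRule from './lifecycleRule'; // hypothetical path

    const rule = new LifecycleRule('expire-old-versions', 'Enabled')
        // First call lazily initializes ncvExpiration with null fields...
        .addNCVExpiration('NoncurrentDays', 7)
        // ...the second call fills in the sibling property on the same object.
        .addNCVExpiration('NewerNoncurrentVersions', 3);

    // The serialization method shown above would now emit:
    // NoncurrentVersionExpiration: { NoncurrentDays: 7, NewerNoncurrentVersions: 3 }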
@@ -1,6 +1,7 @@
import * as crypto from 'crypto';
import * as constants from '../constants';
import * as VersionIDUtils from '../versioning/VersionID';
import { VersioningConstants } from '../versioning/constants';
import ObjectMDLocation, {
ObjectMDLocationData,
Location,

@@ -56,13 +57,16 @@ export type ObjectMDData = {
'x-amz-server-side-encryption-aws-kms-key-id': string;
'x-amz-server-side-encryption-customer-algorithm': string;
'x-amz-website-redirect-location': string;
'x-amz-scal-transition-in-progress'?: boolean;
'x-amz-scal-transition-time'?: string;
azureInfo?: any;
acl: ACL;
key: string;
location: null | Location[];
// versionId, isNull, nullVersionId and isDeleteMarker
// versionId, isNull, isNull2, nullVersionId and isDeleteMarker
// should be undefined when not set explicitly
isNull?: boolean;
isNull2?: boolean;
nullVersionId?: string;
nullUploadId?: string;
isDeleteMarker?: boolean;

@@ -76,6 +80,16 @@ export type ObjectMDData = {
dataStoreName: string;
originOp: string;
microVersionId?: string;
// Deletion flag
// Used for keeping object metadata in the oplog event
// In case of a deletion the flag is first updated before
// deleting the object
deleted: boolean;
// PHD flag indicates whether the object is a temporary placeholder.
// This is the case when the latest version of an object gets deleted
// the master is set as a placeholder and gets updated with the new latest
// version data after a certain amount of time.
isPHD: boolean;
};

/**

@@ -183,6 +197,7 @@ export default class ObjectMD {
'x-amz-server-side-encryption-aws-kms-key-id': '',
'x-amz-server-side-encryption-customer-algorithm': '',
'x-amz-website-redirect-location': '',
'x-amz-scal-transition-in-progress': false,
acl: {
Canned: 'private',
FULL_CONTROL: [],

@@ -196,6 +211,7 @@ export default class ObjectMD {
// versionId, isNull, nullVersionId and isDeleteMarker
// should be undefined when not set explicitly
isNull: undefined,
isNull2: undefined,
nullVersionId: undefined,
nullUploadId: undefined,
isDeleteMarker: undefined,

@@ -215,6 +231,8 @@ export default class ObjectMD {
},
dataStoreName: '',
originOp: '',
deleted: false,
isPHD: false,
};
}

@@ -628,6 +646,48 @@ export default class ObjectMD {
return this._data['x-amz-website-redirect-location'];
}

/**
* Set metadata transition in progress value
*
* @param inProgress - True if transition is in progress, false otherwise
* @param transitionTime - Date when the transition started
* @return itself
*/
setTransitionInProgress(inProgress: false): this
setTransitionInProgress(inProgress: true, transitionTime: Date|string|number): this
setTransitionInProgress(inProgress: boolean, transitionTime?: Date|string|number) {
this._data['x-amz-scal-transition-in-progress'] = inProgress;
if (!inProgress || !transitionTime) {
delete this._data['x-amz-scal-transition-time'];
} else {
if (typeof transitionTime === 'number') {
transitionTime = new Date(transitionTime);
}
if (transitionTime instanceof Date) {
transitionTime = transitionTime.toISOString();
}
this._data['x-amz-scal-transition-time'] = transitionTime;
}
return this;
}

/**
* Get metadata transition in progress value
*
* @return True if transition is in progress, false otherwise
*/
getTransitionInProgress() {
return this._data['x-amz-scal-transition-in-progress'];
}

/**
* Gets the transition time of the object.
* @returns The transition time of the object.
*/
getTransitionTime() {
return this._data['x-amz-scal-transition-time'];
}

/**
* Set access control list
*

@@ -776,6 +836,31 @@ export default class ObjectMD {
return this._data.isNull || false;
}

/**
* Set metadata isNull2 value
*
* @param isNull2 - Whether new version is null or not AND has
* been put with a Cloudserver handling null keys (i.e. supporting
* S3C-7352)

* @return itself
*/
setIsNull2(isNull2: boolean) {
this._data.isNull2 = isNull2;
return this;
}

/**
* Get metadata isNull2 value
*
* @return isNull2 - Whether new version is null or not AND has
* been put with a Cloudserver handling null keys (i.e. supporting
* S3C-7352)
*/
getIsNull2() {
return this._data.isNull2 || false;
}

/**
* Set metadata nullVersionId value
*

@@ -867,6 +952,9 @@ export default class ObjectMD {
* @return The object versionId
*/
getVersionId() {
if (this.getIsNull()) {
return VersioningConstants.ExternalNullVersionId;
}
return this._data.versionId;
}

@@ -874,13 +962,16 @@ export default class ObjectMD {
* Get metadata versionId value in encoded form (the one visible
* to the S3 API user)
*
* @return The encoded object versionId
* @return {undefined|string} The encoded object versionId
*/
getEncodedVersionId() {
const versionId = this.getVersionId();
if (versionId) {
if (versionId === VersioningConstants.ExternalNullVersionId) {
return versionId;
} else if (versionId) {
return VersionIDUtils.encode(versionId);
}
return undefined;
}

/**

@@ -1354,4 +1445,40 @@ export default class ObjectMD {
}
return this;
}

/**
* Set deleted flag
* @param {Boolean} value deleted object
* @return {ObjectMD}
*/
setDeleted(value) {
this._data.deleted = value;
return this;
}

/**
* Get deleted flag
* @return {Boolean}
*/
getDeleted() {
return this._data.deleted;
}

/**
* Set isPHD flag
* @param {Boolean} value isPHD value
* @return {ObjectMD}
*/
setIsPHD(value) {
this._data.isPHD = value;
return this;
}

/**
* Get isPHD flag
* @return {Boolean}
*/
getIsPHD() {
return this._data.isPHD;
}
}
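A short sketch of how the new transition markers behave, using only the methods shown above (the no-argument constructor and import path are assumptions):

    import ObjectMD from './ObjectMD'; // hypothetical path

    const md = new ObjectMD();

    // Numeric and Date inputs are normalized to an ISO-8601 string.
    md.setTransitionInProgress(true, 1700000000000);
    md.getTransitionInProgress(); // true
    md.getTransitionTime();       // '2023-11-14T22:13:20.000Z'

    // Clearing the flag also drops the stored transition time.
    md.setTransitionInProgress(false);
    md.getTransitionTime();       // undefined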
@@ -1,6 +1,8 @@
import assert from 'assert';
import UUID from 'uuid';

import { RequestLogger } from 'werelogs';

import escapeForXml from '../s3middleware/escapeForXml';
import errors from '../errors';
import { isValidBucketName } from '../s3routes/routesUtils';

@@ -359,6 +361,11 @@ export default class ReplicationConfiguration {
(endpoint: any) => endpoint.site === storageClass
);
if (endpoint) {
// We do not support replication to cold location.
// Only transition to cold location is supported.
if (endpoint.site && this._config.locationConstraints[endpoint.site]?.isCold) {
return false;
}
// If this._hasScalityDestination was not set to true in any
// previous iteration or by a prior rule's storage class, then
// check if the current endpoint is a Scality destination.
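Illustration of the new guard. The config shape is inferred from the `locationConstraints[...]?.isCold` lookup above; the location name is made up:

    // Hypothetical location config: a DMF-style cold storage location.
    const locationConstraints: Record<string, { isCold?: boolean }> = {
        'dmf-cold-location': { isCold: true },
    };

    // A replication rule whose destination resolves to this endpoint now
    // fails validation: objects may be transitioned to a cold location,
    // but not replicated to one.
    const endpoint = { site: 'dmf-cold-location' };
    const rejected = Boolean(endpoint.site && locationConstraints[endpoint.site]?.isCold);
    console.log(rejected); // true => rule is rejected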
@@ -1,5 +1,6 @@
import * as http from 'http';
import * as https from 'https';
import { https as HttpsAgent } from 'httpagent';
import * as tls from 'tls';
import * as net from 'net';
import assert from 'assert';

@@ -409,7 +410,11 @@ export default class Server {
method: 'arsenal.network.Server.start',
port: this._port,
});
this._https.agent = new https.Agent(this._https);
this._https.agent = new HttpsAgent.Agent(this._https, {
// Do not enforce the maximum number of sockets for the
// main server, as it might be able to serve more clients.
maxSockets: false,
});
this._server = https.createServer(this._https,
(req, res) => this._onRequest(req, res));
} else {

@@ -430,7 +435,6 @@ export default class Server {
this._server.on('connection', sock => {
// Setting no delay of the socket to the value configured
// TODO fix this
// @ts-expect-errors
sock.setNoDelay(this.isNoDelay());
sock.on('error', err => this._logger.info(
'socket error - request rejected', { error: err }));
@@ -77,10 +77,11 @@ export function getByteRangeFromSpec(
objectSize - 1] };
}
if (rangeSpec.start < objectSize) {
// test is false if end is undefined
return { range: [rangeSpec.start,
((rangeSpec.end && (rangeSpec.end < objectSize)) ?
rangeSpec.end : objectSize - 1)] };
// test is false if end is undefined or end is greater than objectSize
const end: number = rangeSpec.end !== undefined && rangeSpec.end < objectSize
? rangeSpec.end
: objectSize - 1;
return { range: [rangeSpec.start, end] };
}
return { error: errors.InvalidRange };
}
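Worked examples of the clamping behaviour; the `end === 0` case is the behavioural fix, since the old truthy `rangeSpec.end &&` test treated a zero end as absent:

    // getByteRangeFromSpec with objectSize = 100
    // (return shapes as in the function above):
    //
    //   { start: 10, end: 49 }  => { range: [10, 49] }  end within object
    //   { start: 10, end: 500 } => { range: [10, 99] }  end clamped to size - 1
    //   { start: 10 }           => { range: [10, 99] }  open-ended range
    //   { start: 0, end: 0 }    => { range: [0, 0] }    now valid: previously
    //                              end === 0 was wrongly replaced by size - 1
    //   { start: 150 }          => { error: errors.InvalidRange }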
@@ -20,7 +20,7 @@ function _ttlvPadVector(vec: any[]) {
return vec;
}

function _throwError(logger: werelogs.Logger, msg: string, data?: LogDictionnary) {
function _throwError(logger: werelogs.Logger, msg: string, data?: LogDictionary) {
logger.error(msg, data);
throw Error(msg);
}

@@ -62,7 +62,7 @@ export default class HealthProbeServer extends httpServer {
_onLiveness(
_req: http.IncomingMessage,
res: http.ServerResponse,
log: RequestLogger,
log: werelogs.RequestLogger,
) {
if (this._livenessCheck(log)) {
sendSuccess(res, log);

@@ -74,7 +74,7 @@ export default class HealthProbeServer extends httpServer {
_onReadiness(
_req: http.IncomingMessage,
res: http.ServerResponse,
log: RequestLogger,
log: werelogs.RequestLogger,
) {
if (this._readinessCheck(log)) {
sendSuccess(res, log);

@@ -84,10 +84,11 @@ export default class HealthProbeServer extends httpServer {
}

// expose metrics to Prometheus
_onMetrics(_req: http.IncomingMessage, res: http.ServerResponse) {
async _onMetrics(_req: http.IncomingMessage, res: http.ServerResponse) {
const metrics = await ZenkoMetrics.asPrometheus();
res.writeHead(200, {
'Content-Type': ZenkoMetrics.asPrometheusContentType(),
});
res.end(ZenkoMetrics.asPrometheus());
res.end(metrics);
}
}
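The handler becomes async because the registry dump now returns a promise; without the await, the old code would have written a pending Promise into the response body. A minimal standalone sketch of the same pattern, assuming `ZenkoMetrics` is in scope as above:

    import * as http from 'http';

    // Sketch only: ZenkoMetrics as used in the diff above is assumed in scope.
    async function onMetrics(_req: http.IncomingMessage, res: http.ServerResponse) {
        // Await the registry dump instead of passing the promise to res.end().
        const metrics = await ZenkoMetrics.asPrometheus();
        res.writeHead(200, { 'Content-Type': ZenkoMetrics.asPrometheusContentType() });
        res.end(metrics);
    }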
@@ -16,7 +16,7 @@ export const DEFAULT_METRICS_ROUTE = '/metrics';
* @param log - Werelogs instance for logging if you choose to
*/

export type ProbeDelegate = (res: http.ServerResponse, log: RequestLogger) => string | void
export type ProbeDelegate = (res: http.ServerResponse, log: werelogs.RequestLogger) => string | void

export type ProbeServerParams = {
port: number;
@@ -1,4 +1,7 @@
import * as http from 'http';

import { RequestLogger } from 'werelogs';

import { ArsenalError } from '../../errors';

/**
@@ -4,7 +4,7 @@ import * as werelogs from 'werelogs';
import * as constants from '../../constants';
import * as utils from './utils';
import errors, { ArsenalError } from '../../errors';
import HttpAgent from 'agentkeepalive';
import { http as HttpAgent } from 'httpagent';
import * as stream from 'stream';

function setRequestUids(reqHeaders: http.IncomingHttpHeaders, reqUids: string) {

@@ -71,7 +71,7 @@ function makeErrorFromHTTPResponse(response: http.IncomingMessage) {
export default class RESTClient {
host: string;
port: number;
httpAgent: HttpAgent;
httpAgent: http.Agent;
logging: werelogs.Logger;
isPassthrough: boolean;

@@ -98,10 +98,10 @@ export default class RESTClient {
this.port = params.port;
this.isPassthrough = params.isPassthrough || false;
this.logging = new (params.logApi || werelogs).Logger('DataFileRESTClient');
this.httpAgent = new HttpAgent({
this.httpAgent = new HttpAgent.Agent({
keepAlive: true,
freeSocketTimeout: constants.httpClientFreeSocketTimeout,
});
}) as http.Agent;
}

/** Destroy the HTTP agent, forcing a close of the remaining open connections */

@@ -119,7 +119,7 @@ export default class RESTClient {
method: string,
headers: http.OutgoingHttpHeaders | null,
key: string | null,
log: RequestLogger,
log: werelogs.RequestLogger,
responseCb: (res: http.IncomingMessage) => void,
) {
const reqHeaders = headers || {};
@@ -25,7 +25,7 @@ function setContentRange(

function sendError(
res: http.ServerResponse,
log: RequestLogger,
log: werelogs.RequestLogger,
error: ArsenalError,
optMessage?: string,
) {

@@ -68,7 +68,6 @@ export default class RESTServer extends httpServer {
}) {
assert(params.port);

// @ts-expect-error
werelogs.configure({
level: params.log.logLevel,
dump: params.log.dumpLevel,

@@ -142,7 +141,7 @@ export default class RESTServer extends httpServer {
_onPut(
req: http.IncomingMessage,
res: http.ServerResponse,
log: RequestLogger,
log: werelogs.RequestLogger,
) {
let size: number;
try {

@@ -184,7 +183,7 @@ export default class RESTServer extends httpServer {
_onGet(
req: http.IncomingMessage,
res: http.ServerResponse,
log: RequestLogger,
log: werelogs.RequestLogger,
) {
let pathInfo: ReturnType<typeof parseURL>;
let rangeSpec: ReturnType<typeof httpUtils.parseRangeSpec> | undefined =

@@ -267,7 +266,7 @@ export default class RESTServer extends httpServer {
_onDelete(
req: http.IncomingMessage,
res: http.ServerResponse,
log: RequestLogger,
log: werelogs.RequestLogger,
) {
let pathInfo: ReturnType<typeof parseURL>;
try {
@@ -1,6 +1,6 @@
import ioClient from 'socket.io-client';
import * as http from 'http';
import io from 'socket.io';
import { Server as IOServer } from 'socket.io';
import * as sioStream from './sio-stream';
import async from 'async';
import assert from 'assert';

@@ -497,7 +497,7 @@ export function RPCServer(params: {
assert(params.logger);

const httpServer = http.createServer();
const server = io(httpServer);
const server = new IOServer(httpServer, { maxHttpBufferSize: 1e8 });
const log = params.logger;

/**

@@ -508,7 +508,7 @@ export function RPCServer(params: {
*
* @param {BaseService} serviceList - list of services to register
*/
server.registerServices = function registerServices(...serviceList: any[]) {
(server as any).registerServices = function registerServices(...serviceList: any[]) {
serviceList.forEach(service => {
const sock = this.of(service.namespace);
sock.on('connection', conn => {

@@ -536,7 +536,7 @@ export function RPCServer(params: {
});
};

server.listen = function listen(port, bindAddress = undefined) {
(server as any).listen = function listen(port, bindAddress = undefined) {
httpServer.listen(port, bindAddress);
};
@@ -38,7 +38,7 @@
},
"principalAWSUserArn": {
"type": "string",
"pattern": "^arn:aws:iam::[0-9]{12}:user/(?!\\*)[\\w+=,.@ -/]{1,64}$"
"pattern": "^arn:aws:iam::[0-9]{12}:user/(?!\\*)[\\w+=,.@ -/]{1,2017}$"
},
"principalAWSRoleArn": {
"type": "string",

@@ -360,6 +360,9 @@
"type": "string",
"const": "2012-10-17"
},
"Id": {
"type": "string"
},
"Statement": {
"oneOf": [
{
@@ -28,7 +28,7 @@
},
"principalAWSUserArn": {
"type": "string",
"pattern": "^arn:aws:iam::[0-9]{12}:user/(?!\\*)[\\w+=,.@ -/]{1,64}$"
"pattern": "^arn:aws:iam::[0-9]{12}:user/(?!\\*)[\\w+=,.@ -/]{1,2017}$"
},
"principalAWSRoleArn": {
"type": "string",
@@ -12,13 +12,39 @@ import {
actionMapSSO,
actionMapSTS,
actionMapMetadata,
actionMapScuba,
} from './utils/actionMaps';

const _actionNeedQuotaCheck = {
export const actionNeedQuotaCheck = {
objectPut: true,
objectPutVersion: true,
objectPutPart: true,
objectRestore: true,
};

/**
* This variable describes APIs that change the bytes
* stored, requiring quota updates
*/
export const actionWithDataDeletion = {
objectDelete: true,
objectDeleteVersion: true,
multipartDelete: true,
multiObjectDelete: true,
};

/**
* The function returns true if the current API call is a copy object
* and the action requires a quota evaluation logic, post retrieval
* of the object metadata.
* @param {string} action - the action being performed
* @param {string} currentApi - the current API being called
* @return {boolean} - whether the action requires a quota check
*/
export function actionNeedQuotaCheckCopy(action: string, currentApi: string) {
return action === 'objectGet' && (currentApi === 'objectCopy' || currentApi === 'objectPutCopyPart');
}

function _findAction(service: string, method: string) {
switch (service) {
case 's3':

@@ -36,6 +62,8 @@ function _findAction(service: string, method: string) {
return actionMapSTS[method];
case 'metadata':
return actionMapMetadata[method];
case 'scuba':
return actionMapScuba[method];
default:
return undefined;
}

@@ -105,6 +133,10 @@ function _buildArn(
return `arn:scality:metadata::${requesterInfo!.accountid}:` +
`${generalResource}/`;
}
case 'scuba': {
return `arn:scality:scuba::${requesterInfo!.accountid}:` +
`${generalResource}${specificResource ? '/' + specificResource : ''}`;
}
default:
return undefined;
}

@@ -168,12 +200,12 @@ export default class RequestContext {
_policyArn: string;
_action?: string;
_needQuota: boolean;
_postXml?: string;
_requestObjTags: string | null;
_existingObjTag: string | null;
_needTagEval: boolean;
_foundAction?: string;
_foundResource?: string;
_objectLockRetentionDays?: number | null;

constructor(
headers: { [key: string]: string | string[] },

@@ -192,7 +224,10 @@ export default class RequestContext {
securityToken: string,
policyArn: string,
action?: string,
postXml?: string,
requestObjTags?: string,
existingObjTag?: string,
needTagEval?: false,
objectLockRetentionDays?: number,
) {
this._headers = headers;
this._query = query;

@@ -221,11 +256,12 @@ export default class RequestContext {
this._securityToken = securityToken;
this._policyArn = policyArn;
this._action = action;
this._needQuota = _actionNeedQuotaCheck[apiMethod] === true;
this._postXml = postXml;
this._requestObjTags = null;
this._existingObjTag = null;
this._needTagEval = false;
this._needQuota = actionNeedQuotaCheck[apiMethod] === true
|| actionWithDataDeletion[apiMethod] === true;
this._requestObjTags = requestObjTags || null;
this._existingObjTag = existingObjTag || null;
this._needTagEval = needTagEval || false;
this._objectLockRetentionDays = objectLockRetentionDays || null;
return this;
}

@@ -238,7 +274,7 @@ export default class RequestContext {
apiMethod: this._apiMethod,
headers: this._headers,
query: this._query,
requersterInfo: this._requesterInfo,
requesterInfo: this._requesterInfo,
requesterIp: this._requesterIp,
sslEnabled: this._sslEnabled,
awsService: this._awsService,

@@ -254,10 +290,10 @@ export default class RequestContext {
securityToken: this._securityToken,
policyArn: this._policyArn,
action: this._action,
postXml: this._postXml,
requestObjTags: this._requestObjTags,
existingObjTag: this._existingObjTag,
needTagEval: this._needTagEval,
objectLockRetentionDays: this._objectLockRetentionDays,
};
return JSON.stringify(requestInfo);
}

@@ -278,12 +314,28 @@ export default class RequestContext {
if (resource) {
obj.specificResource = resource;
}
return new RequestContext(obj.headers, obj.query, obj.generalResource,
obj.specificResource, obj.requesterIp, obj.sslEnabled,
obj.apiMethod, obj.awsService, obj.locationConstraint,
obj.requesterInfo, obj.signatureVersion,
obj.authType, obj.signatureAge, obj.securityToken, obj.policyArn,
obj.action, obj.postXml);
return new RequestContext(
obj.headers,
obj.query,
obj.generalResource,
obj.specificResource,
obj.requesterIp,
obj.sslEnabled,
obj.apiMethod,
obj.awsService,
obj.locationConstraint,
obj.requesterInfo,
obj.signatureVersion,
obj.authType,
obj.signatureAge,
obj.securityToken,
obj.policyArn,
obj.action,
obj.requestObjTags,
obj.existingObjTag,
obj.needTagEval,
obj.objectLockRetentionDays,
);
}

/**

@@ -627,26 +679,6 @@ export default class RequestContext {
return this._needQuota;
}

/**
* Set request post
*
* @param postXml - request post
* @return itself
*/
setPostXml(postXml: string) {
this._postXml = postXml;
return this;
}

/**
* Get request post
*
* @return request post
*/
getPostXml() {
return this._postXml;
}

/**
* Set request object tags
*

@@ -706,4 +738,24 @@ export default class RequestContext {
getNeedTagEval() {
return this._needTagEval;
}

/**
* Get object lock retention days
*
* @returns objectLockRetentionDays - object lock retention days
*/
getObjectLockRetentionDays() {
return this._objectLockRetentionDays;
}

/**
* Set object lock retention days
*
* @param objectLockRetentionDays - object lock retention days
* @returns itself
*/
setObjectLockRetentionDays(objectLockRetentionDays: number) {
this._objectLockRetentionDays = objectLockRetentionDays;
return this;
}
}
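The serialization pair now carries the tag-evaluation and retention fields end to end; a sketch of the round trip (method names `serialize`/`deSerialize` are assumed from the surrounding class, and `rc` stands for a fully constructed RequestContext):

    const serialized = rc.serialize();
    // JSON now includes requestObjTags, existingObjTag,
    // needTagEval and objectLockRetentionDays.

    const restored = RequestContext.deSerialize(serialized);

    // Previously these fields were reset to null/false on deserialization;
    // now they survive the round trip:
    restored.getNeedTagEval();             // same as rc.getNeedTagEval()
    restored.getObjectLockRetentionDays(); // same as rc.getObjectLockRetentionDays()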
@@ -13,7 +13,11 @@ const operatorsWithVariables = ['StringEquals', 'StringNotEquals',
const operatorsWithNegation = ['StringNotEquals',
'StringNotEqualsIgnoreCase', 'StringNotLike', 'ArnNotEquals',
'ArnNotLike', 'NumericNotEquals'];
const tagConditions = new Set(['s3:ExistingObjectTag', 's3:RequestObjectTagKey', 's3:RequestObjectTagKeys']);
const tagConditions = new Set([
's3:ExistingObjectTag',
's3:RequestObjectTagKey',
's3:RequestObjectTagKeys',
]);

/**

@@ -24,11 +28,11 @@ const tagConditions = new Set(['s3:ExistingObjectTag', 's3:RequestObjectTagKey',
* @param log - logger
* @return true if applicable, false if not
*/
export const isResourceApplicable = (
export function isResourceApplicable(
requestContext: RequestContext,
statementResource: string | string[],
log: Logger,
): boolean => {
): boolean {
const resource = requestContext.getResource();
if (!Array.isArray(statementResource)) {
// eslint-disable-next-line no-param-reassign

@@ -59,7 +63,7 @@ export const isResourceApplicable = (
{ requestResource: resource });
// If no match found, no resource is applicable
return false;
};
}

/**
* Check whether action in policy statement applies to request

@@ -69,11 +73,11 @@ export const isResourceApplicable = (
* @param log - logger
* @return true if applicable, false if not
*/
export const isActionApplicable = (
export function isActionApplicable(
requestAction: string,
statementAction: string | string[],
log: Logger,
): boolean => {
): boolean {
if (!Array.isArray(statementAction)) {
// eslint-disable-next-line no-param-reassign
statementAction = [statementAction];

@@ -95,32 +99,33 @@ export const isActionApplicable = (
{ requestAction });
// If no match found, return false
return false;
};
}

/**
* Check whether request meets policy conditions
* @param requestContext - info about request
* @param statementCondition - Condition statement from policy
* @param log - logger
* @return contains whether conditions are allowed and whether they
* contain any tag condition keys
* @param {RequestContext} requestContext - info about request
* @param {object} statementCondition - Condition statement from policy
* @param {Logger} log - logger
* @return {boolean|null} a condition evaluation result, one of:
* - true: condition is met
* - false: condition is not met
* - null: condition evaluation requires additional info to be
* provided (namely, for tag conditions, request tags and/or object
* tags have to be provided to evaluate the condition)
*/
export const meetConditions = (
export function meetConditions(
requestContext: RequestContext,
statementCondition: any,
log: Logger,
) => {
): boolean | null {
let hasTagConditions = false;
// The Condition portion of a policy is an object with different
// operators as keys
const conditionEval = {};
const operators = Object.keys(statementCondition);
const length = operators.length;
for (let i = 0; i < length; i++) {
const operator = operators[i];
for (const operator of Object.keys(statementCondition)) {
const hasPrefix = operator.includes(':');
const hasIfExistsCondition = operator.endsWith('IfExists');
// If has "IfExists" added to operator name, or operator has "ForAnyValue" or
// "For All Values" prefix, find operator name without "IfExists" or prefix
// "ForAllValues" prefix, find operator name without "IfExists" or prefix
let bareOperator = hasIfExistsCondition ? operator.slice(0, -8) :
operator;
let prefix: string | undefined;

@@ -135,10 +140,6 @@ export const meetConditions = (
// Note: this should be the actual operator name, not the bareOperator
const conditionsWithSameOperator = statementCondition[operator];
const conditionKeys = Object.keys(conditionsWithSameOperator);
if (conditionKeys.some(key => tagConditions.has(key)) && !requestContext.getNeedTagEval()) {
// @ts-expect-error
conditionEval.tagConditions = true;
}
const conditionKeysLength = conditionKeys.length;
for (let j = 0; j < conditionKeysLength; j++) {
const key = conditionKeys[j];

@@ -155,6 +156,10 @@ export const meetConditions = (
// tag key is included in condition key and needs to be
// moved to value for evaluation, otherwise key/value are unchanged
const [transformedKey, transformedValue] = transformTagKeyValue(key, value);
if (tagConditions.has(transformedKey) && !requestContext.getNeedTagEval()) {
hasTagConditions = true;
continue;
}
// Pull key using requestContext
// TODO: If applicable to S3, handle policy set operations
// where a keyBasedOnRequestContext returns multiple values and

@@ -180,11 +185,10 @@ export const meetConditions = (
log.trace('condition not satisfied due to ' +
'missing info', { operator,
conditionKey: transformedKey, policyValue: transformedValue });
return { allow: false };
return false;
}
// If condition operator prefix is included, the key should be an array
if (prefix && !Array.isArray(keyBasedOnRequestContext)) {
// @ts-expect-error
keyBasedOnRequestContext = [keyBasedOnRequestContext];
}
// Transalate operator into function using bareOperator

@@ -196,14 +200,16 @@ export const meetConditions = (
if (!operatorFunction(keyBasedOnRequestContext, transformedValue, prefix)) {
log.trace('did not satisfy condition', { operator: bareOperator,
keyBasedOnRequestContext, policyValue: transformedValue });
return { allow: false };
return false;
}
}
}
// @ts-expect-error
conditionEval.allow = true;
return conditionEval;
};
// one or more conditions required tag info to be evaluated
if (hasTagConditions) {
return null;
}
return true;
}

/**
* Evaluate whether a request is permitted under a policy.

@@ -216,13 +222,15 @@ export const meetConditions = (
* @return Allow if permitted, Deny if not permitted or Neutral
* if not applicable
*/
export const evaluatePolicy = (
export function evaluatePolicy(
requestContext: RequestContext,
policy: any,
log: Logger,
): string => {
): string {
// TODO: For bucket policies need to add Principal evaluation
let verdict = 'Neutral';
let allow = false;
let allowWithTagCondition = false;
let denyWithTagCondition = false;

if (!Array.isArray(policy.Statement)) {
// eslint-disable-next-line no-param-reassign

@@ -259,10 +267,18 @@ export const evaluatePolicy = (
}
const conditionEval = currentStatement.Condition ?
meetConditions(requestContext, currentStatement.Condition, log) :
null;
true;
// If do not meet conditions move on to next statement
// @ts-expect-error
if (conditionEval && !conditionEval.allow) {
if (conditionEval === false) {
continue;
}
// If condition needs tag info to be evaluated, mark and move on to next statement
if (conditionEval === null) {
if (currentStatement.Effect === 'Deny') {
denyWithTagCondition = true;
} else {
allowWithTagCondition = true;
}
continue;
}
if (currentStatement.Effect === 'Deny') {

@@ -271,19 +287,30 @@ export const evaluatePolicy = (
return 'Deny';
}
log.trace('Allow statement applies');
// If statement is applicable, conditions are met and Effect is
// to Allow, set verdict to Allow
verdict = 'Allow';
// @ts-expect-error
if (conditionEval && conditionEval.tagConditions) {
verdict = 'NeedTagConditionEval';
// statement is applicable, conditions are met and Effect is
// to Allow
allow = true;
}
let verdict;
if (denyWithTagCondition) {
// priority is on checking tags to potentially deny
verdict = 'DenyWithTagCondition';
} else if (allow) {
// at least one statement is an allow
verdict = 'Allow';
} else if (allowWithTagCondition) {
// all allow statements need tag checks
verdict = 'AllowWithTagCondition';
} else {
// no statement matched to allow or deny
verdict = 'Neutral';
}
log.trace('result of evaluating single policy', { verdict });
return verdict;
};
}

/**
* @deprecated Upgrade to standardEvaluateAllPolicies
* Evaluate whether a request is permitted under a policy.
* @param requestContext - Info necessary to
* evaluate permission

@@ -294,24 +321,58 @@ export const evaluatePolicy = (
* @return Allow if permitted, Deny if not permitted.
* Default is to Deny. Deny overrides an Allow
*/
export const evaluateAllPolicies = (
export function evaluateAllPolicies(
requestContext: RequestContext,
allPolicies: any[],
log: Logger,
): string => {
): string {
return standardEvaluateAllPolicies(requestContext, allPolicies, log).verdict;
}
export function standardEvaluateAllPolicies(
requestContext: RequestContext,
allPolicies: any[],
log: Logger,
): {
verdict: string;
isImplicit: boolean;
} {
log.trace('evaluating all policies');
let verdict = 'Deny';
let allow = false;
let allowWithTagCondition = false;
let denyWithTagCondition = false;
for (let i = 0; i < allPolicies.length; i++) {
const singlePolicyVerdict =
evaluatePolicy(requestContext, allPolicies[i], log);
const singlePolicyVerdict = evaluatePolicy(requestContext, allPolicies[i], log);
// If there is any Deny, just return Deny
if (singlePolicyVerdict === 'Deny') {
return 'Deny';
return {
verdict: 'Deny',
isImplicit: false,
};
}
if (singlePolicyVerdict === 'Allow') {
allow = true;
} else if (singlePolicyVerdict === 'AllowWithTagCondition') {
allowWithTagCondition = true;
} else if (singlePolicyVerdict === 'DenyWithTagCondition') {
denyWithTagCondition = true;
} // else 'Neutral'
}
let verdict;
let isImplicit = false;
if (allow) {
if (denyWithTagCondition) {
verdict = 'NeedTagConditionEval';
} else {
verdict = 'Allow';
}
} else {
if (allowWithTagCondition) {
verdict = 'NeedTagConditionEval';
} else {
verdict = 'Deny';
isImplicit = true;
}
log.trace('result of evaluating all pollicies', { verdict });
return verdict;
};
}
log.trace('result of evaluating all policies', { verdict, isImplicit });
return { verdict, isImplicit };
}
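How the tri-state `meetConditions` result and per-policy verdicts combine, condensed from the code above into a truth table (not code from the diff):

    // standardEvaluateAllPolicies combines per-policy verdicts as:
    //
    //   any 'Deny'                               => { verdict: 'Deny', isImplicit: false }
    //   'Allow' + 'DenyWithTagCondition'         => { verdict: 'NeedTagConditionEval', ... }
    //   'Allow' only                             => { verdict: 'Allow', ... }
    //   'AllowWithTagCondition' (no plain Allow) => { verdict: 'NeedTagConditionEval', ... }
    //   only 'Neutral' everywhere                => { verdict: 'Deny', isImplicit: true }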
@@ -23,15 +23,22 @@ export default class Principal {
* @param statement - Statement policy field
* @return True if meet conditions
*/
static _evaluateCondition(
static _evaluateStatement(
params: Params,
statement: Statement,
// TODO Fix return type
): any {
if (statement.Condition) {
return meetConditions(params.rc, statement.Condition, params.log);
): 'Neutral' | 'Allow' | 'Deny' {
const reverse = !!statement.NotPrincipal;
if (reverse) {
// In case of anonymous NotPrincipal, this will neutral everyone
return 'Neutral';
}
return true;
if (statement.Condition) {
const conditionEval = meetConditions(params.rc, statement.Condition, params.log);
if (conditionEval === false || conditionEval === null) {
return 'Neutral';
}
}
return statement.Effect;
}

/**

@@ -48,19 +55,12 @@ export default class Principal {
statement: Statement,
valids: Valid,
): 'Neutral' | 'Allow' | 'Deny' {
const reverse = !!statement.NotPrincipal;
const principal = (statement.Principal || statement.NotPrincipal)!;
if (typeof principal === 'string' && principal === '*') {
if (reverse) {
// In case of anonymous NotPrincipal, this will neutral everyone
return 'Neutral';
const reverse = !!statement.NotPrincipal;
if (typeof principal === 'string') {
if (principal === '*') {
return Principal._evaluateStatement(params, statement);
}
const conditionEval = Principal._evaluateCondition(params, statement);
if (!conditionEval || conditionEval.allow === false) {
return 'Neutral';
}
return statement.Effect;
} else if (typeof principal === 'string') {
return 'Deny';
}
let ref = [];

@@ -82,28 +82,8 @@ export default class Principal {
}
toCheck = Array.isArray(toCheck) ? toCheck : [toCheck];
ref = Array.isArray(ref) ? ref : [ref];
if (toCheck.indexOf('*') !== -1) {
if (reverse) {
return 'Neutral';
}
const conditionEval = Principal._evaluateCondition(params, statement);
if (!conditionEval || conditionEval.allow === false) {
return 'Neutral';
}
return statement.Effect;
}
const len = ref.length;
for (let i = 0; i < len; ++i) {
if (toCheck.indexOf(ref[i]) !== -1) {
if (reverse) {
return 'Neutral';
}
const conditionEval = Principal._evaluateCondition(params, statement);
if (!conditionEval || conditionEval.allow === false) {
return 'Neutral';
}
return statement.Effect;
}
}
if (toCheck.includes('*') || ref.some(r => toCheck.includes(r))) {
return Principal._evaluateStatement(params, statement);
}
if (reverse) {
return statement.Effect;
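Behaviour of the consolidated helper, summarized from the code above:

    // _evaluateStatement(params, statement) returns:
    //
    //   statement.NotPrincipal set              => 'Neutral' (anonymous
    //                                              NotPrincipal neutralizes everyone)
    //   Condition present and meetConditions
    //     returns false or null                 => 'Neutral'
    //   otherwise                               => statement.Effect ('Allow' | 'Deny')
    //
    // The caller now funnels both the '*' wildcard match and the exact
    // principal match through this single helper instead of duplicating
    // the condition check three times.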
@@ -4,14 +4,14 @@ const sharedActionMap = {
bucketDeleteEncryption: 's3:PutEncryptionConfiguration',
bucketDeletePolicy: 's3:DeleteBucketPolicy',
bucketDeleteWebsite: 's3:DeleteBucketWebsite',
bucketDeleteTagging: 's3:DeleteBucketTagging',
bucketDeleteTagging: 's3:PutBucketTagging',
bucketGet: 's3:ListBucket',
bucketGetACL: 's3:GetBucketAcl',
bucketGetCors: 's3:GetBucketCORS',
bucketGetEncryption: 's3:GetEncryptionConfiguration',
bucketGetLifecycle: 's3:GetLifecycleConfiguration',
bucketGetLocation: 's3:GetBucketLocation',
bucketGetNotification: 's3:GetBucketNotificationConfiguration',
bucketGetNotification: 's3:GetBucketNotification',
bucketGetObjectLock: 's3:GetBucketObjectLockConfiguration',
bucketGetPolicy: 's3:GetBucketPolicy',
bucketGetReplication: 's3:GetReplicationConfiguration',

@@ -23,7 +23,7 @@ const sharedActionMap = {
bucketPutCors: 's3:PutBucketCORS',
bucketPutEncryption: 's3:PutEncryptionConfiguration',
bucketPutLifecycle: 's3:PutLifecycleConfiguration',
bucketPutNotification: 's3:PutBucketNotificationConfiguration',
bucketPutNotification: 's3:PutBucketNotification',
bucketPutObjectLock: 's3:PutBucketObjectLockConfiguration',
bucketPutPolicy: 's3:PutBucketPolicy',
bucketPutReplication: 's3:PutReplicationConfiguration',

@@ -42,6 +42,7 @@ const sharedActionMap = {
objectGetLegalHold: 's3:GetObjectLegalHold',
objectGetRetention: 's3:GetObjectRetention',
objectGetTagging: 's3:GetObjectTagging',
objectHead: 's3:GetObject',
objectPut: 's3:PutObject',
objectPutACL: 's3:PutObjectAcl',
objectPutLegalHold: 's3:PutObjectLegalHold',

@@ -51,6 +52,12 @@ const sharedActionMap = {
objectPutVersion: 's3:PutObjectVersion',
};

const actionMapBucketQuotas = {
bucketGetQuota: 'scality:GetBucketQuota',
bucketUpdateQuota: 'scality:UpdateBucketQuota',
bucketDeleteQuota: 'scality:DeleteBucketQuota',
};

// action map used for request context
const actionMapRQ = {
bucketPut: 's3:CreateBucket',

@@ -58,36 +65,35 @@ const actionMapRQ = {
// see http://docs.aws.amazon.com/AmazonS3/latest/API/
// RESTBucketDELETEcors.html
bucketDeleteCors: 's3:PutBucketCORS',
bucketDeleteReplication: 's3:DeleteReplicationConfiguration',
bucketDeleteLifecycle: 's3:DeleteLifecycleConfiguration',
bucketDeleteReplication: 's3:PutReplicationConfiguration',
bucketDeleteLifecycle: 's3:PutLifecycleConfiguration',
completeMultipartUpload: 's3:PutObject',
initiateMultipartUpload: 's3:PutObject',
objectDeleteVersion: 's3:DeleteObjectVersion',
objectDeleteTaggingVersion: 's3:DeleteObjectVersionTagging',
objectGetArchiveInfo: 'scality:GetObjectArchiveInfo',
objectGetVersion: 's3:GetObjectVersion',
objectGetACLVersion: 's3:GetObjectVersionAcl',
objectGetTaggingVersion: 's3:GetObjectVersionTagging',
objectHead: 's3:GetObject',
objectPutACLVersion: 's3:PutObjectVersionAcl',
objectPutPart: 's3:PutObject',
objectPutTaggingVersion: 's3:PutObjectVersionTagging',
serviceGet: 's3:ListAllMyBuckets',
objectReplicate: 's3:ReplicateObject',
objectPutRetentionVersion: 's3:PutObjectVersionRetention',
objectPutLegalHoldVersion: 's3:PutObjectVersionLegalHold',
objectGetRetentionVersion: 's3:GetObjectRetention',
objectPutRetentionVersion: 's3:PutObjectRetention',
objectGetLegalHoldVersion: 's3:GetObjectLegalHold',
objectPutLegalHoldVersion: 's3:PutObjectLegalHold',
listObjectVersions: 's3:ListBucketVersions',
...sharedActionMap,
...actionMapBucketQuotas,
};

// action map used for bucket policies
const actionMapBP = { ...sharedActionMap };
const actionMapBP = actionMapRQ;

// action map for all relevant s3 actions
const actionMapS3 = {
// TODO
// @ts-ignore
bucketGetNotification: 's3:GetBucketNotification',
// @ts-ignore
bucketPutNotification: 's3:PutBucketNotification',
...sharedActionMap,
...actionMapRQ,
...actionMapBP,

@@ -107,7 +113,7 @@ const actionMonitoringMapS3 = {
bucketGetCors: 'GetBucketCors',
bucketGetLifecycle: 'GetBucketLifecycleConfiguration',
bucketGetLocation: 'GetBucketLocation',
bucketGetNotification: 'GetBucketNotificationConfiguration',
bucketGetNotification: 'GetBucketNotification',
bucketGetObjectLock: 'GetObjectLockConfiguration',
bucketGetPolicy: 'GetBucketPolicy',
bucketGetReplication: 'GetBucketReplication',

@@ -120,7 +126,7 @@ const actionMonitoringMapS3 = {
bucketPutACL: 'PutBucketAcl',
bucketPutCors: 'PutBucketCors',
bucketPutLifecycle: 'PutBucketLifecycleConfiguration',
bucketPutNotification: 'PutBucketNotificationConfiguration',
bucketPutNotification: 'PutBucketNotification',
bucketPutObjectLock: 'PutObjectLockConfiguration',
bucketPutPolicy: 'PutBucketPolicy',
bucketPutReplication: 'PutBucketReplication',

@@ -153,6 +159,15 @@ const actionMonitoringMapS3 = {
objectPutTagging: 'PutObjectTagging',
objectRestore: 'RestoreObject',
serviceGet: 'ListBuckets',
bucketGetQuota: 'GetBucketQuota',
bucketUpdateQuota: 'UpdateBucketQuota',
bucketDeleteQuota: 'DeleteBucketQuota',
};

const actionMapAccountQuotas = {
UpdateAccountQuota : 'scality:UpdateAccountQuota',
DeleteAccountQuota : 'scality:DeleteAccountQuota',
GetAccountQuota : 'scality:GetAccountQuota',
};

const actionMapIAM = {

@@ -188,10 +203,15 @@ const actionMapIAM = {
removeUserFromGroup: 'iam:RemoveUserFromGroup',
updateAccessKey: 'iam:UpdateAccessKey',
updateGroup: 'iam:UpdateGroup',
updateRole: 'iam:UpdateRole',
updateUser: 'iam:UpdateUser',
getAccessKeyLastUsed: 'iam:GetAccessKeyLastUsed',
generateCredentialReport: 'iam:GenerateCredentialReport',
getCredentialReport: 'iam:GetCredentialReport',
tagUser: 'iam:TagUser',
unTagUser: 'iam:UntagUser',
listUserTags: 'iam:ListUserTags',
...actionMapAccountQuotas,
};

const actionMapSSO = {

@@ -207,6 +227,14 @@ const actionMapMetadata = {
default: 'metadata:bucketd',
};

const actionMapScuba = {
GetMetrics: 'scuba:GetMetrics',
AdminStartIngest: 'scuba:AdminStartIngest',
AdminStopIngest: 'scuba:AdminStopIngest',
AdminReadRaftCseq: 'scuba:AdminReadRaftCseq',
AdminTriggerRepair: 'scuba:AdminTriggerRepair',
};

export {
actionMapRQ,
actionMapBP,

@@ -216,4 +244,5 @@ export {
actionMapSSO,
actionMapSTS,
actionMapMetadata,
actionMapScuba,
};
@@ -1,5 +1,5 @@
import { handleWildcardInResource } from './wildcards';

import { policyArnAllowedEmptyAccountId } from '../../constants';
/**
* Checks whether an ARN from a request matches an ARN in a policy
* to compare against each portion of the ARN from the request

@@ -38,9 +38,10 @@ export default function checkArnMatch(
const requestSegment = caseSensitive ? requestArnArr[j] :
requestArnArr[j].toLowerCase();
const policyArnArr = policyArn.split(':');
// We want to allow an empty account ID for utapi service ARNs to not
// We want to allow an empty account ID for utapi and scuba service ARNs to not
// break compatibility.
if (j === 4 && policyArnArr[2] === 'utapi' && policyArnArr[4] === '') {
if (j === 4 && policyArnAllowedEmptyAccountId.includes(policyArnArr[2])
&& policyArnArr[4] === '') {
continue;
} else if (!segmentRegEx.test(requestSegment)) {
return false;
@@ -11,31 +11,30 @@ import ipaddr from 'ipaddr.js';
* @param requestContext - info sent with request
* @return condition key value
*/
export const findConditionKey = (
export function findConditionKey(
key: string,
requestContext: RequestContext,
): string => {
): any {
// TODO: Consider combining with findVariable function if no benefit
// to keeping separate
const headers = requestContext.getHeaders();
const query = requestContext.getQuery();
const requesterInfo = requestContext.getRequesterInfo();

const map = new Map();
// Possible AWS Condition keys (http://docs.aws.amazon.com/IAM/latest/
// UserGuide/reference_policies_elements.html#AvailableKeys)

switch (key) {
// aws:CurrentTime – Used for date/time conditions
// (see Date Condition Operators).
map.set('aws:CurrentTime', new Date().toISOString());
case 'aws:CurrentTime': return new Date().toISOString();
// aws:EpochTime – Used for date/time conditions
// (see Date Condition Operators).
map.set('aws:EpochTime', Date.now().toString());
case 'aws:EpochTime': return Date.now().toString();
// aws:TokenIssueTime – Date/time that temporary security
// credentials were issued (see Date Condition Operators).
// Only present in requests that are signed using temporary security
// credentials.
map.set('aws:TokenIssueTime', requestContext.getTokenIssueTime());
case 'aws:TokenIssueTime': return requestContext.getTokenIssueTime();
// aws:MultiFactorAuthPresent – Used to check whether MFA was used
// (see Boolean Condition Operators).
// Note: This key is only present if MFA was used. So, the following

@@ -45,133 +44,137 @@ export const findConditionKey = (
// Instead use:
// "Condition" :
// { "Null" : { "aws:MultiFactorAuthPresent" : true } }
map.set('aws:MultiFactorAuthPresent',
requestContext.getMultiFactorAuthPresent());
case 'aws:MultiFactorAuthPresent': return requestContext.getMultiFactorAuthPresent();
// aws:MultiFactorAuthAge – Used to check how many seconds since
// MFA credentials were issued. If MFA was not used,
// this key is not present
map.set('aws:MultiFactorAuthAge', requestContext.getMultiFactorAuthAge());
case 'aws:MultiFactorAuthAge': return requestContext.getMultiFactorAuthAge();
// aws:principaltype states whether the principal is an account,
// user, federated, or assumed role
// Note: Docs for conditions have "PrincipalType" but simulator
// and docs for variables have lowercase
map.set('aws:principaltype', requesterInfo.principaltype);
case 'aws:principaltype': return requesterInfo.principaltype;
// aws:Referer – Used to check who referred the client browser to
// the address the request is being sent to. Only supported by some
// services, such as S3. Value comes from the referer header in the
// HTTPS request made to AWS.
map.set('aws:referer', headers.referer);
case 'aws:referer': return headers.referer;
// aws:SecureTransport – Used to check whether the request was sent
// using SSL (see Boolean Condition Operators).
map.set('aws:SecureTransport',
requestContext.getSslEnabled() ? 'true' : 'false');
case 'aws:SecureTransport': return requestContext.getSslEnabled() ? 'true' : 'false';
// aws:SourceArn – Used check the source of the request,
// using the ARN of the source. N/A here.
map.set('aws:SourceArn', undefined);
case 'aws:SourceArn': return undefined;
// aws:SourceIp – Used to check the requester's IP address
// (see IP Address Condition Operators)
map.set('aws:SourceIp', requestContext.getRequesterIp());
case 'aws:SourceIp': return requestContext.getRequesterIp();
// aws:SourceVpc – Used to restrict access to a specific
// AWS Virtual Private Cloud. N/A here.
map.set('aws:SourceVpc', undefined);
case 'aws:SourceVpc': return undefined;
// aws:SourceVpce – Used to limit access to a specific VPC endpoint
// N/A here
map.set('aws:SourceVpce', undefined);
case 'aws:SourceVpce': return undefined;
// aws:UserAgent – Used to check the requester's client app.
// (see String Condition Operators)
map.set('aws:UserAgent', headers['user-agent']);
case 'aws:UserAgent': return headers['user-agent'];
// aws:userid – Used to check the requester's unique user ID.
// (see String Condition Operators)
map.set('aws:userid', requesterInfo.userid);
case 'aws:userid': return requesterInfo.userid;
// aws:username – Used to check the requester's friendly user name.
// (see String Condition Operators)
map.set('aws:username', requesterInfo.username);
case 'aws:username': return requesterInfo.username;
// Possible condition keys for S3:
// s3:x-amz-acl is acl request for bucket or object put request
map.set('s3:x-amz-acl', headers['x-amz-acl']);
case 's3:x-amz-acl': return headers['x-amz-acl'];
// s3:x-amz-grant-PERMISSION (where permission can be:
// read, write, read-acp, write-acp or full-control)
// Value is the value of that header (ex. id of grantee)
map.set('s3:x-amz-grant-read', headers['x-amz-grant-read']);
map.set('s3:x-amz-grant-write', headers['x-amz-grant-write']);
map.set('s3:x-amz-grant-read-acp', headers['x-amz-grant-read-acp']);
map.set('s3:x-amz-grant-write-acp', headers['x-amz-grant-write-acp']);
map.set('s3:x-amz-grant-full-control', headers['x-amz-grant-full-control']);
case 's3:x-amz-grant-read': return headers['x-amz-grant-read'];
case 's3:x-amz-grant-write': return headers['x-amz-grant-write'];
case 's3:x-amz-grant-read-acp': return headers['x-amz-grant-read-acp'];
case 's3:x-amz-grant-write-acp': return headers['x-amz-grant-write-acp'];
case 's3:x-amz-grant-full-control': return headers['x-amz-grant-full-control'];
// s3:x-amz-copy-source is x-amz-copy-source header if applicable on
// a put object
map.set('s3:x-amz-copy-source', headers['x-amz-copy-source']);
case 's3:x-amz-copy-source': return headers['x-amz-copy-source'];
// s3:x-amz-metadata-directive is x-amz-metadata-directive header if
// applicable on a put object copy. Determines whether metadata will
// be copied from original object or replaced. Values or "COPY" or
// "REPLACE". Default is "COPY"
map.set('s3:x-amz-metadata-directive', headers['metadata-directive']);
case 's3:x-amz-metadata-directive': return headers['metadata-directive'];
// s3:x-amz-server-side-encryption -- Used to require that object put
// use server side encryption. Value is the encryption algo such as
// "AES256"
map.set('s3:x-amz-server-side-encryption',
headers['x-amz-server-side-encryption']);
case 's3:x-amz-server-side-encryption': return headers['x-amz-server-side-encryption'];
// s3:x-amz-storage-class -- x-amz-storage-class header value
// (STANDARD, etc.)
map.set('s3:x-amz-storage-class', headers['x-amz-storage-class']);
case 's3:x-amz-storage-class': return headers['x-amz-storage-class'];
// s3:VersionId -- version id of object
map.set('s3:VersionId', query.versionId);
case 's3:VersionId': return query.versionId;
// s3:LocationConstraint -- Used to restrict creation of bucket
// in certain region. Only applicable for CreateBucket
map.set('s3:LocationConstraint', requestContext.getLocationConstraint());
case 's3:LocationConstraint': return requestContext.getLocationConstraint();
// s3:delimiter is delimiter for listing request
map.set('s3:delimiter', query.delimiter);
case 's3:delimiter': return query.delimiter;
// s3:max-keys is max-keys for listing request
map.set('s3:max-keys', query['max-keys']);
case 's3:max-keys': return query['max-keys'];
// s3:prefix is prefix for listing request
map.set('s3:prefix', query.prefix);
case 's3:prefix': return query.prefix;
// s3 auth v4 additional condition keys
// (See http://docs.aws.amazon.com/AmazonS3/latest/API/
// bucket-policy-s3-sigv4-conditions.html)
// s3:signatureversion -- Either "AWS" for v2 or
// "AWS4-HMAC-SHA256" for v4
map.set('s3:signatureversion', requestContext.getSignatureVersion());
case 's3:signatureversion': return requestContext.getSignatureVersion();
// s3:authType -- Method of authentication: either "REST-HEADER",
// "REST-QUERY-STRING" or "POST"
map.set('s3:authType', requestContext.getAuthType());
case 's3:authType': return requestContext.getAuthType();
// s3:signatureAge is the length of time, in milliseconds,
// that a signature is valid in an authenticated request. So,
// can use this to limit the age to less than 7 days
map.set('s3:signatureAge', requestContext.getSignatureAge());
case 's3:signatureAge': return requestContext.getSignatureAge();
// s3:x-amz-content-sha256 - Valid value is "UNSIGNED-PAYLOAD"
// so can use this in a deny policy to deny any requests that do not
// have a signed payload
map.set('s3:x-amz-content-sha256', headers['x-amz-content-sha256']);
case 's3:x-amz-content-sha256': return headers['x-amz-content-sha256'];
// s3:ObjLocationConstraint is the location constraint set for an
// object on a PUT request using the "x-amz-meta-scal-location-constraint"
// header
map.set('s3:ObjLocationConstraint',
headers['x-amz-meta-scal-location-constraint']);
map.set('sts:ExternalId', requestContext.getRequesterExternalId());
map.set('keycloak:groups', requesterInfo.keycloakGroup);
map.set('keycloak:roles', requesterInfo.keycloakRole);
map.set('iam:PolicyArn', requestContext.getPolicyArn());
case 's3:ObjLocationConstraint': return headers['x-amz-meta-scal-location-constraint'];
case 'sts:ExternalId': return requestContext.getRequesterExternalId();
case 'keycloak:groups': return requesterInfo.keycloakGroup;
case 'keycloak:roles': return requesterInfo.keycloakRole;
case 'iam:PolicyArn': return requestContext.getPolicyArn();
// s3:ExistingObjectTag - Used to check that existing object tag has
// specific tag key and value. Extraction of correct tag key is done in CloudServer.
// On first pass of policy evaluation, CloudServer information will not be included,
// so evaluation should be skipped
map.set('s3:ExistingObjectTag', requestContext.getNeedTagEval() ? requestContext.getExistingObjTag() : undefined);
case 's3:ExistingObjectTag':
return requestContext.getNeedTagEval()
? requestContext.getExistingObjTag() : undefined;
// s3:RequestObjectTag - Used to limit putting object tags to specific
// tag key and value. N/A here.
// Requires information from CloudServer
// On first pass of policy evaluation, CloudServer information will not be included,
// so evaluation should be skipped
map.set('s3:RequestObjectTagKey', requestContext.getNeedTagEval() ? requestContext.getRequestObjTags() : undefined);
case 's3:RequestObjectTagKey':
return requestContext.getNeedTagEval()
? requestContext.getRequestObjTags() : undefined;
// s3:RequestObjectTagKeys - Used to limit putting object tags specific tag keys.
// Requires information from CloudServer.
|
||||
// On first pass of policy evaluation, CloudServer information will not be included,
|
||||
// so evaluation should be skipped
|
||||
map.set('s3:RequestObjectTagKeys',
|
||||
requestContext.getNeedTagEval() && requestContext.getRequestObjTags()
|
||||
case 's3:RequestObjectTagKeys':
|
||||
return requestContext.getNeedTagEval() && requestContext.getRequestObjTags()
|
||||
? getTagKeys(requestContext.getRequestObjTags()!)
|
||||
: undefined,
|
||||
);
|
||||
return map.get(key);
|
||||
};
|
||||
: undefined;
|
||||
// The maximum retention period is 100 years.
|
||||
case 's3:object-lock-remaining-retention-days':
|
||||
return requestContext.getObjectLockRetentionDays() || undefined;
|
||||
default:
|
||||
return undefined;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// Wildcards are allowed in certain string comparison and arn comparisons
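Editor's note on the shape of the hunk above: the old code eagerly built a `Map` of every condition key, so every getter on the request context ran on each lookup; the new code switches on the requested key and evaluates only that branch. A minimal sketch of the pattern, where `ctx` and its two getters are hypothetical stand-ins for the real `RequestContext` API:

    // Sketch only: `ctx.getSignatureAge()` etc. stand in for the real getters.
    type Ctx = { getSignatureAge: () => number; getAuthType: () => string };

    // Old style: every value is computed up front just to read one of them.
    function lookupEager(key: string, ctx: Ctx) {
        const map = new Map<string, unknown>();
        map.set('s3:signatureAge', ctx.getSignatureAge());
        map.set('s3:authType', ctx.getAuthType());
        return map.get(key);
    }

    // New style: only the requested key's getter runs.
    function lookupLazy(key: string, ctx: Ctx) {
        switch (key) {
        case 's3:signatureAge': return ctx.getSignatureAge();
        case 's3:authType': return ctx.getAuthType();
        default: return undefined;
        }
    }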
@@ -231,7 +234,7 @@ function convertToEpochTime(time: string | string[]) {
  * reference_policies_elements.html)
  * @return true if condition passes and false if not
  */
-export const convertConditionOperator = (operator: string): boolean => {
+export function convertConditionOperator(operator: string): boolean {
     // Policy Validator checks that the condition operator
     // is only one of these strings so should not have undefined
     // or security issue with object assignment

@@ -446,4 +449,4 @@ export const convertConditionOperator = (operator: string): boolean => {
         },
     };
     return operatorMap[operator];
-};
+}

@@ -30,7 +30,7 @@ export default class ResultsCollector extends EventEmitter {
      * @emits ResultCollector#done
      * @emits ResultCollector#error
      */
-    pushResult(err: Error | undefined, subPartIndex: number) {
+    pushResult(err: Error | null | undefined, subPartIndex: number) {
         this._results.push({
             error: err,
             subPartIndex,
@@ -1,11 +1,15 @@
 import assert from 'assert';
 import * as crypto from 'crypto';
 import * as stream from 'stream';
+import azure from '@azure/storage-blob';
+
+import { RequestLogger } from 'werelogs';
 
 import ResultsCollector from './ResultsCollector';
 import SubStreamInterface from './SubStreamInterface';
 import * as objectUtils from '../objectUtils';
 import MD5Sum from '../MD5Sum';
-import errors from '../../errors';
+import errors, { ArsenalError } from '../../errors';
 
 export const splitter = '|';
 export const overviewMpuKey = 'azure_mpu';

@@ -61,7 +65,7 @@ export const getBlockId = (
     const paddedSubPart = padString(subPartIndex, 'subPart');
     const blockId = `${uploadId}${splitter}partNumber${paddedPartNumber}` +
         `${splitter}subPart${paddedSubPart}${splitter}`;
-    return padString(blockId, 'part');
+    return Buffer.from(padString(blockId, 'part')).toString('base64');
 };
 
 export const getSummaryPartId = (partNumber: number, eTag: string, size: number) => {
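The `getBlockId` change reflects a requirement of the newer Azure SDK: `BlockBlobClient.stageBlock` expects block IDs to be base64-encoded strings of equal length within a blob. A quick sketch of what the encoded IDs look like; `padTo` here is an assumed stand-in for the real `padString` helper:

    // Sketch: pad raw IDs to a fixed width, then base64-encode, so every
    // block ID within one blob has the same encoded length.
    const padTo = (s: string, width: number) => s.padEnd(width, ' ');

    const rawId = padTo('uploadId|partNumber00001|subPart00|', 64);
    const blockId = Buffer.from(rawId).toString('base64');
    // Same raw width in => same base64 length out, for every part/subpart.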
@@ -100,10 +104,17 @@ export const getSubPartIds = (
 ) => [...Array(part.numberSubParts).keys()].map(subPartIndex =>
     getBlockId(uploadId, part.partNumber, subPartIndex));
 
+// TODO Better type this
+type ErrorWrapperFn = (
+    s3Method: string,
+    azureMethod: string,
+    command: (client: azure.ContainerClient) => Promise<any>,
+    log: RequestLogger,
+    cb: (err: ArsenalError | null | undefined) => void,
+) => void
+
 export const putSinglePart = (
-    errorWrapperFn: (first: string, second: string, third: any, log: any, cb: any) => void,
-    request: any,
+    errorWrapperFn: ErrorWrapperFn,
+    request: stream.Readable,
     params: {
         bucketName: string;
         partNumber: number;

@@ -114,44 +125,44 @@ export const putSinglePart = (
     },
     dataStoreName: string,
     log: RequestLogger,
-    cb: any,
+    cb: (err: ArsenalError | null | undefined, dataStoreETag?: string, size?: number) => void,
 ) => {
     const { bucketName, partNumber, size, objectKey, contentMD5, uploadId }
         = params;
     const blockId = getBlockId(uploadId, partNumber, 0);
     const passThrough = new stream.PassThrough();
     const options = contentMD5
-        ? { useTransactionalMD5: true, transactionalContentMD5: contentMD5 }
+        ? { transactionalContentMD5: objectUtils.getMD5Buffer(contentMD5) }
        : {};
     request.pipe(passThrough);
-    return errorWrapperFn('uploadPart', 'createBlockFromStream',
-        [blockId, bucketName, objectKey, passThrough, size, options,
-        (err: any | null, result: any) => {
-            if (err) {
+    return errorWrapperFn('uploadPart', 'createBlockFromStream', async client => {
+        try {
+            const result = await client.getBlockBlobClient(objectKey)
+                .stageBlock(blockId, () => passThrough, size, options);
+            const md5 = result.contentMD5 || '';
+            const eTag = objectUtils.getHexMD5(md5);
+            return eTag;
+        } catch (err: any) {
             log.error('Error from Azure data backend uploadPart',
                 { error: err.message, dataStoreName });
             if (err.code === 'ContainerNotFound') {
-                return cb(errors.NoSuchBucket);
+                throw errors.NoSuchBucket;
             }
             if (err.code === 'InvalidMd5') {
-                return cb(errors.InvalidDigest);
+                throw errors.InvalidDigest;
             }
             if (err.code === 'Md5Mismatch') {
-                return cb(errors.BadDigest);
+                throw errors.BadDigest;
             }
-            return cb(errors.InternalError.customizeDescription(
-                `Error returned from Azure: ${err.message}`),
+            throw errors.InternalError.customizeDescription(
+                `Error returned from Azure: ${err.message}`
             );
         }
-        const md5 = result.headers['content-md5'] || '';
-        const eTag = objectUtils.getHexMD5(md5);
-        return cb(null, eTag, size);
-    }], log, cb);
+    }, log, cb);
 };
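The recurring shape in this migration: `errorWrapperFn` used to receive an array of positional SDK arguments plus a Node-style callback; it now receives an async `command` that is handed the shared container client and returns a promise, with the wrapper translating resolution or rejection back into the callback. The `_errorWrapper` rewrite in the AzureClient hunks further down implements exactly this bridge; here is a minimal sketch of the contract, with types simplified:

    // Sketch of the command-style wrapper contract, not the shipped implementation.
    type Cb = (err: Error | null | undefined, result?: any) => void;

    function errorWrapper<C>(
        client: C,
        command: (client: C) => Promise<any>,
        cb: Cb,
    ) {
        // A promise settles exactly once, so the callback fires exactly once.
        command(client).then(result => cb(null, result), err => cb(err));
    }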
-// TODO type this
-export const putNextSubPart = (
-    errorWrapperFn: any,
+const putNextSubPart = (
+    errorWrapperFn: ErrorWrapperFn,
     partParams: {
         uploadId: string;
         partNumber: number;

@@ -159,11 +170,10 @@ export const putNextSubPart = (
         objectKey: string;
     },
     subPartInfo: { lastPartIndex: number; lastPartSize: number },
-    subPartStream: any,
+    subPartStream: stream.Readable,
     subPartIndex: number,
     resultsCollector: ResultsCollector,
     log: RequestLogger,
-    cb: any,
 ) => {
     const { uploadId, partNumber, bucketName, objectKey } = partParams;
     const subPartSize = getSubPartSize(

@@ -171,14 +181,20 @@ export const putNextSubPart = (
     const subPartId = getBlockId(uploadId, partNumber,
         subPartIndex);
     resultsCollector.pushOp();
-    errorWrapperFn('uploadPart', 'createBlockFromStream',
-        [subPartId, bucketName, objectKey, subPartStream, subPartSize,
-        {}, err => resultsCollector.pushResult(err, subPartIndex)], log, cb);
+    errorWrapperFn('uploadPart', 'createBlockFromStream', async client => {
+        try {
+            const result = await client.getBlockBlobClient(objectKey)
+                .stageBlock(subPartId, () => subPartStream, subPartSize, {});
+            resultsCollector.pushResult(null, subPartIndex);
+        } catch (err: any) {
+            resultsCollector.pushResult(err, subPartIndex);
+        }
+    }, log, () => {});
 };
 
 export const putSubParts = (
-    errorWrapperFn: any,
-    request: any,
+    errorWrapperFn: ErrorWrapperFn,
+    request: stream.Readable,
     params: {
         uploadId: string;
         partNumber: number;

@@ -188,7 +204,7 @@ export const putSubParts = (
     },
     dataStoreName: string,
     log: RequestLogger,
-    cb: any,
+    cb: (err: ArsenalError | null | undefined, dataStoreETag?: string) => void,
 ) => {
     const subPartInfo = getSubPartInfo(params.size);
     const resultsCollector = new ResultsCollector();

@@ -227,14 +243,13 @@ export const putSubParts = (
     const totalLength = streamInterface.getTotalBytesStreamed();
     log.trace('successfully put subparts to Azure',
         { numberSubParts, totalLength });
-    hashedStream.on('hashed', () => cb(null, hashedStream.completedHash,
-        totalLength));
+    hashedStream.on('hashed', () => cb(null, hashedStream.completedHash));
 
     // in case the hashed event was already emitted before the
     // event handler was registered:
     if (hashedStream.completedHash) {
         hashedStream.removeAllListeners('hashed');
-        return cb(null, hashedStream.completedHash, totalLength);
+        return cb(null, hashedStream.completedHash);
     }
     return undefined;
 });

@@ -242,7 +257,7 @@ export const putSubParts = (
     const currentStream = streamInterface.getCurrentStream();
     // start first put to Azure before we start streaming the data
     putNextSubPart(errorWrapperFn, params, subPartInfo,
-        currentStream, 0, resultsCollector, log, cb);
+        currentStream, 0, resultsCollector, log);
 
     request.pipe(hashedStream);
     hashedStream.on('end', () => {

@@ -262,8 +277,8 @@ export const putSubParts = (
         }
         const { nextStream, subPartIndex } =
             streamInterface.transitionToNextStream();
-        putNextSubPart(errorWrapperFn, params, subPartInfo,
-            nextStream, subPartIndex, resultsCollector, log, cb);
+        putNextSubPart(errorWrapperFn, params, subPartInfo, nextStream,
+            subPartIndex, resultsCollector, log);
         streamInterface.write(firstChunk);
     } else {
         streamInterface.write(data);
@@ -1,19 +1,25 @@
-const oneDay = 24 * 60 * 60 * 1000; // Milliseconds in a day.
+import { scaleMsPerDay } from '../objectUtils';
+
+const msInOneDay = 24 * 60 * 60 * 1000; // Milliseconds in a day.
 
 export default class LifecycleDateTime {
     _transitionOneDayEarlier?: boolean;
     _expireOneDayEarlier?: boolean;
+    _timeProgressionFactor?: number;
+    _scaledMsPerDay: number;
 
     constructor(params?: {
         transitionOneDayEarlier: boolean;
         expireOneDayEarlier: boolean;
+        timeProgressionFactor: number;
     }) {
         this._transitionOneDayEarlier = params?.transitionOneDayEarlier;
         this._expireOneDayEarlier = params?.expireOneDayEarlier;
+        this._timeProgressionFactor = params?.timeProgressionFactor || 1;
+        this._scaledMsPerDay = scaleMsPerDay(this._timeProgressionFactor);
     }
 
     getCurrentDate() {
-        const timeTravel = this._expireOneDayEarlier ? oneDay : 0;
+        const timeTravel = this._expireOneDayEarlier ? msInOneDay : 0;
         return Date.now() + timeTravel;
     }
 
@@ -25,7 +31,7 @@ export default class LifecycleDateTime {
     findDaysSince(date: Date) {
         const now = this.getCurrentDate();
         const diff = now - date.getTime();
-        return Math.floor(diff / (1000 * 60 * 60 * 24));
+        return Math.floor(diff / this._scaledMsPerDay);
     }
 
     /**

@@ -52,8 +58,25 @@ export default class LifecycleDateTime {
     }
     if (transition.Days !== undefined) {
         const lastModifiedTime = this.getTimestamp(lastModified);
-        const timeTravel = this._transitionOneDayEarlier ? -oneDay : 0;
-        return lastModifiedTime + (transition.Days * oneDay) + timeTravel;
+        const timeTravel = this._transitionOneDayEarlier ? -msInOneDay : 0;
+        return lastModifiedTime + (transition.Days * this._scaledMsPerDay) + timeTravel;
     }
 }
 
+/**
+ * Find the Unix time at which the non-current version transition should occur.
+ * @param transition - A non-current version transition from the lifecycle non-current version transitions
+ * @param lastModified - The object's last modified date
+ * @return - The normalized transition timestamp
+ */
+getNCVTransitionTimestamp(
+    transition: { NoncurrentDays?: number },
+    lastModified: string,
+) {
+    if (transition.NoncurrentDays !== undefined) {
+        const lastModifiedTime = this.getTimestamp(lastModified);
+        const timeTravel = this._transitionOneDayEarlier ? -msInOneDay : 0;
+        return lastModifiedTime + (transition.NoncurrentDays * this._scaledMsPerDay) + timeTravel;
+    }
+}
 }
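`timeProgressionFactor` compresses lifecycle time for testing: every `msInOneDay / factor` real milliseconds count as one lifecycle day. The arithmetic, spelled out from the code above (class internals elided):

    // Sketch: with factor 24, one real hour is treated as one lifecycle "day".
    const msInOneDay = 24 * 60 * 60 * 1000;
    const scaleMsPerDay = (factor: number) => Math.round(msInOneDay / (factor || 1)) || 1;

    const scaled = scaleMsPerDay(24);          // 3_600_000 ms per scaled day
    const elapsedRealMs = 2 * 60 * 60 * 1000;  // 2 real hours
    console.log(Math.floor(elapsedRealMs / scaled)); // 2 "days" since last modified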
@@ -61,6 +61,47 @@ export default class LifecycleUtils {
         return trans1 > trans2 ? transition1 : transition2;
     }
 
+    /**
+     * Compare two non-current version transition rules and return the one that is most recent.
+     * @param params - The function parameters
+     * @param params.transition1 - A non-current version transition from the current rule
+     * @param params.transition2 - A non-current version transition from the previous rule
+     * @param params.lastModified - The object's last modified
+     * date
+     * @return The most applicable transition rule
+     */
+    compareNCVTransitions(params: {
+        lastModified: string;
+        transition1: any;
+        transition2?: any;
+    }): number | undefined;
+    compareNCVTransitions(params: {
+        lastModified: string;
+        transition1?: any;
+        transition2: any;
+    }): number | undefined;
+    compareNCVTransitions(params: {
+        lastModified: string;
+        transition1: any;
+        transition2: any;
+    }): number | undefined;
+    compareNCVTransitions(params: {
+        lastModified: string;
+        transition1?: any;
+        transition2?: any;
+    }) {
+        const { transition1, transition2, lastModified } = params;
+        if (transition1 === undefined) {
+            return transition2;
+        }
+        if (transition2 === undefined) {
+            return transition1;
+        }
+        const trans1 = this._datetime.getNCVTransitionTimestamp(transition1!, lastModified)!;
+        const trans2 = this._datetime.getNCVTransitionTimestamp(transition2!, lastModified)!;
+        return trans1 > trans2 ? transition1 : transition2;
+    }
+
     // TODO Fix This
     /**
      * Find the most relevant transition rule for the given transitions array

@@ -98,6 +139,42 @@ export default class LifecycleUtils {
         });
     }
 
+    /**
+     * Find the most relevant non-current version transition rule for the given transitions array
+     * and any previously stored non-current version transition from another rule.
+     * @param params - The function parameters
+     * @param params.transitions - Array of lifecycle non-current version transitions
+     * @param params.lastModified - The object's last modified
+     * date
+     * @return The most applicable non-current version transition rule
+     */
+    getApplicableNCVTransition(params: {
+        store: any;
+        currentDate: Date;
+        transitions: any[];
+        lastModified: string;
+    }) {
+        const { transitions, store, lastModified, currentDate } = params;
+        const transition = transitions.reduce((result, transition) => {
+            const isApplicable = // Is the transition time in the past?
+                this._datetime.getTimestamp(currentDate) >=
+                this._datetime.getNCVTransitionTimestamp(transition, lastModified)!;
+            if (!isApplicable) {
+                return result;
+            }
+            return this.compareNCVTransitions({
+                transition1: transition,
+                transition2: result,
+                lastModified,
+            });
+        }, undefined);
+        return this.compareNCVTransitions({
+            transition1: transition,
+            transition2: store.NoncurrentVersionTransition,
+            lastModified,
+        });
+    }
+
     // TODO
     /**
      * Filter out all rules based on `Status` and `Filter` (Prefix and Tags)

@@ -207,6 +284,7 @@ export default class LifecycleUtils {
     // Names are long, so obscuring a bit
     const ncve = 'NoncurrentVersionExpiration';
     const ncd = 'NoncurrentDays';
+    const nncv = 'NewerNoncurrentVersions';
 
     if (!store[ncve]) {
         store[ncve] = {};

@@ -214,6 +292,7 @@ export default class LifecycleUtils {
     if (!store[ncve][ncd] || rule[ncve][ncd] < store[ncve][ncd]) {
         store[ncve].ID = rule.ID;
         store[ncve][ncd] = rule[ncve][ncd];
+        store[ncve][nncv] = rule[ncve][nncv];
     }
 }
 if (rule.AbortIncompleteMultipartUpload

@@ -239,7 +318,17 @@ export default class LifecycleUtils {
         currentDate,
     });
 }
-// TODO: Add support for NoncurrentVersionTransitions.
+
+const ncvt = 'NoncurrentVersionTransitions';
+const hasNoncurrentVersionTransitions = Array.isArray(rule[ncvt]) && rule[ncvt].length > 0;
+if (hasNoncurrentVersionTransitions && this._supportedRules.includes('noncurrentVersionTransition')) {
+    store.NoncurrentVersionTransition = this.getApplicableNCVTransition({
+        transitions: rule.NoncurrentVersionTransitions,
+        lastModified: metadata.LastModified,
+        store,
+        currentDate,
+    });
+}
 return store;
 }, {});
 // Do not transition to a location where the object is already stored.

@@ -247,6 +336,12 @@ export default class LifecycleUtils {
     && applicableRules.Transition.StorageClass === metadata.StorageClass) {
     applicableRules.Transition = undefined;
 }
+
+if (applicableRules.NoncurrentVersionTransition
+    && applicableRules.NoncurrentVersionTransition.StorageClass === metadata.StorageClass) {
+    applicableRules.NoncurrentVersionTransition = undefined;
+}
+
 return applicableRules;
 /* eslint-enable no-param-reassign */
 }
@@ -1,5 +1,21 @@
-export const getHexMD5 = (base64MD5: WithImplicitCoercion<string>) =>
-    Buffer.from(base64MD5, 'base64').toString('hex');
+const msInOneDay = 24 * 60 * 60 * 1000; // Milliseconds in a day.
+
+export const getMD5Buffer = (base64MD5: WithImplicitCoercion<string> | Uint8Array) =>
+    base64MD5 instanceof Uint8Array ? base64MD5 : Buffer.from(base64MD5, 'base64');
+
+export const getHexMD5 = (base64MD5: WithImplicitCoercion<string> | Uint8Array) =>
+    getMD5Buffer(base64MD5).toString('hex');
 
 export const getBase64MD5 = (hexMD5: WithImplicitCoercion<string>) =>
     Buffer.from(hexMD5, 'hex').toString('base64');
 
+/**
+ * Calculates the number of scaled milliseconds per day based on the given time progression factor.
+ * This function is intended for testing and simulation purposes only.
+ * @param {number} timeProgressionFactor - The desired time progression factor for scaling.
+ * @returns {number} The number of scaled milliseconds per day.
+ * If the result is 0, the minimum value of 1 millisecond is returned.
+ */
+export const scaleMsPerDay = (timeProgressionFactor: number): number =>
+    Math.round(msInOneDay / (timeProgressionFactor || 1)) || 1;
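The MD5 helpers now accept either a base64 string or a raw `Uint8Array`, because the newer Azure SDK reports `contentMD5` as bytes rather than base64 text. A quick round-trip sketch using the well-known MD5 of the empty string:

    // Both input shapes yield the same hex digest.
    const fromBase64 = getHexMD5('1B2M2Y8AsgTpgAmY7PhCfg==');
    const fromBytes = getHexMD5(Buffer.from('1B2M2Y8AsgTpgAmY7PhCfg==', 'base64'));
    console.log(fromBase64 === fromBytes); // true: 'd41d8cd98f00b204e9800998ecf8427e'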
@@ -3,6 +3,11 @@ import * as werelogs from 'werelogs';
 import errors, { ArsenalError } from '../errors';
 import escapeForXml from './escapeForXml';
 
+export interface BucketTag {
+    Key: string;
+    Value: string;
+};
+
 const errorInvalidArgument = () => errors.InvalidArgument
     .customizeDescription('The header \'x-amz-tagging\' shall be ' +
     'encoded as UTF-8 then URLEncoded URL query parameters without ' +

@@ -32,6 +37,15 @@ export const _validator = {
     && tag.Key[0] !== undefined && tag.Value[0] !== undefined
     && typeof tag.Key[0] === 'string' && typeof tag.Value[0] === 'string',
 
+    // Allowed characters are letters, whitespace, and numbers, plus
+    // the following special characters: + - = . _ : /
+    // Maximum key length: 128 Unicode characters
+    // Maximum value length: 256 Unicode characters
+    validateTagObjectStructure: (tag: BucketTag) => tag
+        && Object.keys(tag).length === 2
+        && typeof tag.Key === 'string' && typeof tag.Value === 'string'
+        && tag.Key.length >= 1 && tag.Value.length >= 1,
+
     validateXMLStructure: (result: any) =>
         result && Object.keys(result).length === 1 &&
         result.Tagging &&

@@ -100,12 +114,47 @@ function _validateTags(tags: Array<{ Key: string[], Value: string[] }>) {
     }
     // not repeating keys
     if (tags.length > Object.keys(tagsResult).length) {
-        return errors.InvalidTag.customizeDescription('Cannot provide ' +
-            'multiple Tags with the same key');
+        return errors.InvalidTag.customizeDescription(
+            'Cannot provide multiple Tags with the same key'
+        );
     }
     return tagsResult;
 }
 
+/** areTagsValid - Validate bucket tags
+ * @param tags - tags parsed from xml to be validated
+ * @return result - true if the tags are valid, false otherwise
+ */
+export function areTagsValid(tags: Array<BucketTag>) {
+    if (tags.length === 0) {
+        return true;
+    }
+    // Maximum number of tags per resource: 50
+    if (tags.length > 50) {
+        return false;
+    }
+
+    const tagsResult = {};
+    for (const tag of tags) {
+        if (!_validator.validateTagObjectStructure(tag)) {
+            return false;
+        }
+        const { Key: key, Value: value } = tag;
+
+        const result = _validator.validateKeyValue(key, value);
+        if (result instanceof Error) {
+            return false;
+        }
+
+        tagsResult[key] = value;
+    }
+    // not repeating keys
+    if (tags.length > Object.keys(tagsResult).length) {
+        return false;
+    }
+    return true;
+}
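A small usage sketch of the added validator; the tag shapes mirror the `BucketTag` interface above, and the values are illustrative:

    const ok = areTagsValid([
        { Key: 'team', Value: 'storage' },
        { Key: 'env', Value: 'prod' },
    ]); // true

    const dup = areTagsValid([
        { Key: 'env', Value: 'prod' },
        { Key: 'env', Value: 'dev' }, // repeated key
    ]); // false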
 
 /** parseTagXml - Parse and validate xml body, returning callback with object
  * tags : { key: value}
  * @param xml - xml body to parse and validate

@@ -1,4 +1,7 @@
 import assert from 'assert';
+
+import { RequestLogger } from 'werelogs';
+
 import errors from '../errors';
 import routeGET from './routes/routeGET';
 import routePUT from './routes/routePUT';
@@ -1,3 +1,5 @@
+import { RequestLogger } from 'werelogs';
+
 import * as routesUtils from '../routesUtils';
 import errors from '../../errors';
 import StatsClient from '../../metrics/StatsClient';

@@ -41,6 +43,8 @@ export default function routeDELETE(
         return call('bucketDeleteEncryption');
     } else if (query?.tagging !== undefined) {
         return call('bucketDeleteTagging');
+    } else if (query?.quota !== undefined) {
+        return call('bucketDeleteQuota');
     }
     call('bucketDelete');
 } else {

@@ -1,3 +1,5 @@
+import { RequestLogger } from 'werelogs';
+
 import * as routesUtils from '../routesUtils';
 import errors from '../../errors';
 import * as http from 'http';

@@ -58,6 +60,8 @@ export default function routerGET(
     call('bucketGetEncryption');
 } else if (query.search !== undefined) {
     call('metadataSearch')
+} else if (query.quota !== undefined) {
+    call('bucketGetQuota');
 } else {
     // GET bucket
     call('bucketGet');
@@ -1,3 +1,5 @@
+import { RequestLogger } from 'werelogs';
+
 import * as routesUtils from '../routesUtils';
 import errors from '../../errors';
 import StatsClient from '../../metrics/StatsClient';

@@ -1,3 +1,5 @@
+import { RequestLogger } from 'werelogs';
+
 import * as routesUtils from '../routesUtils';
 import errors from '../../errors';
 import * as http from 'http';

@@ -1,3 +1,5 @@
+import { RequestLogger } from 'werelogs';
+
 import * as routesUtils from '../routesUtils';
 import errors from '../../errors';
 import * as http from 'http';

@@ -1,3 +1,5 @@
+import { RequestLogger } from 'werelogs';
+
 import * as routesUtils from '../routesUtils';
 import errors from '../../errors';
 import * as http from 'http';

@@ -103,6 +105,13 @@ export default function routePUT(
             return routesUtils.responseNoBody(err, corsHeaders,
                 response, 200, log);
         });
+    } else if (query.quota !== undefined) {
+        api.callApiMethod('bucketUpdateQuota', request, response,
+            log, (err, resHeaders) => {
+                routesUtils.statsReport500(err, statsClient);
+                return routesUtils.responseNoBody(err, resHeaders, response,
+                    200, log);
+            });
     } else {
         // PUT bucket
         return api.callApiMethod('bucketPut', request, response, log,
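The same `?quota` query-string dispatch recurs across the DELETE, GET, and PUT route hunks above. A condensed sketch of the shared pattern; `dispatchQuota` is a hypothetical helper, while the handler names are the ones the hunks actually register:

    // Sketch of the query-based dispatch these route hunks share.
    function dispatchQuota(method: string, query: Record<string, string | undefined>) {
        if (query.quota === undefined) {
            return undefined; // fall through to the default bucket route
        }
        switch (method) {
        case 'DELETE': return 'bucketDeleteQuota';
        case 'GET': return 'bucketGetQuota';
        case 'PUT': return 'bucketUpdateQuota';
        default: return undefined;
        }
    }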
@@ -1,3 +1,5 @@
+import { RequestLogger } from 'werelogs';
+
 import * as routesUtils from '../routesUtils';
 import errors from '../../errors';
 import * as http from 'http';

@@ -27,6 +29,11 @@ export default function routerWebsite(
         routesUtils.statsReport500(err, statsClient);
         // request being redirected
         if (redirectInfo) {
+            if (err && redirectInfo.withError) {
+                return routesUtils.redirectRequestOnError(err,
+                    'GET', redirectInfo, dataGetInfo, dataRetrievalParams,
+                    response, resMetaHeaders, log)
+            }
             // note that key might have been modified in websiteGet
             // api to add index document
             return routesUtils.redirectRequest(redirectInfo,

@@ -57,6 +64,11 @@ export default function routerWebsite(
     (err, resMetaHeaders, redirectInfo, key) => {
         routesUtils.statsReport500(err, statsClient);
         if (redirectInfo) {
+            if (err && redirectInfo.withError) {
+                return routesUtils.redirectRequestOnError(err,
+                    'HEAD', redirectInfo, null, dataRetrievalParams,
+                    response, resMetaHeaders, log)
+            }
             return routesUtils.redirectRequest(redirectInfo,
 // TODO ARSN-217 encrypted does not exists in request.connection
 // @ts-ignore
@@ -1,12 +1,16 @@
 import * as url from 'url';
+import * as http from 'http';
+import { eachSeries } from 'async';
+
+import { RequestLogger } from 'werelogs';
+
 import * as ipCheck from '../ipCheck';
 import errors, { ArsenalError } from '../errors';
 import * as constants from '../constants';
-import { eachSeries } from 'async';
 import DataWrapper from '../storage/data/DataWrapper';
-import * as http from 'http';
 import StatsClient from '../metrics/StatsClient';
 import { objectKeyByteLimit } from '../constants';
+const jsutil = require('../jsutil');
 
 export type CallApiMethod = (
     methodName: string,

@@ -409,6 +413,7 @@ function retrieveData(
     return eachSeries(locations,
         (current, next) => data.get(current, response, log,
             (err: any, readable: http.IncomingMessage) => {
+                const cbOnce = jsutil.once(next);
                 // NB: readable is of IncomingMessage type
                 if (err) {
                     log.error('failed to get object', {

@@ -416,7 +421,7 @@ function retrieveData(
                         method: 'retrieveData',
                     });
                     _destroyResponse();
-                    return next(err);
+                    return cbOnce(err);
                 }
                 // response.isclosed is set by the S3 server. Might happen if
                 // the S3-client closes the connection before the first request

@@ -430,19 +435,19 @@ function retrieveData(
                     // @ts-ignore
                     responseErr.code = 'ResponseError';
                     responseErr.message = 'response closed by client request before all data sent';
-                    return next(responseErr);
+                    return cbOnce(responseErr);
                 }
                 // readable stream successfully consumed
                 readable.on('end', () => {
                     currentStream = null;
                     log.debug('readable stream end reached');
-                    return next();
+                    return cbOnce();
                 });
                 // errors on server side with readable stream
                 readable.on('error', err => {
                     log.error('error piping data from source');
                     _destroyResponse();
-                    return next(err);
+                    return cbOnce(err);
                 });
                 currentStream = readable;
                 return readable.pipe(response, { end: false });
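`jsutil.once`, used here and in the backend clients below, guards Node-style callbacks against double invocation when a stream can emit both `error` and `end`. A minimal sketch of the idea, assuming `once` keeps only the first call:

    // Minimal once() sketch: subsequent calls become no-ops.
    function once<T extends (...args: any[]) => any>(fn: T): T {
        let called = false;
        return ((...args: any[]) => {
            if (called) {
                return undefined;
            }
            called = true;
            return fn(...args);
        }) as T;
    }

    const next = once((err?: Error) => console.log('called with', err));
    next();               // fires
    next(new Error('x')); // ignored: the callback already ran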
@@ -689,6 +694,8 @@ export function streamUserErrorPage(
     log: RequestLogger,
 ) {
     setCommonResponseHeaders(corsHeaders, response, log);
+    response.setHeader('x-amz-error-code', err.message);
+    response.setHeader('x-amz-error-message', err.description);
     response.writeHead(err.code, { 'Content-type': 'text/html' });
     response.on('finish', () => {
         // TODO ARSN-216 Fix logger

@@ -871,7 +878,7 @@ export function redirectRequest(
     }
     let redirectLocation = justPath ? `/${redirectKey}` :
         `${redirectProtocol}://${redirectHostName}/${redirectKey}`;
-    if (!redirectKey && redirectLocationHeader) {
+    if (!redirectKey && redirectLocationHeader && redirectLocation !== '/') {
         // remove hanging slash
         redirectLocation = redirectLocation.slice(0, -1);
     }

@@ -888,6 +895,52 @@ export function redirectRequest(
     return undefined;
 }
 
+/**
+ * redirectRequestOnError - redirect with an error body
+ * @param err - arsenal error object
+ * @param method - HTTP method
+ * @param routingInfo - info for routing
+ * @param [routingInfo.withError] - flag to differentiate from routing rules
+ * @param [routingInfo.location] - location header
+ * @param dataLocations --
+ * - array of locations to get streams from backend
+ * @param retrieveDataParams - params to create instance of
+ * data retrieval function
+ * @param response - response object
+ * @param corsHeaders - CORS-related response headers
+ * @param log - Werelogs instance
+ */
+export function redirectRequestOnError(
+    err: ArsenalError,
+    method: 'HEAD' | 'GET',
+    routingInfo: {
+        withError: true;
+        location: string;
+    },
+    dataLocations: { size: string | number }[] | null,
+    retrieveDataParams: any,
+    response: http.ServerResponse,
+    corsHeaders: { [key: string]: string },
+    log: RequestLogger,
+) {
+    response.setHeader('Location', routingInfo.location);
+
+    if (!dataLocations && err.is.Found) {
+        if (method === 'HEAD') {
+            return errorHeaderResponse(err, response, corsHeaders, log);
+        }
+        response.setHeader('x-amz-error-code', err.message);
+        response.setHeader('x-amz-error-message', err.description);
+        return errorHtmlResponse(err, false, '', response, corsHeaders, log);
+    }
+
+    // This is reached only for website error document (GET only)
+    const overrideErrorCode = err.flatten();
+    overrideErrorCode.code = 301;
+    return streamUserErrorPage(ArsenalError.unflatten(overrideErrorCode)!,
+        dataLocations || [], retrieveDataParams, response, corsHeaders, log);
+}
 
 /**
  * Get bucket name and object name from the request
 * @param request - http request object
@@ -2,6 +2,8 @@ const async = require('async');
 const PassThrough = require('stream').PassThrough;
 const assert = require('assert');
 
+const { Logger } = require('werelogs');
+
 const errors = require('../../errors').default;
 const MD5Sum = require('../../s3middleware/MD5Sum').default;
 const NullStream = require('../../s3middleware/nullStream').default;

@@ -27,6 +29,7 @@ class DataWrapper {
         this.metadata = metadata;
         this.locStorageCheckFn = locStorageCheckFn;
         this.vault = vault;
+        this.logger = new Logger('DataWrapper');
     }
 
     put(cipherBundle, value, valueSize, keyContext, backendInfo, log, cb) {

@@ -127,7 +130,7 @@ class DataWrapper {
     }
 
     delete(objectGetInfo, log, cb) {
-        const callback = cb || log.end;
+        const callback = cb || (() => {});
         const isMdModelVersion2 = typeof(objectGetInfo) === 'string';
         const isRequiredStringKey =
             constants.clientsRequireStringKey[this.implName];

@@ -176,7 +179,9 @@ class DataWrapper {
                 newObjDataStoreName)) {
             return process.nextTick(cb);
         }
-        log.trace('initiating batch delete', {
+        const delLog = this.logger.newRequestLoggerFromSerializedUids(
+            log.getSerializedUids());
+        delLog.trace('initiating batch delete', {
             keys: locations,
             implName: this.implName,
             method: 'batchDelete',

@@ -202,21 +207,21 @@ class DataWrapper {
             return false;
         });
         if (shouldBatchDelete && keys.length > 1) {
-            return this.client.batchDelete(backendName, { keys }, log, cb);
+            return this.client.batchDelete(backendName, { keys }, delLog, cb);
         }
         return async.eachLimit(locations, 5, (loc, next) => {
-            process.nextTick(() => this.delete(loc, log, next));
+            process.nextTick(() => this.delete(loc, delLog, next));
         },
         err => {
             if (err) {
-                log.end().error('batch delete failed', { error: err });
+                delLog.end().error('batch delete failed', { error: err });
                 // deletion of non-existing objects result in 204
                 if (err.code === 404) {
                     return cb();
                 }
                 return cb(err);
             }
-            log.end().trace('batch delete successfully completed');
+            delLog.end().trace('batch delete successfully completed');
             return cb();
         });
     }

@@ -984,13 +989,14 @@ class DataWrapper {
         return this.client.delete(objectGetInfo, log.getSerializedUids(),
             err => {
                 if (err) {
-                    if (err.is.ObjNotFound) {
+                    // TODO: sproxydclient and hdclient does not return standard Arsenal error yet.
+                    if (err.code === 404) {
                         log.info('no such key in datastore', {
                             objectGetInfo,
                             implName: this.implName,
                             moreRetries: 'no',
                         });
-                        return cb(err);
+                        return cb(errors.ObjNotFound);
                     }
                     log.error('delete error from datastore', {
                         error: err,
@@ -1,11 +1,10 @@
-const https = require('https');
-const http = require('http');
+const { http, https } = require('httpagent');
 const url = require('url');
 const AWS = require('aws-sdk');
-const Sproxy = require('sproxydclient');
-const Hyperdrive = require('hdclient');
 const HttpsProxyAgent = require('https-proxy-agent');
 
+require("aws-sdk/lib/maintenance_mode_message").suppress = true;
+
 const constants = require('../../constants');
 const DataFileBackend = require('./file/DataFileInterface');
 const inMemory = require('./in_memory/datastore').backend;

@@ -26,8 +25,13 @@ function parseLC(config, vault) {
     if (locationObj.type === 'file') {
         clients[location] = new DataFileBackend(config);
     }
+    if (locationObj.type === 'vitastor') {
+        const VitastorBackend = require('./vitastor/VitastorBackend');
+        clients[location] = new VitastorBackend(location, locationObj.details);
+    }
     if (locationObj.type === 'scality') {
         if (locationObj.details.connector.sproxyd) {
+            const Sproxy = require('sproxydclient');
             clients[location] = new Sproxy({
                 bootstrap: locationObj.details.connector
                     .sproxyd.bootstrap,

@@ -42,6 +46,7 @@ function parseLC(config, vault) {
             });
             clients[location].clientType = 'scality';
         } else if (locationObj.details.connector.hdclient) {
+            const Hyperdrive = require('hdclient');
             clients[location] = new Hyperdrive.hdcontroller.HDProxydClient(
                 locationObj.details.connector.hdclient);
             clients[location].clientType = 'scality';

@@ -77,8 +82,8 @@ function parseLC(config, vault) {
         connectionAgent = new HttpsProxyAgent(options);
     } else {
         connectionAgent = sslEnabled ?
-            new https.Agent(httpAgentConfig) :
-            new http.Agent(httpAgentConfig);
+            new https.Agent(httpAgentConfig, { maxSockets: false }) :
+            new http.Agent(httpAgentConfig, { maxSockets: false });
     }
     const httpOptions = { agent: connectionAgent, timeout: 0 };
     const s3Params = {
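Note the pattern in the parseLC hunks above: backend client libraries are now `require`d inside the branch that actually uses them, so a deployment without, say, sproxyd does not need that dependency installed at all. A minimal sketch of the idea, with a hypothetical `makeClient` helper:

    // Sketch: load a backend driver only when its location type is configured.
    function makeClient(locationObj: { type: string; details: any }) {
        if (locationObj.type === 'vitastor') {
            // Loaded lazily: only required when a vitastor location exists.
            const VitastorBackend = require('./vitastor/VitastorBackend');
            return new VitastorBackend('vitastor-location', locationObj.details);
        }
        return null;
    }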
@@ -5,6 +5,7 @@ const { parseTagFromQuery } = require('../../s3middleware/tagging');
 const { externalBackendHealthCheckInterval } = require('../../constants');
 const DataFileBackend = require('./file/DataFileInterface');
 const { createLogger, checkExternalBackend } = require('./external/utils');
+const jsutil = require('../../jsutil');
 
 class MultipleBackendGateway {
     constructor(clients, metadata, locStorageCheckFn) {

@@ -199,11 +200,12 @@ class MultipleBackendGateway {
     uploadPart(request, streamingV4Params, stream, size, location, key,
         uploadId, partNumber, bucketName, log, cb) {
         const client = this.clients[location];
+        const cbOnce = jsutil.once(cb);
 
         if (client.uploadPart) {
             return this.locStorageCheckFn(location, size, log, err => {
                 if (err) {
-                    return cb(err);
+                    return cbOnce(err);
                 }
                 return client.uploadPart(request, streamingV4Params, stream,
                     size, key, uploadId, partNumber, bucketName, log,

@@ -217,14 +219,14 @@ class MultipleBackendGateway {
                             'metric following object PUT failure',
                             { error: error.message });
                     }
-                    return cb(err);
+                    return cbOnce(err);
                 });
             }
-            return cb(null, partInfo);
+            return cbOnce(null, partInfo);
             });
         });
     }
-        return cb();
+        return cbOnce();
     }
 
     listParts(key, uploadId, location, bucketName, partNumberMarker, maxParts,
@@ -8,6 +8,7 @@ const getMetaHeaders =
 const { prepareStream } = require('../../../s3middleware/prepareStream');
 const { createLogger, logHelper, removeQuotes, trimXMetaPrefix } =
     require('./utils');
+const jsutil = require('../../../jsutil');
 
 const missingVerIdInternalError = errors.InternalError.customizeDescription(
     'Invalid state. Please ensure versioning is enabled ' +

@@ -317,9 +318,11 @@ class AwsClient {
     uploadPart(request, streamingV4Params, stream, size, key, uploadId,
         partNumber, bucketName, log, callback) {
         let hashedStream = stream;
+        const cbOnce = jsutil.once(callback);
+
         if (request) {
             const partStream = prepareStream(request, streamingV4Params,
-                this._vault, log, callback);
+                this._vault, log, cbOnce);
             hashedStream = new MD5Sum();
             partStream.pipe(hashedStream);
         }

@@ -333,7 +336,7 @@ class AwsClient {
         if (err) {
             logHelper(log, 'error', 'err from data backend ' +
                 'on uploadPart', err, this._dataStoreName, this.clientType);
-            return callback(errors.ServiceUnavailable
+            return cbOnce(errors.ServiceUnavailable
                 .customizeDescription('Error returned from ' +
                     `${this.type}: ${err.message}`),
             );

@@ -347,7 +350,7 @@ class AwsClient {
             dataStoreName: this._dataStoreName,
             dataStoreETag: noQuotesETag,
         };
-        return callback(null, dataRetrievalInfo);
+        return cbOnce(null, dataRetrievalInfo);
     });
 }
@@ -1,62 +1,109 @@
-const url = require('url');
-
-const azure = require('azure-storage');
+const { BlobServiceClient, StorageSharedKeyCredential, AnonymousCredential } = require('@azure/storage-blob');
+const { ClientSecretCredential } = require('@azure/identity');
 const errors = require('../../../errors').default;
-const azureMpuUtils = require('../../../s3middleware/azureHelpers/mpuUtils').default;
+const azureMpuUtils = require('../../../s3middleware/azureHelpers/mpuUtils');
 const { validateAndFilterMpuParts } =
     require('../../../s3middleware/processMpuParts');
 
 const { createLogger, logHelper, translateAzureMetaHeaders } =
     require('./utils');
+const objectUtils = require('../../../s3middleware/objectUtils');
 
 const constants = require('../../../constants');
 const packageVersion = require('../../../../package.json').version;
 
-azure.Constants.USER_AGENT_PRODUCT_NAME = constants.productName;
-azure.Constants.USER_AGENT_PRODUCT_VERSION = packageVersion;
-
 class AzureClient {
+    static addQueryParams(endpoint, token) {
+        const url = new URL(endpoint);
+        const query = token.startsWith('?') ? token.slice(1) : token;
+        if (!url.search) {
+            url.search = `?${query}`;
+        } else if (url.search === '?') {
+            url.search += query;
+        } else {
+            url.search += `&${query}`;
+        }
+        return url.toString();
+    }
+
     constructor(config) {
         this._azureStorageEndpoint = config.azureStorageEndpoint;
         this._azureStorageCredentials = config.azureStorageCredentials;
         this._azureContainerName = config.azureContainerName;
-        this._client = azure.createBlobService(
-            this._azureStorageCredentials.storageAccountName,
-            this._azureStorageCredentials.storageAccessKey,
-            this._azureStorageEndpoint);
-        this._client.enableGlobalHttpAgent = true;
+        const cred = (credentialsConfig => {
+            switch (credentialsConfig.authMethod) {
+            case 'client-secret':
+                return new ClientSecretCredential(
+                    credentialsConfig.tenantId,
+                    credentialsConfig.clientId,
+                    credentialsConfig.clientKey,
+                );
+
+            case 'shared-access-signature':
+                this._azureStorageEndpoint = AzureClient.addQueryParams(
+                    this._azureStorageEndpoint, credentialsConfig.sasToken);
+                return new AnonymousCredential();
+
+            case 'shared-key':
+            default:
+                return new StorageSharedKeyCredential(
+                    credentialsConfig.storageAccountName,
+                    credentialsConfig.storageAccessKey,
+                );
+            }
+        })(this._azureStorageCredentials);
+        const proxyOptions = (() => {
+            if (!config.proxy || !config.proxy.url) {
+                return undefined;
+            }
+            // NOTE: config.proxy.certs is not supported
+            const parsedUrl = new URL(config.proxy.url);
+            return {
+                host: parsedUrl.host,
+                port: parsedUrl.port || 80,
+                username: parsedUrl.username || undefined,
+                password: parsedUrl.password || undefined,
+            };
+        })();
+        this._client = new BlobServiceClient(this._azureStorageEndpoint, cred, {
+            keepAliveOptions: {
+                enable: false, // Enable use of global HTTP agent
+            },
+            proxyOptions,
+            userAgentOptions: {
+                userAgentPrefix: `${constants.productName}/${packageVersion} `,
+            },
+        }).getContainerClient(this._azureContainerName);
         this._dataStoreName = config.dataStoreName;
         this._bucketMatch = config.bucketMatch;
-        if (config.proxy && config.proxy.url) {
-            const parsedUrl = url.parse(config.proxy.url);
-            if (!parsedUrl.port) {
-                parsedUrl.port = 80;
-            }
-            const proxyParams = parsedUrl;
-            if (config.proxy.certs) {
-                Object.assign(proxyParams, config.proxy.certs);
-            }
-            this._client.setProxy(proxyParams);
-        }
     }
 
-    _errorWrapper(s3Method, azureMethod, args, log, cb) {
+    /**
+     * Run azure method call.
+     * @param {string} [s3Method] S3 method name
+     * @param {string} [azureMethod] Azure method name
+     * @param {ErrorWrapper~Command} [command] Actual command to run
+     * @param {RequestLogger} [log] Logger
+     * @param {ErrorWrapper~Cb} [cb] The final callback
+     * @returns {void}
+     *
+     * @callback ErrorWrapper~Command
+     * @param {azure.ContainerClient} [client] Azure client to use
+     * @returns {Promise<any>}
+     *
+     * @callback ErrorWrapper~Cb
+     * @param {azure.ArsenalError} [arsenalErr] Error returned by the command
+     * @param {any} [result] Result of Azure SDK command
+     * @returns {void}
+     */
+    _errorWrapper(s3Method, azureMethod, command, log, cb) {
         if (log) {
-            log.info(`calling azure ${azureMethod}`);
-        }
-        try {
-            this._client[azureMethod].apply(this._client, args);
-        } catch (err) {
-            const error = errors.ServiceUnavailable;
-            if (log) {
-                log.error('error thrown by Azure Storage Client Library',
-                    { error: err.message, stack: err.stack, s3Method,
-                        azureMethod, dataStoreName: this._dataStoreName });
-            }
-            cb(error.customizeDescription('Error from Azure ' +
-                `method: ${azureMethod} on ${s3Method} S3 call: ` +
-                `${err.message}`));
+            log.info(`calling azure ${azureMethod} in ${s3Method}`);
         }
+        command(this._client).then(
+            result => cb(null, result),
+            cb,
+        );
     }
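The static `addQueryParams` helper added above splices a SAS token into whatever query string the configured endpoint already carries. A quick sketch of two of its branches; the endpoint and token values are illustrative:

    // No existing query: the token becomes the query string.
    AzureClient.addQueryParams('https://acct.blob.core.windows.net', 'sv=2021&sig=abc');
    // -> 'https://acct.blob.core.windows.net/?sv=2021&sig=abc'

    // Existing query: the token is appended with '&'.
    AzureClient.addQueryParams('https://acct.blob.core.windows.net/?a=1', '?sv=2021');
    // -> 'https://acct.blob.core.windows.net/?a=1&sv=2021'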
 
     _createAzureKey(requestBucketName, requestObjectKey,

@@ -119,6 +166,32 @@ class AzureClient {
         };
     }
 
+    /**
+     * Build Azure HTTP headers for content settings
+     * @param {object} [properties] The blob properties to set.
+     * @param {string} [properties.contentType] The MIME content type of the blob.
+     * The default type is application/octet-stream.
+     * @param {string} [properties.contentEncoding] The content encodings that have been applied
+     * to the blob.
+     * @param {string} [properties.contentLanguage] The natural languages used by this resource.
+     * @param {string} [properties.cacheControl] The blob's cache control.
+     * @param {string} [properties.contentDisposition] The blob's content disposition.
+     * @param {string} [properties.contentMD5] The blob's MD5 hash.
+     * @returns {BlobHTTPHeaders} The headers
+     */
+    _getAzureContentSettingsHeaders(properties) {
+        return {
+            blobContentMD5: properties.contentMD5
+                ? objectUtils.getMD5Buffer(properties.contentMD5)
+                : undefined,
+            blobContentType: properties.contentType || undefined,
+            blobCacheControl: properties.cacheControl || undefined,
+            blobContentDisposition: properties.contentDisposition || undefined,
+            blobContentEncoding: properties.contentEncoding || undefined,
+            blobContentLanguage: properties.blobContentLanguage || undefined,
+        };
+    }
+
     put(stream, size, keyContext, reqUids, callback, skey, metadata) {
         const log = createLogger(reqUids);
         // before blob is put, make sure there is no ongoing MPU with same key
@@ -134,50 +207,59 @@ class AzureClient {
     const options = {
         metadata: translateAzureMetaHeaders(keyContext.metaHeaders,
             keyContext.tagging),
-        contentSettings: {
-            contentType: keyContext.contentType || undefined,
-            cacheControl: keyContext.cacheControl || undefined,
-            contentDisposition: keyContext.contentDisposition ||
-                undefined,
-            contentEncoding: keyContext.contentEncoding || undefined,
-        },
+        blobHTTPHeaders: this._getAzureContentSettingsHeaders(
+            keyContext || {}),
     };
     if (size === 0) {
-        return this._errorWrapper('put', 'createBlockBlobFromText',
-            [this._azureContainerName, azureKey, '', options,
-            err => {
-                if (err) {
-                    logHelper(log, 'error', 'err from Azure PUT data ' +
-                        'backend', err, this._dataStoreName);
-                    return callback(errors.ServiceUnavailable
-                        .customizeDescription('Error returned from ' +
-                        `Azure: ${err.message}`));
-                }
-                return callback(null, azureKey);
-            }], log, callback);
+        return this._errorWrapper('put', 'uploadData', async client => {
+            try {
+                await client.getBlockBlobClient(azureKey).upload('', 0, options);
+                return azureKey;
+            } catch (err) {
+                logHelper(log, 'error', 'err from Azure PUT data backend',
+                    err, this._dataStoreName);
+                throw errors.ServiceUnavailable.customizeDescription(
+                    `Error returned from Azure: ${err.message}`);
+            }
+        }, log, callback);
     }
-    return this._errorWrapper('put', 'createBlockBlobFromStream',
-        [this._azureContainerName, azureKey, stream, size, options,
-        err => {
-            if (err) {
-                logHelper(log, 'error', 'err from Azure PUT data ' +
-                    'backend', err, this._dataStoreName);
-                return callback(errors.ServiceUnavailable
-                    .customizeDescription('Error returned from ' +
-                    `Azure: ${err.message}`));
-            }
-            return callback(null, azureKey);
-        }], log, callback);
+    return this._errorWrapper('put', 'createBlockBlobFromStream', async client => {
+        try {
+            await client.getBlockBlobClient(azureKey).upload(() => stream, size, options);
+            return azureKey;
+        } catch (err) {
+            logHelper(log, 'error', 'err from Azure PUT data backend',
+                err, this._dataStoreName);
+            throw errors.ServiceUnavailable.customizeDescription(
+                `Error returned from Azure: ${err.message}`);
+        }
+    }, log, callback);
 });
 }
 
+/**
+ * Build BlobRequestConditions from azureStreamingOptions
+ * @param {object} [objectGetInfoOptions] Azure streaming options
+ * @param {object} [objectGetInfoOptions.accessConditions] Access conditions
+ * @param {Date} [objectGetInfoOptions.accessConditions.DateUnModifiedSince] Filter objects not
+ * modified since that date.
+ * @returns {BlobRequestConditions} Request conditions
+ */
+_getAzureConditions(objectGetInfoOptions) {
+    const accessConditions = objectGetInfoOptions.accessConditions || {};
+    return {
+        ifUnmodifiedSince: accessConditions.DateUnModifiedSince || undefined,
+    };
+}
+
 head(objectGetInfo, reqUids, callback) {
     const log = createLogger(reqUids);
-    const { key, azureStreamingOptions } = objectGetInfo;
-    return this._errorWrapper('head', 'getBlobProperties',
-        [this._azureContainerName, key, azureStreamingOptions,
-        (err, data) => {
-            if (err) {
+    const { key } = objectGetInfo;
+    return this._errorWrapper('head', 'getBlobProperties', async client => {
+        try {
+            const data = await client.getBlockBlobClient(key).getProperties();
+            return data;
+        } catch (err) {
             let logLevel;
             let retError;
             if (err.code === 'NotFound') {
@@ -185,42 +267,46 @@ class AzureClient {
             retError = errors.LocationNotFound;
         } else {
             logLevel = 'error';
-            retError = errors.ServiceUnavailable
-                .customizeDescription(
+            retError = errors.ServiceUnavailable.customizeDescription(
                 `Error returned from Azure: ${err.message}`);
         }
         logHelper(log, logLevel, 'err from Azure HEAD data backend',
             err, this._dataStoreName);
-        return callback(retError);
+        throw retError;
     }
-    return callback(null, data);
-    }], log, callback);
+    }, log, callback);
 }
 
 get(objectGetInfo, range, reqUids, callback) {
     const log = createLogger(reqUids);
     // for backwards compatibility
     const { key, response, azureStreamingOptions } = objectGetInfo;
     let streamingOptions;
+    let rangeStart = 0;
+    let rangeEnd = undefined;
     if (azureStreamingOptions) {
         // option coming from api.get()
         streamingOptions = azureStreamingOptions;
+        rangeStart = (typeof azureStreamingOptions.rangeStart === 'string')
+            ? parseInt(azureStreamingOptions.rangeStart, 10)
+            : azureStreamingOptions.rangeStart;
+        rangeEnd = (typeof azureStreamingOptions.rangeEnd === 'string')
+            ? parseInt(azureStreamingOptions.rangeEnd, 10)
+            : azureStreamingOptions.rangeEnd;
     } else if (range) {
         // option coming from multipleBackend.upload()
-        const rangeStart = (typeof range[0] === 'number') ? range[0].toString() : undefined;
-        const rangeEnd = range[1] ? range[1].toString() : undefined;
-        streamingOptions = { rangeStart, rangeEnd };
+        rangeStart = (typeof range[0] === 'number') ? range[0] : 0;
+        rangeEnd = range[1] || undefined;
     }
-    this._errorWrapper('get', 'getBlobToStream',
-        [this._azureContainerName, key, response, streamingOptions,
-        err => {
-            if (err) {
+    this._errorWrapper('get', 'getBlobToStream', async client => {
+        try {
+            const rsp = await client.getBlockBlobClient(key)
+                .download(rangeStart, rangeEnd - rangeStart + 1 || undefined);
+            rsp.readableStreamBody.pipe(response);
+            return response;
+        } catch (err) {
             logHelper(log, 'error', 'err from Azure GET data backend',
                 err, this._dataStoreName);
-            return callback(errors.ServiceUnavailable);
+            throw errors.ServiceUnavailable;
         }
-        return callback(null, response);
-    }], log, callback);
+    }, log, callback);
 }
 
 delete(objectGetInfo, reqUids, callback) {
@ -230,44 +316,46 @@ class AzureClient {
|
|||
objectGetInfo.key;
|
||||
let options;
|
||||
if (typeof objectGetInfo === 'object') {
|
||||
options = objectGetInfo.options;
|
||||
options = {
|
||||
conditions: this._getAzureConditions(objectGetInfo.options || {}),
|
||||
};
|
||||
}
|
||||
return this._errorWrapper('delete', 'deleteBlobIfExists',
|
||||
[this._azureContainerName, key, options,
|
||||
err => {
|
||||
if (err && err.statusCode === 412) {
|
||||
return callback(errors.PreconditionFailed);
|
||||
return this._errorWrapper('delete', 'deleteBlobIfExists', async client => {
|
||||
try {
|
||||
await client.getBlockBlobClient(key).deleteIfExists(options);
|
||||
} catch (err) {
|
||||
if (err.statusCode === 412) {
|
||||
throw errors.PreconditionFailed;
|
||||
}
|
||||
if (err) {
|
||||
const log = createLogger(reqUids);
|
||||
logHelper(log, 'error', 'error deleting object from ' +
|
||||
'Azure datastore', err, this._dataStoreName);
|
||||
return callback(errors.ServiceUnavailable
|
||||
.customizeDescription('Error returned from ' +
|
||||
`Azure: ${err.message}`));
|
||||
logHelper(log, 'error', 'error deleting object from Azure datastore',
|
||||
err, this._dataStoreName);
|
||||
throw errors.ServiceUnavailable.customizeDescription(
|
||||
`Error returned from Azure: ${err.message}`);
|
||||
}
|
||||
return callback();
|
||||
}], log, callback);
|
||||
}, log, callback);
|
||||
}
|
||||
|
||||
healthcheck(location, callback, flightCheckOnStartUp) {
|
||||
const azureResp = {};
|
||||
const healthCheckAction = flightCheckOnStartUp ?
|
||||
'createContainerIfNotExists' : 'doesContainerExist';
|
||||
this._errorWrapper('checkAzureHealth', healthCheckAction,
|
||||
[this._azureContainerName, err => {
|
||||
/* eslint-disable no-param-reassign */
|
||||
if (err) {
|
||||
azureResp[location] = { error: err.message,
|
||||
external: true };
|
||||
return callback(null, azureResp);
|
||||
this._errorWrapper('healthcheck', 'checkAzureHealth', async client => {
|
||||
try {
|
||||
if (flightCheckOnStartUp) {
|
||||
await client.createIfNotExists();
|
||||
} else {
|
||||
await client.exists();
|
||||
}
|
||||
azureResp[location] = {
|
||||
message:
|
||||
'Congrats! You can access the Azure storage account',
|
||||
message: 'Congrats! You can access the Azure storage account',
|
||||
};
|
||||
return callback(null, azureResp);
|
||||
}], null, callback);
|
||||
} catch (err) {
|
||||
azureResp[location] = {
|
||||
error: err.message,
|
||||
external: true,
|
||||
};
|
||||
}
|
||||
return azureResp;
|
||||
}, null, callback);
|
||||
}
|
||||
|
||||
uploadPart(request, streamingV4Params, partStream, size, key, uploadId,
|
||||
|
@ -321,9 +409,7 @@ class AzureClient {
|
|||
completeMPU(jsonList, mdInfo, key, uploadId, bucket, metaHeaders,
|
||||
contentSettings, tagging, log, callback) {
|
||||
const azureKey = this._createAzureKey(bucket, key, this._bucketMatch);
|
||||
const commitList = {
|
||||
UncommittedBlocks: jsonList.uncommittedBlocks || [],
|
||||
};
|
||||
const commitList = jsonList.uncommittedBlocks || [];
|
||||
let filteredPartsObj;
|
||||
if (!jsonList.uncommittedBlocks) {
|
||||
const { storedParts, mpuOverviewKey, splitter } = mdInfo;
|
||||
|
@ -336,60 +422,56 @@ class AzureClient {
|
|||
// part.locations is always array of 1, which contains data info
|
||||
const subPartIds =
|
||||
azureMpuUtils.getSubPartIds(part.locations[0], uploadId);
|
||||
commitList.UncommittedBlocks.push(...subPartIds);
|
||||
commitList.push(...subPartIds);
|
||||
});
|
||||
}
|
||||
const options = {
|
||||
contentSettings,
|
||||
blobHTTPHeaders: this._getAzureContentSettingsHeaders(contentSettings || {}),
|
||||
metadata: translateAzureMetaHeaders(metaHeaders || {}, tagging),
|
||||
};
|
||||
return this._errorWrapper('completeMPU', 'commitBlocks',
|
||||
[this._azureContainerName, azureKey, commitList, options,
|
||||
err => {
|
||||
if (err) {
|
||||
logHelper(log, 'error', 'err completing MPU on Azure ' +
|
||||
'datastore', err, this._dataStoreName);
|
||||
return callback(errors.ServiceUnavailable
|
||||
.customizeDescription('Error returned from ' +
|
||||
`Azure: ${err.message}`));
|
||||
}
|
||||
const completeObjData = {
|
||||
return this._errorWrapper('completeMPU', 'commitBlocks', async client => {
|
||||
try {
|
||||
await client.getBlockBlobClient(azureKey).commitBlockList(commitList, options);
|
||||
return {
|
||||
key: azureKey,
|
||||
filteredPartsObj,
|
||||
};
|
||||
return callback(null, completeObjData);
|
||||
}], log, callback);
|
||||
} catch (err) {
|
||||
logHelper(log, 'error', 'err completing MPU on Azure datastore',
|
||||
err, this._dataStoreName);
|
||||
throw errors.ServiceUnavailable.customizeDescription(
|
||||
`Error returned from Azure: ${err.message}`);
|
||||
}
|
||||
}, log, callback);
|
||||
}
|
||||
|
||||
objectPutTagging(key, bucket, objectMD, log, callback) {
|
||||
const azureKey = this._createAzureKey(bucket, key, this._bucketMatch);
|
||||
const azureMD = this._getMetaHeaders(objectMD);
|
||||
azureMD.tags = JSON.stringify(objectMD.tags);
|
||||
this._errorWrapper('objectPutTagging', 'setBlobMetadata',
|
||||
[this._azureContainerName, azureKey, azureMD,
|
||||
err => {
|
||||
if (err) {
|
||||
logHelper(log, 'error', 'err putting object tags to ' +
|
||||
'Azure backend', err, this._dataStoreName);
|
||||
return callback(errors.ServiceUnavailable);
|
||||
this._errorWrapper('objectPutTagging', 'setBlobMetadata', async client => {
|
||||
try {
|
||||
await client.getBlockBlobClient(azureKey).setMetadata(azureMD);
|
||||
} catch (err) {
|
||||
logHelper(log, 'error', 'err putting object tags to Azure backend',
|
||||
err, this._dataStoreName);
|
||||
throw errors.ServiceUnavailable;
|
||||
}
|
||||
return callback();
|
||||
}], log, callback);
|
||||
}, log, callback);
|
||||
}
|
||||
|
||||
objectDeleteTagging(key, bucketName, objectMD, log, callback) {
|
||||
const azureKey = this._createAzureKey(bucketName, key, this._bucketMatch);
|
||||
const azureMD = this._getMetaHeaders(objectMD);
|
||||
this._errorWrapper('objectDeleteTagging', 'setBlobMetadata',
|
||||
[this._azureContainerName, azureKey, azureMD,
|
||||
err => {
|
||||
if (err) {
|
||||
logHelper(log, 'error', 'err putting object tags to ' +
|
||||
'Azure backend', err, this._dataStoreName);
|
||||
return callback(errors.ServiceUnavailable);
|
||||
this._errorWrapper('objectDeleteTagging', 'setBlobMetadata', async client => {
|
||||
try {
|
||||
await client.getBlockBlobClient(azureKey).setMetadata(azureMD);
|
||||
} catch (err) {
|
||||
logHelper(log, 'error', 'err putting object tags to Azure backend',
|
||||
err, this._dataStoreName);
|
||||
throw errors.ServiceUnavailable;
|
||||
}
|
||||
return callback();
|
||||
}], log, callback);
|
||||
}, log, callback);
|
||||
}
|
||||
|
||||
copyObject(request, destLocationConstraintName, sourceKey,
|
||||
|
@ -406,54 +488,50 @@ class AzureClient {
|
|||
|
||||
let options;
|
||||
if (storeMetadataParams.metaHeaders) {
|
||||
options = { metadata:
|
||||
translateAzureMetaHeaders(storeMetadataParams.metaHeaders) };
|
||||
options = {
|
||||
metadata: translateAzureMetaHeaders(storeMetadataParams.metaHeaders),
|
||||
};
|
||||
}
|
||||
|
||||
this._errorWrapper('copyObject', 'startCopyBlob',
|
||||
[`${this._azureStorageEndpoint}` +
|
||||
`${sourceContainerName}/${sourceKey}`,
|
||||
this._azureContainerName, destAzureKey, options,
|
||||
(err, res) => {
|
||||
if (err) {
|
||||
if (err.code === 'CannotVerifyCopySource') {
|
||||
logHelper(log, 'error', 'Unable to access ' +
|
||||
`${sourceContainerName} Azure Container`, err,
|
||||
this._dataStoreName);
|
||||
return callback(errors.AccessDenied
|
||||
.customizeDescription('Error: Unable to access ' +
|
||||
`${sourceContainerName} Azure Container`),
|
||||
// TODO: should we use syncCopyBlob() instead? or use poller.pollUntilDone() to wait until complete?
|
||||
this._errorWrapper('copyObject', 'startCopyBlob', async client => {
|
||||
let res;
|
||||
try {
|
||||
const poller = await client.getBlockBlobClient(destAzureKey).beginCopyFromURL(
|
||||
`${this._azureStorageEndpoint}${sourceContainerName}/${sourceKey}`,
|
||||
options,
|
||||
);
|
||||
|
||||
res = poller.getOperationState().result;
|
||||
if (res.copyProgress !== 'pending') {
|
||||
return destAzureKey;
|
||||
}
|
||||
logHelper(log, 'error', 'error from data backend on ' +
|
||||
'copyObject', err, this._dataStoreName);
|
||||
return callback(errors.ServiceUnavailable
|
||||
.customizeDescription('Error returned from ' +
|
||||
`AWS: ${err.message}`),
|
||||
);
|
||||
}
|
||||
if (res.copy.status === 'pending') {
|
||||
logHelper(log, 'error', 'Azure copy status is pending',
|
||||
} catch (err) {
|
||||
if (err.code === 'CannotVerifyCopySource') { // TOOD: may use a constant (or type) from SDK ??
|
||||
logHelper(log, 'error',
|
||||
`Unable to access ${sourceContainerName} Azure Container`,
|
||||
err, this._dataStoreName);
|
||||
const copyId = res.copy.id;
|
||||
this._client.abortCopyBlob(this._azureContainerName,
|
||||
destAzureKey, copyId, err => {
|
||||
if (err) {
|
||||
logHelper(log, 'error', 'error from data backend ' +
|
||||
'on abortCopyBlob', err, this._dataStoreName);
|
||||
return callback(errors.ServiceUnavailable
|
||||
.customizeDescription('Error returned from ' +
|
||||
`AWS on abortCopyBlob: ${err.message}`),
|
||||
);
|
||||
throw errors.AccessDenied.customizeDescription(
|
||||
`Error: Unable to access ${sourceContainerName} Azure Container`);
|
||||
}
|
||||
return callback(errors.InvalidObjectState
|
||||
.customizeDescription('Error: Azure copy status was ' +
|
||||
'pending. It has been aborted successfully'),
|
||||
);
|
||||
});
|
||||
logHelper(log, 'error', 'error from data backend on copyObject',
|
||||
err, this._dataStoreName);
|
||||
throw errors.ServiceUnavailable.customizeDescription(
|
||||
`Error returned from AWS: ${err.message}`);
|
||||
}
|
||||
return callback(null, destAzureKey);
|
||||
}], log, callback);
|
||||
|
||||
logHelper(log, 'error', 'Azure copy status is pending', {}, this._dataStoreName);
|
||||
try {
|
||||
await client.getBlockBlobClient(destAzureKey).abortCopyFromURL(res.copyId);
|
||||
} catch (err) {
|
||||
logHelper(log, 'error', 'error from data backend on abortCopyBlob',
|
||||
err, this._dataStoreName);
|
||||
throw errors.ServiceUnavailable.customizeDescription(
|
||||
`Error returned from AWS on abortCopyBlob: ${err.message}`);
|
||||
}
|
||||
throw errors.InvalidObjectState.customizeDescription(
|
||||
'Error: Azure copy status was pending. It has been aborted successfully');
|
||||
}, log, callback);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,696 @@
// Zenko CloudServer Vitastor data storage backend adapter
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 (see README.md for details)

const stream = require('stream');

const vitastor = require('vitastor');

const VOLUME_MAGIC = 'VstS3Vol';
const OBJECT_MAGIC = 'VstS3Obj';
const FLAG_DELETED = 2n;

type Volume = {
    id: number,
    partial_sectors: {
        [key: string]: {
            buffer: Buffer,
            refs: number,
        },
    },
    header: {
        location: string,
        bucket: string,
        max_size: number,
        create_ts: number,
        used_ts: number,
        size: number,
        objects: number,
        removed_objects: number,
        object_bytes: number,
        removed_bytes: number,
    },
};

type ObjectHeader = {
    size: number,
    key: string,
    part_num?: number,
};

class VitastorBackend
{
    locationName: string;
    config: {
        pool_id: number,
        metadata_image: string,
        metadata_pool_id: number,
        metadata_inode_num: number,
        size_buckets: number[],
        size_bucket_mul: number,
        id_batch_size: number,
        sector_size: number,
        write_chunk_size: number,
        read_chunk_size: number,
        pack_objects: boolean,
        // and also other parameters for vitastor itself
    };
    next_id: number;
    alloc_id: number;
    opened: boolean;
    on_open: ((...args: any[]) => void)[] | null;
    open_error: Error | null;
    cli: any;
    kv: any;
    volumes: {
        [bucket: string]: {
            [max_size: string]: Volume,
        },
    };
    volumes_by_id: {
        [id: string]: Volume,
    };
    volume_delete_stats: {
        [id: string]: {
            count: number,
            bytes: number,
        },
    };

    constructor(locationName, config)
    {
        this.locationName = locationName;
        this.config = config;
        // validate config
        this.config.pool_id = Number(this.config.pool_id) || 0;
        if (!this.config.pool_id)
            throw new Error('pool_id is required for Vitastor');
        if (!this.config.metadata_image && (!this.config.metadata_pool_id || !this.config.metadata_inode_num))
            throw new Error('metadata_image or metadata_inode is required for Vitastor');
        if (!this.config.size_buckets || !this.config.size_buckets.length)
            this.config.size_buckets = [ 32*1024, 128*1024, 512*1024, 2*1024*1024, 8*1024*1024 ];
        this.config.size_bucket_mul = Number(this.config.size_bucket_mul) || 2;
        this.config.id_batch_size = Number(this.config.id_batch_size) || 100;
        this.config.sector_size = Number(this.config.sector_size) || 0;
        if (this.config.sector_size < 4096)
            this.config.sector_size = 4096;
        this.config.write_chunk_size = Number(this.config.write_chunk_size) || 0;
        if (this.config.write_chunk_size < this.config.sector_size)
            this.config.write_chunk_size = 4*1024*1024; // 4 MB
        this.config.read_chunk_size = Number(this.config.read_chunk_size) || 0;
        if (this.config.read_chunk_size < this.config.sector_size)
            this.config.read_chunk_size = 4*1024*1024; // 4 MB
        this.config.pack_objects = !!this.config.pack_objects;
        // state
        this.next_id = 1;
        this.alloc_id = 0;
        this.opened = false;
        this.on_open = null;
        this.open_error = null;
        this.cli = new vitastor.Client(config);
        this.kv = new vitastor.KV(this.cli);
        // we group objects into volumes by bucket and size
        this.volumes = {};
        this.volumes_by_id = {};
        this.volume_delete_stats = {};
    }
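    // Illustrative location configuration (values are examples, except the
    // 4 MB chunk sizes and 4096-byte sector size, which match the defaults
    // applied above):
    // {
    //     pool_id: 1,                  // Vitastor pool holding data volumes
    //     metadata_image: 's3-meta',   // or metadata_pool_id + metadata_inode_num
    //     size_buckets: [ 32*1024, 128*1024, 512*1024 ],
    //     size_bucket_mul: 2,          // growth factor past the largest bucket
    //     id_batch_size: 100,          // volume ids reserved per KV round-trip
    //     sector_size: 4096,
    //     write_chunk_size: 4*1024*1024,
    //     read_chunk_size: 4*1024*1024,
    //     pack_objects: false,
    // }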

    async _makeVolumeId()
    {
        if (this.next_id <= this.alloc_id)
        {
            return this.next_id++;
        }
        const id_key = 'id'+this.config.pool_id;
        const [ err, prev ] = await new Promise<[ any, string ]>(ok => this.kv.get(id_key, (err, value) => ok([ err, value ])));
        if (err && err != vitastor.ENOENT)
        {
            throw new Error(err);
        }
        const new_id = (parseInt(prev) || 0) + 1;
        this.next_id = new_id;
        this.alloc_id = this.next_id + this.config.id_batch_size - 1;
        await new Promise((ok, no) => this.kv.set(id_key, this.alloc_id, err => (err ? no(new Error(err)) : ok(null)), cas_old => cas_old === prev));
        return this.next_id;
    }

    async _getVolume(bucketName, size)
    {
        if (!this.opened)
        {
            if (this.on_open)
            {
                await new Promise(ok => this.on_open!.push(ok));
            }
            else
            {
                this.on_open = [];
                if (this.config.metadata_image)
                {
                    const img = new vitastor.Image(this.cli, this.config.metadata_image);
                    const info = await new Promise<{ pool_id: number, inode_num: number }>(ok => img.get_info(ok));
                    this.config.metadata_pool_id = info.pool_id;
                    this.config.metadata_inode_num = info.inode_num;
                }
                const kv_config = {};
                for (const key in this.config)
                {
                    if (key.substr(0, 3) === 'kv_')
                        kv_config[key] = this.config[key];
                }
                this.open_error = await new Promise(ok => this.kv.open(
                    this.config.metadata_pool_id, this.config.metadata_inode_num,
                    kv_config, err => ok(err ? new Error(err) : null)
                ));
                this.opened = true;
                this.on_open.map(cb => setImmediate(cb));
                this.on_open = null;
            }
        }
        if (this.open_error)
        {
            throw this.open_error;
        }
        let i;
        for (i = 0; i < this.config.size_buckets.length && size >= this.config.size_buckets[i]; i++) {}
        let s;
        if (i < this.config.size_buckets.length)
            s = this.config.size_buckets[i];
        else if (this.config.size_bucket_mul > 1)
        {
            while (size >= s)
                s = Math.floor(this.config.size_bucket_mul * s);
        }
        if (!this.volumes[bucketName])
        {
            this.volumes[bucketName] = {};
        }
        if (this.volumes[bucketName][s])
        {
            return this.volumes[bucketName][s];
        }
        const new_id = await this._makeVolumeId();
        const new_vol = this.volumes[bucketName][s] = {
            id: new_id,
            // FIXME: partial_sectors should be written with CAS because otherwise we may lose quick deletes
            partial_sectors: {},
            header: {
                location: this.locationName,
                bucket: bucketName,
                max_size: s,
                create_ts: Date.now(),
                used_ts: Date.now(),
                size: this.config.sector_size, // initial position is right after header
                objects: 0,
                removed_objects: 0,
                object_bytes: 0,
                removed_bytes: 0,
            },
        };
        this.volumes_by_id[new_id] = new_vol;
        const header_text = JSON.stringify(this.volumes[bucketName][s].header);
        const buf = Buffer.alloc(this.config.sector_size);
        buf.write(VOLUME_MAGIC + header_text, 0);
        await new Promise((ok, no) => this.cli.write(
            this.config.pool_id, new_id, 0, buf, err => (err ? no(new Error(err)) : ok(null))
        ));
        await new Promise((ok, no) => this.kv.set(
            'vol_'+this.config.pool_id+'_'+new_id, header_text, err => (err ? no(new Error(err)) : ok(null)), cas_old => !cas_old
        ));
        return new_vol;
    }

    toObjectGetInfo(objectKey, bucketName, storageLocation)
    {
        return null;
    }

    _bufferStart(vol, cur_pos, cur_size, cur_chunks, sector_refs)
    {
        if ((cur_pos % this.config.sector_size) ||
            Math.floor((cur_pos + cur_size) / this.config.sector_size) == Math.floor(cur_pos / this.config.sector_size))
        {
            const sect_pos = Math.floor(cur_pos / this.config.sector_size) * this.config.sector_size;
            const sect = vol.partial_sectors[sect_pos]
                ? vol.partial_sectors[sect_pos].buffer
                : Buffer.alloc(this.config.sector_size);
            if (this.config.pack_objects)
            {
                // Save only if <pack_objects>
                if (!vol.partial_sectors[sect_pos])
                    vol.partial_sectors[sect_pos] = { buffer: sect, refs: 0 };
                vol.partial_sectors[sect_pos].refs++;
                sector_refs.push(sect_pos);
            }
            let off = cur_pos % this.config.sector_size;
            let i = 0;
            for (; i < cur_chunks.length; i++)
            {
                let copy_len = this.config.sector_size - off;
                copy_len = copy_len > cur_chunks[i].length ? cur_chunks[i].length : copy_len;
                cur_chunks[i].copy(sect, off, 0, copy_len);
                off += copy_len;
                if (copy_len < cur_chunks[i].length)
                {
                    cur_chunks[i] = cur_chunks[i].slice(copy_len);
                    cur_size -= copy_len;
                    break;
                }
                else
                    cur_size -= cur_chunks[i].length;
            }
            cur_chunks.splice(0, i, sect);
            cur_size += this.config.sector_size;
            cur_pos = sect_pos;
        }
        return [ cur_pos, cur_size ];
    }

    _bufferEnd(vol, cur_pos, cur_size, cur_chunks, sector_refs, write_all)
    {
        const write_pos = cur_pos;
        const write_chunks = cur_chunks;
        let write_size = cur_size;
        cur_chunks = [];
        cur_pos += cur_size;
        cur_size = 0;
        let remain = (cur_pos % this.config.sector_size);
        if (remain > 0)
        {
            cur_pos -= remain;
            let last_sect = null;
            if (write_all)
            {
                last_sect = vol.partial_sectors[cur_pos]
                    ? vol.partial_sectors[cur_pos].buffer
                    : Buffer.alloc(this.config.sector_size);
                if (this.config.pack_objects)
                {
                    // Save only if <pack_objects>
                    if (!vol.partial_sectors[cur_pos])
                        vol.partial_sectors[cur_pos] = { buffer: last_sect, refs: 0 };
                    vol.partial_sectors[cur_pos].refs++;
                    sector_refs.push(cur_pos);
                }
            }
            write_size -= remain;
            if (write_size < 0)
                write_size = 0;
            for (let i = write_chunks.length-1; i >= 0 && remain > 0; i--)
            {
                if (write_chunks[i].length <= remain)
                {
                    remain -= write_chunks[i].length;
                    if (write_all)
                        write_chunks[i].copy(last_sect, remain);
                    else
                        cur_chunks.unshift(write_chunks[i]);
                    write_chunks.pop();
                }
                else
                {
                    if (write_all)
                        write_chunks[i].copy(last_sect, 0, write_chunks[i].length - remain);
                    else
                        cur_chunks.unshift(write_chunks[i].slice(write_chunks[i].length - remain));
                    write_chunks[i] = write_chunks[i].slice(0, write_chunks[i].length - remain);
                    remain = 0;
                    i++;
                }
            }
            if (write_all)
            {
                write_chunks.push(last_sect);
                write_size += this.config.sector_size;
            }
        }
        for (const chunk of cur_chunks)
        {
            cur_size += chunk.length;
        }
        return [ write_pos, write_chunks, write_size, cur_pos, cur_size, cur_chunks ];
    }

    /**
     * reqUids: string, // request-ids for log, usually joined by ':'
     * keyContext: {
     *     // a lot of shit, basically all metadata
     *     bucketName,
     *     objectKey,
     *     owner?,
     *     namespace?,
     *     partNumber?,
     *     uploadId?,
     *     metaHeaders?,
     *     isDeleteMarker?,
     *     tagging?,
     *     contentType?,
     *     cacheControl?,
     *     contentDisposition?,
     *     contentEncoding?,
     * },
     * callback: (error, objectGetInfo: any) => void,
     */
    put(stream, size, keyContext, reqUids, callback)
    {
        callback = once(callback);
        this._getVolume(keyContext.bucketName, size)
            .then(vol => this._put(vol, stream, size, keyContext, reqUids, callback))
            .catch(callback);
    }

    _put(vol, stream, size, keyContext, reqUids, callback)
    {
        const object_header: ObjectHeader = {
            size,
            key: keyContext.objectKey,
        };
        if (keyContext.partNumber)
        {
            object_header.part_num = keyContext.partNumber;
        }
        // header is: <8 bytes magic> <8 bytes flags> <8 bytes json length> <json>
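        // For example, a 5-byte object stored under key "a" yields:
        //   offset  0: "VstS3Obj"               (8-byte magic)
        //   offset  8: 0                        (8-byte flags, little-endian)
        //   offset 16: 20                       (8-byte LE length of the JSON header)
        //   offset 24: {"size":5,"key":"a"}     (JSON header, 20 bytes)
        // followed by the 5 bytes of object data.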
        const hdr_begin_buf = Buffer.alloc(24);
        const hdr_json_buf = Buffer.from(JSON.stringify(object_header), 'utf-8');
        hdr_begin_buf.write(OBJECT_MAGIC);
        hdr_begin_buf.writeBigInt64LE(BigInt(hdr_json_buf.length), 16);
        const object_header_buf = Buffer.concat([ hdr_begin_buf, hdr_json_buf ]);
        const object_pos = vol.header.size;
        const object_get_info = { volume: vol.id, offset: object_pos, hdrlen: object_header_buf.length, size };
        let cur_pos = object_pos;
        let cur_chunks = [ object_header_buf ];
        let cur_size = object_header_buf.length;
        let err: Error|null = null;
        let waiting = 1; // 1 for end or error, 1 for each write request
        vol.header.size += object_header_buf.length + size;
        if (!this.config.pack_objects && (vol.header.size % this.config.sector_size))
        {
            vol.header.size += this.config.sector_size - (vol.header.size % this.config.sector_size);
        }
        const writeChunk = (last) =>
        {
            const sector_refs = [];
            // Handle partial beginning
            [ cur_pos, cur_size ] = this._bufferStart(vol, cur_pos, cur_size, cur_chunks, sector_refs);
            // Handle partial end
            let write_pos, write_chunks, write_size;
            [ write_pos, write_chunks, write_size, cur_pos, cur_size, cur_chunks ] = this._bufferEnd(vol, cur_pos, cur_size, cur_chunks, sector_refs, last);
            waiting++;
            // FIXME: pool_id: maybe it should be stored in volume metadata to allow to migrate volumes?
            this.cli.write(this.config.pool_id, vol.id, write_pos, write_chunks, (res) =>
            {
                for (const sect of sector_refs)
                {
                    vol.partial_sectors[sect].refs--;
                    if (!vol.partial_sectors[sect].refs &&
                        vol.header.size >= sect+this.config.sector_size)
                    {
                        // Forget partial data when it's not needed anymore
                        delete(vol.partial_sectors[sect]);
                    }
                }
                waiting--;
                if (res)
                {
                    err = new Error(res);
                    waiting--;
                }
                if (!waiting)
                {
                    callback(err, err ? null : object_get_info);
                }
            });
        };
        // Stream data
        stream.on('error', (e) =>
        {
            err = e;
            waiting--;
            if (!waiting)
            {
                callback(err, null);
            }
        });
        stream.on('end', () =>
        {
            if (err)
            {
                return;
            }
            waiting--;
            if (cur_size)
            {
                // write last chunk
                writeChunk(true);
            }
            if (!waiting)
            {
                callback(null, object_get_info);
            }
        });
        stream.on('data', (chunk) =>
        {
            if (err)
            {
                return;
            }
            cur_chunks.push(chunk);
            cur_size += chunk.length;
            if (cur_size >= this.config.write_chunk_size)
            {
                // got a complete chunk, write it out
                writeChunk(false);
            }
        });
    }

    /**
     * objectGetInfo: {
     *     key: { volume, offset, hdrlen, size }, // from put
     *     size,
     *     start,
     *     dataStoreName,
     *     dataStoreETag,
     *     range,
     *     response: ServerResponse,
     * },
     * range?: [ start, end ], // like in HTTP - first byte index, last byte index
     * callback: (error, readStream) => void,
     */
    get(objectGetInfo, range, reqUids, callback)
    {
        if (!(objectGetInfo instanceof Object) || !objectGetInfo.key ||
            !(objectGetInfo.key instanceof Object) || !objectGetInfo.key.volume ||
            !objectGetInfo.key.offset || !objectGetInfo.key.hdrlen || !objectGetInfo.key.size)
        {
            throw new Error('objectGetInfo must be { key: { volume, offset, hdrlen, size } }, but is '+JSON.stringify(objectGetInfo));
        }
        const [ start, end ] = range || [];
        if (start < 0 || end < 0 || end != null && start != null && end < start || start >= objectGetInfo.key.size)
        {
            throw new Error('Invalid range: '+start+'-'+end);
        }
        let offset = objectGetInfo.key.offset + objectGetInfo.key.hdrlen + (start || 0);
        let len = objectGetInfo.key.size - (start || 0);
        if (end)
        {
            const len2 = end - (start || 0) + 1;
            if (len2 < len)
                len = len2;
        }
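        // Worked example: with key = { offset: 4096, hdrlen: 44, size: 1000 }
        // and range [100, 199], data is read starting at
        // 4096 + 44 + 100 = 4240, and len = min(1000 - 100, 199 - 100 + 1) = 100.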
        callback(null, new VitastorReadStream(this.cli, objectGetInfo.key.volume, offset, len, this.config));
    }

    /**
     * objectGetInfo: {
     *     key: { volume, offset, hdrlen, size }, // from put
     *     size,
     *     start,
     *     dataStoreName,
     *     dataStoreETag,
     *     range,
     *     response: ServerResponse,
     * },
     * callback: (error) => void,
     */
    delete(objectGetInfo, reqUids, callback)
    {
        callback = once(callback);
        this._delete(objectGetInfo, reqUids)
            .then(callback)
            .catch(callback);
    }

    async _delete(objectGetInfo, reqUids)
    {
        if (!(objectGetInfo instanceof Object) || !objectGetInfo.key ||
            !(objectGetInfo.key instanceof Object) || !objectGetInfo.key.volume ||
            !objectGetInfo.key.offset || !objectGetInfo.key.hdrlen || !objectGetInfo.key.size)
        {
            throw new Error('objectGetInfo must be { key: { volume, offset, hdrlen, size } }, but is '+JSON.stringify(objectGetInfo));
        }
        const in_sect_pos = (objectGetInfo.key.offset % this.config.sector_size);
        const sect_pos = objectGetInfo.key.offset - in_sect_pos;
        const vol = this.volumes_by_id[objectGetInfo.key.volume];
        if (vol && vol.partial_sectors[sect_pos])
        {
            // The sector may still be written to in corner cases
            const sect = vol.partial_sectors[sect_pos];
            const flags = sect.buffer.readBigInt64LE(in_sect_pos + 8);
            if (!(flags & FLAG_DELETED))
            {
                const del_stat = this.volume_delete_stats[vol.id] = (this.volume_delete_stats[vol.id] || { count: 0, bytes: 0 });
                del_stat.count++;
                del_stat.bytes += objectGetInfo.key.size;
                sect.buffer.writeBigInt64LE(flags | FLAG_DELETED, in_sect_pos + 8);
                sect.refs++;
                const err = await new Promise<any>(ok => this.cli.write(this.config.pool_id, objectGetInfo.key.volume, sect_pos, sect.buffer, ok));
                sect.refs--;
                if (err)
                {
                    sect.buffer.writeBigInt64LE(0n, in_sect_pos + 8);
                    throw new Error(err);
                }
            }
        }
        else
        {
            // RMW with CAS
            const [ err, buf, version ] = await new Promise<[ any, Buffer, bigint ]>(ok => this.cli.read(
                this.config.pool_id, objectGetInfo.key.volume, sect_pos, this.config.sector_size,
                (err, buf, version) => ok([ err, buf, version ])
            ));
            if (err)
            {
                throw new Error(err);
            }
            // FIXME What if JSON crosses sector boundary? Prevent it if we want to pack objects
            const magic = buf.slice(in_sect_pos, in_sect_pos+8).toString();
            const flags = buf.readBigInt64LE(in_sect_pos+8);
            const json_len = Number(buf.readBigInt64LE(in_sect_pos+16));
            let json_hdr;
            if (in_sect_pos+24+json_len <= buf.length)
            {
                try
                {
                    json_hdr = JSON.parse(buf.slice(in_sect_pos+24, in_sect_pos+24+json_len).toString());
                }
                catch (e)
                {
                }
            }
            if (magic !== OBJECT_MAGIC || !json_hdr || json_hdr.size !== objectGetInfo.key.size)
            {
                throw new Error(
                    'header of object with size '+objectGetInfo.key.size+
                    ' bytes not found in volume '+objectGetInfo.key.volume+' at '+objectGetInfo.key.offset
                );
            }
            else if (!(flags & FLAG_DELETED))
            {
                buf.writeBigInt64LE(flags | FLAG_DELETED, in_sect_pos + 8);
                const err = await new Promise<any>(ok => this.cli.write(this.config.pool_id, objectGetInfo.key.volume, sect_pos, buf, { version: version+1n }, ok));
                if (err == vitastor.EINTR)
                {
                    // Retry
                    await this._delete(objectGetInfo, reqUids);
                }
                else if (err)
                {
                    throw new Error(err);
                }
                else
                {
                    // FIXME: Write deletion statistics to volumes
                    // FIXME: Implement defragmentation
                    const del_stat = this.volume_delete_stats[objectGetInfo.key.volume] = (this.volume_delete_stats[objectGetInfo.key.volume] || { count: 0, bytes: 0 });
                    del_stat.count++;
                    del_stat.bytes += objectGetInfo.key.size;
                }
            }
        }
    }

    /**
     * config: full zenko server config,
     * callback: (error, stats) => void, // stats is the returned statistics in arbitrary format
     */
    getDiskUsage(config, reqUids, callback)
    {
        // FIXME: Iterate all volumes and return their sizes and deletion statistics, or maybe just sizes
        callback(null, {});
    }
}

class VitastorReadStream extends stream.Readable
{
    constructor(cli, volume_id, offset, len, config, options = undefined)
    {
        super(options);
        this.cli = cli;
        this.volume_id = volume_id;
        this.offset = offset;
        this.end = offset + len;
        this.pos = offset;
        this.config = config;
        this._reading = false;
    }

    _read(n)
    {
        if (this._reading)
        {
            return;
        }
        // FIXME: Validate object header
        const chunk_size = n && this.config.read_chunk_size < n ? n : this.config.read_chunk_size;
        const read_offset = this.pos;
        const round_offset = read_offset - (read_offset % this.config.sector_size);
        let read_end = this.end <= read_offset+chunk_size ? this.end : read_offset+chunk_size;
        const round_end = (read_end % this.config.sector_size)
            ? read_end + this.config.sector_size - (read_end % this.config.sector_size)
            : read_end;
        if (round_end <= this.end)
            read_end = round_end;
        this.pos = read_end;
        if (read_end <= read_offset)
        {
            // EOF
            this.push(null);
            return;
        }
        this._reading = true;
        this.cli.read(this.config.pool_id, this.volume_id, round_offset, round_end-round_offset, (err, buf, version) =>
        {
            this._reading = false;
            if (err)
            {
                this.destroy(new Error(err));
                return;
            }
            if (read_offset != round_offset || round_end != read_end)
            {
                buf = buf.subarray(read_offset-round_offset, buf.length-(round_end-read_end));
            }
            if (this.push(buf))
            {
                this._read(n);
            }
        });
    }
}

function once(callback)
{
    let called = false;
    return function()
    {
        if (!called)
        {
            called = true;
            callback.apply(null, arguments);
        }
    };
}

module.exports = VitastorBackend;
@@ -51,6 +51,36 @@ function _parseListEntries(entries) {
        });
    }

/** _parseLifecycleListEntries - parse the values returned in a lifecycle listing by metadata
 * @param {object[]} entries - Version or Content entries in a metadata listing
 * @param {string} entries[].key - metadata key
 * @param {string} entries[].value - stringified object metadata
 * @return {object} - mapped array with parsed value or JSON parsing err
 */
function _parseLifecycleListEntries(entries) {
    return entries.map(entry => {
        const tmp = JSON.parse(entry.value);
        return {
            key: entry.key,
            value: {
                Size: tmp['content-length'],
                ETag: tmp['content-md5'],
                VersionId: tmp.versionId,
                IsNull: tmp.isNull,
                LastModified: tmp['last-modified'],
                Owner: {
                    DisplayName: tmp['owner-display-name'],
                    ID: tmp['owner-id'],
                },
                StorageClass: tmp['x-amz-storage-class'],
                tags: tmp.tags,
                staleDate: tmp.staleDate,
                dataStoreName: tmp.dataStoreName,
            },
        };
    });
}

/** parseListEntries - parse the values returned in a listing by metadata
 * @param {object[]} entries - Version or Content entries in a metadata listing
 * @param {string} entries[].key - metadata key

@@ -147,6 +177,42 @@ class MetadataWrapper {
        });
    }

    updateBucketCapabilities(bucketName, bucketMD, capabilityName, capacityField, capability, log, cb) {
        log.debug('updating bucket capabilities in metadata');
        // When concurrency update is not supported, we update the whole bucket metadata
        if (!this.client.putBucketAttributesCapabilities) {
            return this.updateBucket(bucketName, bucketMD, log, cb);
        }
        return this.client.putBucketAttributesCapabilities(bucketName, capabilityName, capacityField, capability,
            log, err => {
                if (err) {
                    log.debug('error from metadata', { implName: this.implName,
                        error: err });
                    return cb(err);
                }
                log.trace('bucket capabilities updated in metadata');
                return cb(err);
            });
    }

    deleteBucketCapabilities(bucketName, bucketMD, capabilityName, capacityField, log, cb) {
        log.debug('deleting bucket capabilities in metadata');
        // When concurrency update is not supported, we update the whole bucket metadata
        if (!this.client.deleteBucketAttributesCapability) {
            return this.updateBucket(bucketName, bucketMD, log, cb);
        }
        return this.client.deleteBucketAttributesCapability(bucketName, capabilityName, capacityField,
            log, err => {
                if (err) {
                    log.debug('error from metadata', { implName: this.implName,
                        error: err });
                    return cb(err);
                }
                log.trace('bucket capabilities deleted in metadata');
                return cb(err);
            });
    }

    getBucket(bucketName, log, cb) {
        log.debug('getting bucket from metadata');
        this.client.getBucketAttributes(bucketName, log, (err, data) => {

@@ -160,6 +226,19 @@ class MetadataWrapper {
        });
    }

    getBucketQuota(bucketName, log, cb) {
        log.debug('getting bucket quota from metadata');
        this.client.getBucketAttributes(bucketName, log, (err, data) => {
            if (err) {
                log.debug('error from metadata', { implName: this.implName,
                    error: err });
                return cb(err);
            }
            const bucketInfo = BucketInfo.fromObj(data);
            return cb(err, { quota: bucketInfo.getQuota() });
        });
    }

    deleteBucket(bucketName, log, cb) {
        log.debug('deleting bucket from metadata');
        this.client.deleteBucket(bucketName, log, err => {

@@ -213,6 +292,25 @@ class MetadataWrapper {
        });
    }

    getObjectsMD(bucketName, objNamesWithParams, log, cb) {
        if (typeof this.client.getObjects !== 'function') {
            log.debug('backend does not support get object metadata with batching', {
                implName: this.implName,
            });
            return cb(errors.NotImplemented);
        }
        log.debug('getting objects from metadata', { objects: objNamesWithParams });
        return this.client.getObjects(bucketName, objNamesWithParams, log, (err, data) => {
            if (err) {
                log.debug('error getting objects from metadata', { implName: this.implName, objects: objNamesWithParams,
                    err });
                return cb(err);
            }
            log.debug('objects retrieved from metadata', { objects: objNamesWithParams });
            return cb(err, data);
        });
    }

    getObjectMD(bucketName, objName, params, log, cb) {
        log.debug('getting object from metadata');
        this.client.getObject(bucketName, objName, params, log, (err, data) => {

@@ -226,7 +324,7 @@ class MetadataWrapper {
        });
    }

    deleteObjectMD(bucketName, objName, params, log, cb) {
    deleteObjectMD(bucketName, objName, params, log, cb, originOp = 's3:ObjectRemoved:Delete') {
        log.debug('deleting object from metadata');
        this.client.deleteObject(bucketName, objName, params, log, err => {
            if (err) {

@@ -236,7 +334,7 @@ class MetadataWrapper {
            }
            log.debug('object deleted from metadata');
            return cb(err);
        });
        }, originOp);
    }

    listObject(bucketName, listingParams, log, cb) {

@@ -279,6 +377,29 @@ class MetadataWrapper {
        });
    }

    listLifecycleObject(bucketName, listingParams, log, cb) {
        log.debug('getting object listing for lifecycle from metadata');
        this.client.listLifecycleObject(bucketName, listingParams, log, (err, data) => {
            if (err) {
                log.error('error from metadata', { implName: this.implName,
                    err });
                return cb(err);
            }
            log.debug('object listing for lifecycle retrieved from metadata');
            // eslint-disable-next-line no-param-reassign
            data.Contents = parseListEntries(data.Contents, _parseLifecycleListEntries);
            if (data.Contents instanceof Error) {
                log.error('error parsing metadata listing for lifecycle', {
                    error: data.Contents,
                    listingType: listingParams.listingType,
                    method: 'listLifecycleObject',
                });
                return cb(errors.InternalError);
            }
            return cb(null, data);
        });
    }

    listMultipartUploads(bucketName, listingParams, log, cb) {
        this.client.listMultipartUploads(bucketName, listingParams, log,
            (err, data) => {
@@ -427,6 +548,139 @@ class MetadataWrapper {
            return cb();
        });
    }

    /**
     * Put bucket indexes
     *
     * indexSpec format:
     * [
     *     { key: [ { key: "", order: 1 } ... ], name: <id 1>, ... , <backend options> },
     *     ...
     *     { key: [ { key: "", order: 1 } ... ], name: <id n>, ... },
     * ]
     *
     * @param {String} bucketName bucket name
     * @param {Array<Object>} indexSpecs index specification
     * @param {Object} log logger
     * @param {Function} cb callback
     * @return {undefined}
     */
    putBucketIndexes(bucketName, indexSpecs, log, cb) {
        log.debug('put bucket indexes');

        if (typeof this.client.putBucketIndexes !== 'function') {
            log.error('error from metadata', {
                method: 'putBucketIndexes',
                error: errors.NotImplemented,
                implName: this.implName,
            });
            return cb(errors.NotImplemented);
        }

        return this.client.putBucketIndexes(bucketName, indexSpecs, log, err => {
            if (err) {
                log.debug('error from metadata', {
                    method: 'putBucketIndexes',
                    error: err,
                    implName: this.implName,
                });
                return cb(err);
            }
            return cb(null);
        });
    }
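    // Hypothetical call (index name and key path invented for illustration):
    //
    // metadata.putBucketIndexes('mybucket', [
    //     { name: 'lastModifiedIdx', key: [{ key: 'value.last-modified', order: 1 }] },
    // ], log, err => { /* handle err */ });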

    /**
     * Delete bucket indexes
     *
     * indexSpec format:
     * [
     *     { key: [ { key: "", order: 1 } ... ], name: <id 1>, ... , <backend options> },
     *     ...
     *     { key: [ { key: "", order: 1 } ... ], name: <id n>, ... },
     * ]
     *
     * @param {String} bucketName bucket name
     * @param {Array<Object>} indexSpecs index specification
     * @param {Object} log logger
     * @param {Function} cb callback
     * @return {undefined}
     */
    deleteBucketIndexes(bucketName, indexSpecs, log, cb) {
        log.debug('delete bucket indexes');

        if (typeof this.client.deleteBucketIndexes !== 'function') {
            log.error('error from metadata', {
                method: 'deleteBucketIndexes',
                error: errors.NotImplemented,
                implName: this.implName,
            });
            return cb(errors.NotImplemented);
        }

        return this.client.deleteBucketIndexes(bucketName, indexSpecs, log, err => {
            if (err) {
                log.error('error from metadata', {
                    method: 'deleteBucketIndexes',
                    error: err,
                    implName: this.implName,
                });
                return cb(err);
            }
            return cb(null);
        });
    }

    getBucketIndexes(bucketName, log, cb) {
        log.debug('get bucket indexes');

        if (typeof this.client.getBucketIndexes !== 'function') {
            log.debug('error from metadata', {
                method: 'getBucketIndexes',
                error: errors.NotImplemented,
                implName: this.implName,
            });
            return cb(errors.NotImplemented);
        }

        return this.client.getBucketIndexes(bucketName, log, (err, res) => {
            if (err) {
                log.debug('error from metadata', {
                    method: 'getBucketIndexes',
                    error: err,
                    implName: this.implName,
                });
                return cb(err);
            }
            return cb(null, res);
        });
    }

    getIndexingJobs(log, cb) {
        if (typeof this.client.getIndexingJobs !== 'function') {
            log.debug('error from metadata', {
                method: 'getIndexingJobs',
                error: errors.NotImplemented,
                implName: this.implName,
            });
            return cb(errors.NotImplemented);
        }

        return this.client.getIndexingJobs(log, (err, res) => {
            if (err) {
                log.debug('error from metadata', {
                    method: 'getIndexingJobs',
                    error: err,
                    implName: this.implName,
                });
                return cb(err);
            }
            return cb(null, res);
        });
    }
}

module.exports = MetadataWrapper;
@@ -110,6 +110,17 @@ class BucketClientInterface {
        return null;
    }

    listLifecycleObject(bucketName, params, log, cb) {
        this.client.listObject(bucketName, log.getSerializedUids(), params,
            (err, data) => {
                if (err) {
                    return cb(err);
                }
                return cb(null, JSON.parse(data));
            });
        return null;
    }

    listMultipartUploads(bucketName, params, log, cb) {
        this.client.listObject(bucketName, log.getSerializedUids(), params,
            (err, data) => {
@@ -325,6 +325,10 @@ class BucketFileInterface {
        return this.internalListObject(bucketName, params, log, cb);
    }

    listLifecycleObject(bucketName, params, log, cb) {
        return this.internalListObject(bucketName, params, log, cb);
    }

    listMultipartUploads(bucketName, params, log, cb) {
        return this.internalListObject(bucketName, params, log, cb);
    }
@@ -318,6 +318,10 @@ const metastore = {
        });
    },

    listLifecycleObject(bucketName, params, log, cb) {
        return process.nextTick(cb, errors.NotImplemented);
    },

    listMultipartUploads(bucketName, listingParams, log, cb) {
        process.nextTick(() => {
            metastore.getBucketAttributes(bucketName, log, (err, bucket) => {
@@ -108,9 +108,26 @@ class ListRecordStream extends stream.Readable {
            if (value && value.tags) {
                value.tags = unescape(value.tags);
            }
            entry = {
                type: 'put', // updates overwrite the whole metadata,
            // updates overwrite the whole metadata,
            // so they are considered as puts
            let type = 'put';
            // When the object metadata contain the "deleted"
            // flag, it means that the operation is the update
            // we perform before the deletion of an object. We
            // perform the update to keep all the metadata in the
            // oplog. This update is what will be used by backbeat
            // as the delete operation so we put the type of operation
            // for this event to a delete.
            // Backbeat still receives the actual delete operations
            // but they are ignored as they don't contain any metadata.
            // The delete operations are kept in case we want to listen
            // to delete events coming from special collections other
            // than "bucket" collections.
            if (value && value.deleted) {
                type = 'delete';
            }
            entry = {
                type,
                key: itemObj.o2._id,
                // updated value may be either stored directly in 'o'
                // attribute or in '$set' attribute (supposedly when
File diff suppressed because it is too large
@@ -143,13 +143,13 @@ on top of the delete marker or if the delete marker is deleted.
##### Format of master keys

```
\x7fM{{key}
\x7fM{{key}}
```

##### Format of version keys

```
\x7fV{{key}\x00{versionId}
\x7fV{{key}}\x00{{versionId}}
```
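
For example, with an object key `foo` and a (shortened, illustrative)
version id `v1`, the two formats above expand to:

```
\x7fMfoo
\x7fVfoo\x00v1
```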

##### Sizing considerations

@@ -272,7 +272,61 @@ The mongoclient backend implements a readable key/value stream called
in Arsenal/lib/algos listing algorithms. Note it does not require any
LevelDB package.

A versioned object in MongoDB is stored using two types of documents:
a version document for each version of the object, and a master document
containing the data of the latest version of the object.

When deleting the latest version of an object, the master document is replaced
by a temporary placeholder document (marked with `isPHD: true` in its metadata)
that gets resolved later with the data of the new latest version of the object.
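
As a minimal sketch (metadata abridged), such a placeholder master document
carries only the `isPHD` marker and a version id:

```
{ "_id": "mykey", "value": { "isPHD": true, "versionId": "<versionId>" } }
```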

Listing a bucket’s objects (not versions) is done in two steps.
First, all master documents are listed internally
(these can contain the placeholder documents).
Next, the placeholder documents are resolved by listing
the object’s versions and returning the latest one’s data.

#### Generating the UUID

To avoid race conditions we always (try to) generate a new UUID and we
condition the insertion to the non-existence of the document.

#### Oplog Events

The oplog (operations log) is a special capped collection that keeps a
rolling record of all operations that modify the data stored in the database.

The oplog is read and used by multiple backbeat extensions such as replication
and bucket notification to react to database changes.

Oplog events have different types, such as insertion, update and deletion.
Within an oplog event we find the changes that occurred at the object level or
the collection level.

At the object level, if an object was inserted, for example, we'll find all of
the object's data in the event. Update events contain the change that occurred
to an object, and delete events don't contain anything (only the object's _id).

To keep the latest object's metadata before it got deleted in the oplog, we update
the full object setting a deletion flag before deleting the object. This adds an
update event containing all of the object's metadata.
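
Schematically, deleting an object therefore produces two oplog events (shapes
abridged; field names follow the MongoDB oplog conventions):

```
{ op: 'u', o2: { _id: '<key>' }, o: { ..., 'value.deleted': true } }  // consumed as the delete
{ op: 'd', o: { _id: '<key>' } }                                      // ignored, carries no metadata
```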

Note: updating the object with itself without change creates a no-op event that
gets ignored.

**Special Cases:**
When deleting the last version of an object in a versioned bucket, the master
object goes into a PHD state (a temporary, non-updated state) that gets resolved
later. There is one case where a master can be non-existent: when the last
version is a delete marker. In this case the created PHD master is empty,
containing only the isPHD flag and the versionId.

When versioning is enabled in a bucket, only version events should be processed,
as they already contain all of the information needed.

In non-versioned buckets, master object events are the ones to be processed.

In versioning-suspended buckets, both master and version events should be
processed, as the master object itself is considered a null version. No special
case is present here as the master object is always present.

@@ -55,15 +55,38 @@ class MongoReadStream extends Readable {
            }
        }

        if (options.lastModified) {
            query['value.last-modified'] = {};

            if (options.lastModified.lt) {
                query['value.last-modified'].$lt = options.lastModified.lt;
            }
        }

        if (options.dataStoreName) {
            query['value.dataStoreName'] = {};

            if (options.dataStoreName.ne) {
                query['value.dataStoreName'].$ne = options.dataStoreName.ne;
            }
        }

        if (!Object.keys(query._id).length) {
            delete query._id;
        }

        // filtering out objects flagged for deletion
        query.$or = [
            { 'value.deleted': { $exists: false } },
            { 'value.deleted': { $eq: false } },
        ];

        if (searchOptions) {
            Object.assign(query, searchOptions);
        }

        this._cursor = c.find(query).sort({
        const projection = { 'value.location': 0 };
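        // The projection `{ 'value.location': 0 }` excludes the (potentially
        // large) data-location array from each returned document, which the
        // listing does not need.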
        this._cursor = c.find(query, { projection }).sort({
            _id: options.reverse ? -1 : 1,
        });
        if (options.limit && options.limit !== -1) {

@@ -79,15 +102,10 @@ class MongoReadStream extends Readable {
            return;
        }

        this._cursor.next((err, doc) => {
        this._cursor.next().then(doc => {
            if (this._destroyed) {
                return;
            }
            if (err) {
                this.emit('error', err);
                return;
            }

            let key = undefined;
            let value = undefined;

@@ -111,6 +129,12 @@ class MongoReadStream extends Readable {
                value,
            });
        }
        }).catch(err => {
            if (this._destroyed) {
                return;
            }
            this.emit('error', err);
            return;
        });
    }

@@ -120,7 +144,7 @@ class MongoReadStream extends Readable {
        }
        this._destroyed = true;

        this._cursor.close(err => {
        this._cursor.close().catch(err => {
            if (err) {
                this.emit('error', err);
                return;
@@ -185,6 +185,48 @@ function formatVersionKey(key, versionId, vFormat) {
    return formatVersionKeyV0(key, versionId);
}

function indexFormatMongoArrayToObject(mongoIndexArray) {
    const indexObj = [];

    for (const idx of mongoIndexArray) {
        const keys = [];
        let entries = [];

        if (idx.key instanceof Map) {
            entries = idx.key.entries();
        } else {
            entries = Object.entries(idx.key);
        }

        for (const k of entries) {
            keys.push({ key: k[0], order: k[1] });
        }

        indexObj.push({ name: idx.name, keys });
    }

    return indexObj;
}


function indexFormatObjectToMongoArray(indexObj) {
    const mongoIndexArray = [];

    for (const idx of indexObj) {
        const key = new Map();

        for (const k of idx.keys) {
            key.set(k.key, k.order);
        }

        // copy all fields except keys from idx
        // eslint-disable-next-line
        const { keys: _, ...toCopy } = idx;
        mongoIndexArray.push(Object.assign(toCopy, { name: idx.name, key }));
    }

    return mongoIndexArray;
}
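// Round-trip example between the two representations (names are illustrative):
//   object form: [{ name: 'idx1', keys: [{ key: 'value.x', order: 1 }] }]
//   mongo form:  [{ name: 'idx1', key: Map(1) { 'value.x' => 1 } }]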

module.exports = {
    credPrefix,

@@ -195,4 +237,6 @@ module.exports = {
    translateConditions,
    formatMasterKey,
    formatVersionKey,
    indexFormatMongoArrayToObject,
    indexFormatObjectToMongoArray,
};
@@ -10,21 +10,21 @@ function trySetDirSyncFlag(path) {

    const GETFLAGS = 2148034049;
    const SETFLAGS = 1074292226;
    const FS_DIRSYNC_FL = 65536;
    const FS_DIRSYNC_FL = 65536n;
    const buffer = Buffer.alloc(8, 0);
    const pathFD = fs.openSync(path, 'r');
    const status = ioctl(pathFD, GETFLAGS, buffer);
    assert.strictEqual(status, 0);
    const currentFlags = buffer.readUIntLE(0, 8);
    const currentFlags = buffer.readBigInt64LE(0);
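    // Node's Buffer.readUIntLE()/writeUIntLE() only support up to 6 bytes
    // (48 bits), so the 8-byte flags field is read and written as a BigInt.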
    const flags = currentFlags | FS_DIRSYNC_FL;
    buffer.writeUIntLE(flags, 0, 8);
    buffer.writeBigInt64LE(flags, 0);
    const status2 = ioctl(pathFD, SETFLAGS, buffer);
    assert.strictEqual(status2, 0);
    fs.closeSync(pathFD);
    const pathFD2 = fs.openSync(path, 'r');
    const confirmBuffer = Buffer.alloc(8, 0);
    ioctl(pathFD2, GETFLAGS, confirmBuffer);
    assert.strictEqual(confirmBuffer.readUIntLE(0, 8),
    assert.strictEqual(confirmBuffer.readBigInt64LE(0),
        currentFlags | FS_DIRSYNC_FL, 'FS_DIRSYNC_FL not set');
    fs.closeSync(pathFD2);
}
@@ -3,7 +3,7 @@ import { VersioningConstants } from './constants';
const VID_SEP = VersioningConstants.VersionId.Separator;
/**
 * Class for manipulating an object version.
 * The format of a version: { isNull, isDeleteMarker, versionId, otherInfo }
 * The format of a version: { isNull, isNull2, isDeleteMarker, versionId, otherInfo }
 *
 * @note Some of these functions are optimized based on string search
 * prior to a full JSON parse/stringify. (Vinh: 18K op/s are achieved
@@ -13,24 +13,31 @@ const VID_SEP = VersioningConstants.VersionId.Separator;
export class Version {
    version: {
        isNull?: boolean;
        isNull2?: boolean;
        isDeleteMarker?: boolean;
        versionId?: string;
        isPHD?: boolean;
        nullVersionId?: string;
    };

    /**
     * Create a new version instantiation from its data object.
     * @param version - the data object to instantiate
     * @param version.isNull - is a null version
     * @param version.isNull2 - Whether new version is null or not AND has
     * been put with a Cloudserver handling null keys (i.e. supporting
     * S3C-7352)
     * @param version.isDeleteMarker - is a delete marker
     * @param version.versionId - the version id
     * @constructor
     */
    constructor(version?: {
        isNull?: boolean;
        isNull2?: boolean;
        isDeleteMarker?: boolean;
        versionId?: string;
        isPHD?: boolean;
        nullVersionId?: string;
    }) {
        this.version = version || {};
    }
@ -83,6 +90,33 @@ export class Version {
|
|||
return `{ "isPHD": true, "versionId": "${versionId}" }`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Appends a key-value pair to a JSON object represented as a string. It adds
|
||||
* a comma if the object is not empty (i.e., not just '{}'). It assumes the input
|
||||
* string is formatted as a JSON object.
|
||||
*
|
||||
* @param {string} stringifiedObject The JSON object as a string to which the key-value pair will be appended.
|
||||
* @param {string} key The key to append to the JSON object.
|
||||
* @param {string} value The value associated with the key to append to the JSON object.
|
||||
* @returns {string} The updated JSON object as a string with the new key-value pair appended.
|
||||
* @example
|
||||
* _jsonAppend('{"existingKey":"existingValue"}', 'newKey', 'newValue');
|
||||
* // returns '{"existingKey":"existingValue","newKey":"newValue"}'
|
||||
*/
|
||||
static _jsonAppend(stringifiedObject: string, key: string, value: string): string {
|
||||
// stringifiedObject value has the format of '{...}'
|
||||
let index = stringifiedObject.length - 2;
|
||||
while (stringifiedObject.charAt(index) === ' ') {
|
||||
index -= 1;
|
||||
}
|
||||
const needComma = stringifiedObject.charAt(index) !== '{';
|
||||
return (
|
||||
`${stringifiedObject.slice(0, stringifiedObject.length - 1)}` +
|
||||
(needComma ? ',' : '') +
|
||||
`"${key}":"${value}"}`
|
||||
);
|
||||
}
|
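A few illustrative calls (inputs hypothetical), showing why the whitespace scan is needed before deciding on the comma:

    Version._jsonAppend('{}', 'versionId', 'v1');
    // -> '{"versionId":"v1"}'                (empty object: no comma)
    Version._jsonAppend('{"isNull":true}', 'versionId', 'v1');
    // -> '{"isNull":true,"versionId":"v1"}'  (non-empty object: comma added)
    Version._jsonAppend('{ }', 'versionId', 'v1');
    // -> '{ "versionId":"v1"}'               (whitespace-only object: still no comma)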

     /**
      * Put versionId into an object in the (cheap) way of string manipulation,
      * instead of the more expensive alternative parsing and stringification.
@@ -93,14 +127,32 @@ export class Version {
      */
     static appendVersionId(value: string, versionId: string): string {
-        // assuming value has the format of '{...}'
-        let index = value.length - 2;
-        while (value.charAt(index--) === ' ');
-        const comma = value.charAt(index + 1) !== '{';
-        return (
-            `${value.slice(0, value.length - 1)}` + // eslint-disable-line
-            (comma ? ',' : '') +
-            `"versionId":"${versionId}"}`
-        );
+        return Version._jsonAppend(value, 'versionId', versionId);
     }

+    /**
+     * Updates or appends a `nullVersionId` property to a JSON-formatted string.
+     * This function first checks if the `nullVersionId` property already exists within the input string.
+     * If it exists, the function updates the `nullVersionId` with the new value provided.
+     * If it does not exist, the function appends a `nullVersionId` property with the provided value.
+     *
+     * @static
+     * @param {string} value - The JSON-formatted string that may already contain a `nullVersionId` property.
+     * @param {string} nullVersionId - The new value for the `nullVersionId` property to be updated or appended.
+     * @returns {string} The updated JSON-formatted string with the new `nullVersionId` value.
+     */
+    static updateOrAppendNullVersionId(value: string, nullVersionId: string): string {
+        // Check if "nullVersionId" already exists in the string
+        const nullVersionIdPattern = /"nullVersionId":"[^"]*"/;
+        const nullVersionIdExists = nullVersionIdPattern.test(value);
+
+        if (nullVersionIdExists) {
+            // Replace the existing nullVersionId with the new one
+            return value.replace(nullVersionIdPattern, `"nullVersionId":"${nullVersionId}"`);
+        } else {
+            // Append nullVersionId
+            return Version._jsonAppend(value, 'nullVersionId', nullVersionId);
+        }
+    }
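Both branches at a glance (values hypothetical):

    Version.updateOrAppendNullVersionId('{"isDeleteMarker":false}', 'vA');
    // -> '{"isDeleteMarker":false,"nullVersionId":"vA"}'  (appended via _jsonAppend)
    Version.updateOrAppendNullVersionId('{"nullVersionId":"vA"}', 'vB');
    // -> '{"nullVersionId":"vB"}'                         (replaced in place)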

     /**
@@ -121,6 +173,19 @@ export class Version {
         return this.version.isNull ?? false;
     }

+    /**
+     * Check if a version is a null version that has been put by a
+     * Cloudserver handling null keys (i.e. supporting S3C-7352).
+     *
+     * @return - whether the value is a null version that has been put by a
+     * Cloudserver handling null keys (i.e. supporting S3C-7352).
+     */
+    isNull2Version(): boolean {
+        return this.version.isNull2 ?? false;
+    }
+
     /**
      * Check if a stringified object is a delete marker.
      *
@@ -190,6 +255,19 @@ export class Version {
         return this;
     }

+    /**
+     * Mark that the null version has been put by a Cloudserver handling null keys (i.e. supporting S3C-7352).
+     *
+     * If `isNull2` is set, `isNull` is also set to maintain consistency.
+     * Explicitly setting both avoids misunderstandings and mistakes in future updates or fixes.
+     * @return - the updated version
+     */
+    setNull2Version() {
+        this.version.isNull2 = true;
+        this.version.isNull = true;
+        return this;
+    }
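A quick sketch of the flag pairing (version ID hypothetical):

    const v = new Version({ versionId: 'v1' });
    v.setNull2Version();
    v.isNull2Version();  // true
    v.isNullVersion();   // true: isNull is always set alongside isNull2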

     /**
      * Serialize the version.
      *

@@ -1,6 +1,8 @@
+import { RequestLogger } from 'werelogs';
+
 import errors, { ArsenalError } from '../errors';
 import { Version } from './Version';
-import { generateVersionId as genVID } from './VersionID';
+import { generateVersionId as genVID, getInfVid } from './VersionID';
 import WriteCache from './WriteCache';
 import WriteGatheringManager from './WriteGatheringManager';

@@ -22,11 +24,11 @@ function getPrefixUpperBoundary(prefix: string): string {
     return prefix;
 }

-function formatVersionKey(key: string, versionId: string) {
+function formatVersionKey(key: string, versionId: string): string {
     return `${key}${VID_SEP}${versionId}`;
 }

-function formatCacheKey(db: string, key: string) {
+function formatCacheKey(db: string, key: string): string {
     // using double VID_SEP to make sure the cache key is unique
     return `${db}${VID_SEP}${VID_SEP}${key}`;
 }

@@ -89,8 +91,10 @@ export default class VersioningRequestProcessor {
         callback: (error: ArsenalError | null, data?: any) => void,
     ) {
         const { db, key, options } = request;
+        logger.addDefaultFields({ bucket: db, key, options });
         if (options && options.versionId) {
-            const versionKey = formatVersionKey(key, options.versionId);
+            const keyVersionId = options.versionId === 'null' ? '' : options.versionId;
+            const versionKey = formatVersionKey(key, keyVersionId);
             return this.wgm.get({ db, key: versionKey }, logger, callback);
         }
         return this.wgm.get(request, logger, (err, data) => {
@@ -101,13 +105,82 @@ export default class VersioningRequestProcessor {
             if (!Version.isPHD(data)) {
                 return callback(null, data);
             }
-            logger.debug('master version is a PHD, getting the latest version',
-                { db, key });
+            logger.debug('master version is a PHD, getting the latest version');
             // otherwise, need to search for the latest version
             return this.getByListing(request, logger, callback);
         });
     }

+    /**
+     * Helper that lists version keys for a certain object key,
+     * sorted by version ID. If a null key exists for this object, it is
+     * sorted at the appropriate position by its internal version ID, and
+     * its internal version ID is appended to its key.
+     *
+     * @param {string} db - bucket name
+     * @param {string} key - object key
+     * @param {object} [options] - options object
+     * @param {number} [options.limit] - max version keys returned
+     * (returns all object version keys if not specified)
+     * @param {object} logger - logger of the request
+     * @param {function} callback - callback(err, {object|null} master, {array} versions)
+     *                              master: { key, value }
+     *                              versions: [{ key, value }, ...]
+     * @return {undefined}
+     */
+    listVersionKeys(db, key, options, logger, callback) {
+        const { limit } = options || {};
+        const listingParams: any = {};
+        let nullKeyLength;
+        // include master key in v0 listing
+        listingParams.gte = key;
+        listingParams.lt = `${key}${VID_SEPPLUS}`;
+        if (limit !== undefined) {
+            // may have to skip master + null key, so 2 extra to list in the worst case
+            listingParams.limit = limit + 2;
+        }
+        nullKeyLength = key.length + 1;
+        return this.wgm.list({
+            db,
+            params: listingParams,
+        }, logger, (err, rawVersions) => {
+            if (err) {
+                return callback(err);
+            }
+            if (rawVersions.length === 0) {
+                // object does not have any version key
+                return callback(null, null, []);
+            }
+            let versions = rawVersions;
+            let master;
+            // in v0 there is always a master key before versions
+            master = versions.shift();
+            if (versions.length === 0) {
+                return callback(null, master, []);
+            }
+            const firstItem = versions[0];
+            if (firstItem.key.length === nullKeyLength) {
+                // first version is the null key
+                const nullVersion = Version.from(firstItem.value);
+                const nullVersionKey = formatVersionKey(key, <string> nullVersion.getVersionId());
+                // find null key's natural versioning order in the list
+                let nullPos = versions.findIndex(item => item.key > nullVersionKey);
+                if (nullPos === -1) {
+                    nullPos = versions.length;
+                }
+                // move the null key to the correct position and append its real version ID to its key
+                versions = versions.slice(1, nullPos)
+                    .concat([{ key: nullVersionKey, value: firstItem.value, isNullKey: true }])
+                    .concat(versions.slice(nullPos));
+            }
+            if (limit !== undefined) {
+                // truncate versions to 'limit' entries
+                versions.splice(limit);
+            }
+            return callback(null, master, versions);
+        });
+    }
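To picture what the helper reorders, here is a hypothetical v0 listing for an object 'doc', with VID_SEP rendered as '\0' and shortened version IDs (newer versions get lexicographically smaller IDs):

    // raw listing order (lexicographic), as wgm.list() would return it:
    const rawVersions = [
        { key: 'doc', value: '{"versionId":"v1"}' },                 // master
        { key: 'doc\0', value: '{"isNull":true,"versionId":"v2"}' }, // null key sorts first
        { key: 'doc\0v1', value: '{"versionId":"v1"}' },
        { key: 'doc\0v3', value: '{"versionId":"v3"}' },
    ];
    // listVersionKeys() would hand back:
    //   master   = { key: 'doc', ... }
    //   versions = [ 'doc\0v1', 'doc\0v2' (isNullKey: true), 'doc\0v3' ]
    // i.e. the null key is re-sorted under its real version ID 'v2' and its
    // key is rewritten to 'doc\0v2'.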

     /**
      * Get the latest version of an object when the master version is a place
      * holder for deletion. For any given pair of db and key, only a
@@ -132,39 +205,39 @@ export default class VersioningRequestProcessor {
         if (!this.enqueueGet(request, logger, callback)) {
             return null;
         }
-        logger.info('start listing latest versions', { request });
+        logger.info('start listing latest versions');
         // otherwise, search for the latest version
         const cacheKey = formatCacheKey(request.db, request.key);
         clearTimeout(this.repairing[cacheKey]);
         delete this.repairing[cacheKey];
-        const req = { db: request.db, params: {
-            gte: request.key, lt: `${request.key}${VID_SEPPLUS}`, limit: 2 } };
-        return this.wgm.list(req, logger, (err, list) => {
-            logger.info('listing latest versions done', { err, list });
+        return this.listVersionKeys(request.db, request.key, {
+            limit: 1,
+        }, logger, (err, master, versions) => {
+            logger.info('listing latest versions done', { err, master, versions });
             if (err) {
                 return this.dequeueGet(request, err);
             }
-            // the complete list of versions is always: mst, v1, v2, ...
-            if (list.length === 0) {
+            if (!master) {
                 return this.dequeueGet(request, errors.ObjNotFound);
             }
-            if (!Version.isPHD(list[0].value)) {
-                return this.dequeueGet(request, null, list[0].value);
+            if (!Version.isPHD(master.value)) {
+                return this.dequeueGet(request, null, master.value);
             }
-            if (list.length === 1) {
-                logger.info('no other versions', { request });
+            if (versions.length === 0) {
+                logger.info('no other versions');
                 this.dequeueGet(request, errors.ObjNotFound);
                 return this.repairMaster(request, logger,
-                    { type: 'del',
-                        value: list[0].value });
+                    { type: 'del', value: master.value });
             }
             // need repair
-            logger.info('update master by the latest version', { request });
-            const nextValue = list[1].value;
-            this.dequeueGet(request, null, nextValue);
+            logger.info('update master by the latest version');
+            const next = {
+                value: versions[0].value,
+                isNullKey: versions[0].isNullKey,
+            };
+            this.dequeueGet(request, null, next.value);
             return this.repairMaster(request, logger,
-                { type: 'put', value: list[0].value,
-                    nextValue });
+                { type: 'put', value: master.value, next });
         });
     }

@@ -227,42 +300,60 @@ export default class VersioningRequestProcessor {
      * RepdConnection format { db, key
      * [, value][, type], method, options }
      * @param logger - logger
-     * @param hints - storing repairing hints
-     * @param hints.type - type of repair operation ('put' or 'del')
-     * @param hints.value - existing value of the master version (PHD)
-     * @param hints.nextValue - the suggested latest version (for 'put')
+     * @param {object} data - storing repairing hints
+     * @param {string} data.value - existing value of the master version (PHD)
+     * @param {object} data.next - the suggested latest version
+     * @param {string} data.next.value - the suggested latest version value
+     * @param {boolean} data.next.isNullKey - whether the suggested
+     * latest version is a null key
      * @return - to finish the call
      */
-    repairMaster(request: any, logger: RequestLogger, hints: {
+    repairMaster(request: any, logger: RequestLogger, data: {
         type: 'put' | 'del';
         value: string;
-        nextValue?: string;
+        next?: {
+            value: string;
+            isNullKey: boolean;
+        };
     }) {
         const { db, key } = request;
-        logger.info('start repair process', { request });
+        logger.info('start repair process');
         this.writeCache.get({ db, key }, logger, (err, value) => {
             // error or the new version is not a place holder for deletion
             if (err) {
-                return logger.info('error repairing', { request, error: err });
+                if (err.is.ObjNotFound) {
+                    return logger.debug('did not repair master: PHD was deleted');
+                } else {
+                    return logger.error('error repairing', { error: err });
+                }
             }
             if (!Version.isPHD(value)) {
-                return logger.debug('master is updated already', { request });
+                return logger.debug('master is updated already');
             }
             // the latest version is the same place holder for deletion
-            if (hints.value === value) {
+            if (data.value === value) {
                 // update the latest version with the next version
+                const ops: any = [];
+                if (data.next) {
+                    ops.push({ key, value: data.next.value });
+                    // cleanup the null key if it is the new master
+                    if (data.next.isNullKey) {
+                        ops.push({ key: formatVersionKey(key, ''), type: 'del' });
+                    }
+                } else {
+                    ops.push({ key, type: 'del' });
+                }
                 const repairRequest = {
                     db,
-                    array: [
-                        { type: hints.type, key, value: hints.nextValue },
-                    ] };
+                    array: ops,
+                };
                 logger.info('replicate repair request', { repairRequest });
                 return this.writeCache.batch(repairRequest, logger, () => {});
             }
             // The latest version is an updated place holder for deletion;
             // repeat the repair process from listing for latest versions.
             // The queue will ensure a single repair process at any moment.
+            logger.info('latest version is an updated PHD');
             return this.getByListing(request, logger, () => {});
         });
     }
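As a concrete (hypothetical) trace of the new branch: when the suggested latest version came back from listVersionKeys() flagged as the null key, the repair batch both promotes it to master and drops the now-redundant null key:

    // data.next = { value: '<null version metadata>', isNullKey: true }
    const ops = [
        { key: 'doc', value: '<null version metadata>' },  // repaired master
        { key: 'doc\0', type: 'del' },                     // formatVersionKey('doc', '')
    ];
    // with data.next undefined (type 'del'), the batch is just
    // [{ key: 'doc', type: 'del' }]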

@@ -284,6 +375,7 @@ export default class VersioningRequestProcessor {
         callback: (error: ArsenalError | null, data?: any) => void,
     ) {
         const { db, key, value, options } = request;
+        logger.addDefaultFields({ bucket: db, key, options });
         // valid combinations of versioning options:
         // - !versioning && !versionId: normal non-versioning put
         // - versioning && !versionId: create a new version
@@ -337,6 +429,7 @@ export default class VersioningRequestProcessor {
         versionId: string,
     ) => void,
     ) {
+        logger.info('process new version put');
         // making a new versionId and a new version key
         const versionId = this.generateVersionId();
         const versionKey = formatVersionKey(request.key, versionId);
@@ -365,12 +458,22 @@ export default class VersioningRequestProcessor {
         logger: RequestLogger,
         callback: (err: ArsenalError | null, data?: any, versionId?: string) => void,
     ) {
+        logger.info('process version specific put');
         const { db, key } = request;
         // versionId is empty: update the master version
         if (request.options.versionId === '') {
             const versionId = this.generateVersionId();
             const value = Version.appendVersionId(request.value, versionId);
-            return callback(null, [{ key, value }], versionId);
+            const ops: any = [{ key, value }];
+            if (request.options.deleteNullKey) {
+                const nullKey = formatVersionKey(key, '');
+                ops.push({ key: nullKey, type: 'del' });
+            }
+            return callback(null, ops, versionId);
         }
+        if (request.options.versionId === 'null') {
+            const nullKey = formatVersionKey(key, '');
+            return callback(null, [{ key: nullKey, value: request.value }], 'null');
+        }
         // need to get the master version to check if this is the master version
         this.writeCache.get({ db, key }, logger, (err, data) => {
@@ -378,14 +481,115 @@ export default class VersioningRequestProcessor {
                 return callback(err);
             }
             const versionId = request.options.versionId;
-            const versionKey = formatVersionKey(request.key, versionId);
-            const ops = [{ key: versionKey, value: request.value }];
-            if (data === undefined ||
-                (Version.from(data).getVersionId() ?? '') >= versionId) {
-                // master does not exist or is not newer than put
-                // version and needs to be updated as well.
-                // Note that older versions have a greater version ID.
-                ops.push({ key: request.key, value: request.value });
-            }
+            const versionKey = formatVersionKey(key, versionId);
+            const ops: any = [];
+            const masterVersion = data !== undefined &&
+                Version.from(data);
+            // push a version key if we are not updating the null
+            // version (or in legacy Cloudservers not sending the
+            // 'isNull' parameter, but this has an issue, see S3C-7526)
+            if (request.options.isNull !== true) {
+                const versionOp = { key: versionKey, value: request.value };
+                ops.push(versionOp);
+            }
+            if (masterVersion) {
+                // master key exists
+                // note that older versions have a greater version ID
+                const versionIdFromMaster = masterVersion.getVersionId();
+                if (versionIdFromMaster === undefined ||
+                    versionIdFromMaster >= versionId) {
+                    let value = request.value;
+                    logger.debug('version to put is not older than master');
+                    // Delete the deprecated null key for backward compatibility,
+                    // to avoid storing both the deprecated and the new null key.
+                    // If the master null version was put with an older Cloudserver (or in compat mode),
+                    // it may also have an associated null versioned key,
+                    // which must be deleted as the null key is written.
+                    // The deprecated null key gets deleted when the new Cloudserver:
+                    // - updates metadata of a null master (options.isNull=true)
+                    // - puts metadata on top of a master null key (options.isNull=false)
+                    if (request.options.isNull !== undefined && // new null key behavior when isNull is defined
+                        masterVersion.isNullVersion() && // master is null
+                        !masterVersion.isNull2Version()) { // master does not support the new null key behavior yet
+                        const masterNullVersionId = masterVersion.getVersionId();
+                        // The deprecated null key is referenced in the "versionId" property of the master key.
+                        if (masterNullVersionId) {
+                            const oldNullVersionKey = formatVersionKey(key, masterNullVersionId);
+                            ops.push({ key: oldNullVersionKey, type: 'del' });
+                        }
+                    }
+                    // The new behavior when isNull is defined is to only
+                    // update the master key if it is the latest
+                    // version; the old behavior needs to copy master to
+                    // the null version because older Cloudservers
+                    // rely on version-specific PUT to copy master
+                    // contents to a new null version key (newer ones
+                    // use special versionId="null" requests for this
+                    // purpose).
+                    if (versionIdFromMaster !== versionId ||
+                        request.options.isNull === undefined) {
+                        // master key is strictly older than the put version
+                        let masterVersionId;
+                        if (masterVersion.isNullVersion() && versionIdFromMaster) {
+                            logger.debug('master key is a null version');
+                            masterVersionId = versionIdFromMaster;
+                        } else if (versionIdFromMaster === undefined) {
+                            logger.debug('master key is nonversioned');
+                            // master key does not have a versionID
+                            // => create one with the "infinite" version ID
+                            masterVersionId = getInfVid(this.replicationGroupId);
+                            masterVersion.setVersionId(masterVersionId);
+                        } else {
+                            logger.debug('master key is a regular version');
+                        }
+                        if (request.options.isNull === true) {
+                            if (!masterVersionId) {
+                                // master is a regular version: delete the null key that
+                                // may exist (older null version)
+                                logger.debug('delete null key');
+                                const nullKey = formatVersionKey(key, '');
+                                ops.push({ key: nullKey, type: 'del' });
+                            }
+                        } else if (masterVersionId) {
+                            logger.debug('create version key from master version');
+                            // isNull === false means Cloudserver supports null keys,
+                            // so create a null key in this case, and a version key otherwise
+                            const masterKeyVersionId = request.options.isNull === false ?
+                                '' : masterVersionId;
+                            const masterVersionKey = formatVersionKey(key, masterKeyVersionId);
+                            masterVersion.setNullVersion();
+                            // isNull === false means Cloudserver supports null keys,
+                            // so create a null key with the isNull2 flag
+                            if (request.options.isNull === false) {
+                                masterVersion.setNull2Version();
+                            // else isNull === undefined means Cloudserver does not support null keys,
+                            // and versionIdFromMaster !== versionId means a version is PUT on top of a null version,
+                            // hence set/update the new master nullVersionId for backward compatibility
+                            } else if (versionIdFromMaster !== versionId) {
+                                // => set the nullVersionId to the master version if the put version is on top of a null version
+                                value = Version.updateOrAppendNullVersionId(request.value, masterVersionId);
+                            }
+                            ops.push({ key: masterVersionKey,
+                                value: masterVersion.toString() });
+                        }
+                    } else {
+                        logger.debug('version to put is the master');
+                    }
+                    ops.push({ key, value: value });
+                } else {
+                    logger.debug('version to put is older than master');
+                    if (request.options.isNull === true && !masterVersion.isNullVersion()) {
+                        logger.debug('create or update null key');
+                        const nullKey = formatVersionKey(key, '');
+                        const nullKeyOp = { key: nullKey, value: request.value };
+                        ops.push(nullKeyOp);
+                        // for backward compatibility: remove the null version key
+                        ops.push({ key: versionKey, type: 'del' });
+                    }
+                }
+            } else {
+                // master key does not exist: create it
+                ops.push({ key, value: request.value });
+            }
             return callback(null, ops, versionId);
         });
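To make the compatibility dance concrete, here is one hypothetical walk through the branches above: a PUT of new version 'v1' (newer, so lexicographically smaller than the master's ID) by a null-key-aware Cloudserver (options.isNull === false), while the master is a legacy null version 'v2' (isNull set, isNull2 not set). Keys shown with VID_SEP as '\0'; values abridged:

    const ops = [
        { key: 'doc\0v1', value: '<new version>' },        // version key for the PUT
        { key: 'doc\0v2', type: 'del' },                   // deprecated null version key
        { key: 'doc\0', value: '<master copy, isNull + isNull2>' }, // migrated null key
        { key: 'doc', value: '<new version>' },            // master updated last
    ];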

@@ -399,8 +603,10 @@ export default class VersioningRequestProcessor {
         callback: (err: ArsenalError | null, data?: any) => void,
     ) {
         const { db, key, options } = request;
+        logger.addDefaultFields({ bucket: db, key, options });
         // no versioning or versioning configuration off
         if (!(options && options.versionId)) {
+            logger.info('process non-versioned delete');
             return this.writeCache.batch({ db,
                 array: [{ key, type: 'del' }] },
             logger, callback);
@@ -438,7 +644,12 @@ export default class VersioningRequestProcessor {
         versionId?: string,
     ) => void,
     ) {
+        logger.info('process version specific delete');
         const { db, key, options } = request;
+        if (options.versionId === 'null') {
+            const nullKey = formatVersionKey(key, '');
+            return callback(null, [{ key: nullKey, type: 'del' }], 'null');
+        }
         // deleting a specific version
         this.writeCache.get({ db, key }, logger, (err, data) => {
             if (err && !err.is.ObjNotFound) {
@@ -446,7 +657,8 @@ export default class VersioningRequestProcessor {
             }
             // delete the specific version
             const versionId = options.versionId;
-            const versionKey = formatVersionKey(key, versionId);
+            const keyVersionId = options.isNull ? '' : versionId;
+            const versionKey = formatVersionKey(key, keyVersionId);
             const ops: any = [{ key: versionKey, type: 'del' }];
             // update the master version as PHD if it is the deleting version
             if (Version.isPHD(data) ||
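The versionId-to-key mapping on the delete path mirrors the get/put paths (object key 'doc' and version ID 'v2' hypothetical, VID_SEP shown as '\0'):

    formatVersionKey('doc', '');    // 'doc\0'   -> versionId 'null', or options.isNull set
    formatVersionKey('doc', 'v2');  // 'doc\0v2' -> regular version-specific delete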

@@ -1,3 +1,5 @@
+import { RequestLogger } from 'werelogs';
+
 import errors, { ArsenalError } from '../errors';
 import WriteGatheringManager from './WriteGatheringManager';
Some files were not shown because too many files have changed in this diff.