Compare commits
497 Commits
w/8.6/impr
...
developmen
Author | SHA1 | Date |
---|---|---|
Vitaliy Filippov | b5711e9cbf | |
Vitaliy Filippov | 36dc6298d2 | |
Vitaliy Filippov | bc2d637578 | |
Vitaliy Filippov | b543695048 | |
Vitaliy Filippov | 90024d044d | |
Vitaliy Filippov | 451ab33f68 | |
Vitaliy Filippov | c86107e912 | |
Vitaliy Filippov | 0a5962f256 | |
Vitaliy Filippov | 0e292791c6 | |
Vitaliy Filippov | fc07729bd0 | |
Vitaliy Filippov | 4527dd6795 | |
Vitaliy Filippov | 05fb581023 | |
Vitaliy Filippov | 956739a04e | |
Vitaliy Filippov | 7ad0888a66 | |
Vitaliy Filippov | bf01ba4ed1 | |
Vitaliy Filippov | ab019e7e50 | |
Vitaliy Filippov | 3797695e74 | |
Vitaliy Filippov | c8084196c4 | |
bert-e | b72e918ff9 | |
bert-e | 22887f47d8 | |
bert-e | 0cd10a73f3 | |
bert-e | e139406612 | |
Maha Benzekri | d91853a38b | |
Mickael Bourgois | a7e798f909 | |
Mickael Bourgois | 3a1ba29869 | |
Mickael Bourgois | dbb9b6d787 | |
Mickael Bourgois | fce76f0934 | |
Mickael Bourgois | 0e39aaac09 | |
Mickael Bourgois | 0b14c93fac | |
Mickael Bourgois | ab2960bbf4 | |
Mickael Bourgois | 7305b112e2 | |
Mickael Bourgois | cd9e2e757b | |
Mickael Bourgois | ca0904f584 | |
Mickael Bourgois | 0dd3dd35e6 | |
bert-e | bf7e4b7e23 | |
bert-e | 92f4794727 | |
Jonathan Gramain | c6ef85e3a1 | |
Jonathan Gramain | c0fe0cfbcf | |
bert-e | 9c936f2b83 | |
bert-e | d26bac2ebc | |
Jonathan Gramain | cfb9db5178 | |
Jonathan Gramain | 2ce004751a | |
Jonathan Gramain | 539219e046 | |
Jonathan Gramain | be49e55db5 | |
bert-e | e6b240421b | |
bert-e | 81739e3ecf | |
Jonathan Gramain | c475503248 | |
bert-e | 7acbd5d2fb | |
Jonathan Gramain | 8d726322e5 | |
williamlardier | 4f7aa54886 | |
williamlardier | 0117a5b0b4 | |
williamlardier | f679831ba2 | |
williamlardier | bb162ca7d3 | |
williamlardier | 0c6dfc7b6e | |
williamlardier | d608d849df | |
williamlardier | 2cb63f58d4 | |
williamlardier | 51585712f4 | |
bert-e | 61eb24e46f | |
bert-e | a34b162782 | |
bert-e | a9e50fe046 | |
bert-e | 4150a8432e | |
Taylor McKinnon | 7e70ff9cbc | |
bert-e | 09dc45289c | |
bert-e | 47c628e0e1 | |
Nicolas Humbert | a1f4d3fe8a | |
williamlardier | 926242b077 | |
williamlardier | aa2aac5db3 | |
williamlardier | f2e2d82e51 | |
williamlardier | 88ad86b0c6 | |
bert-e | 8f25892247 | |
bert-e | 9ac207187b | |
Anurag Mittal | 624a04805f | |
Anurag Mittal | ba99933765 | |
williamlardier | 38d1ac1d2c | |
Taylor McKinnon | 4f34a34a11 | |
Taylor McKinnon | 53f2a159fa | |
Maha Benzekri | 63f6a75a86 | |
Maha Benzekri | 41acc7968e | |
williamlardier | c98c5207fc | |
williamlardier | 615ee393a4 | |
williamlardier | 97dfc699aa | |
williamlardier | 76786282d1 | |
williamlardier | a19d6524be | |
williamlardier | bbf6dfba22 | |
williamlardier | f0663fd507 | |
williamlardier | d4decbbd6c | |
williamlardier | 288b2b7b87 | |
williamlardier | ccf9b62e59 | |
williamlardier | 9fc2d552ae | |
williamlardier | d7cc4cf7d5 | |
williamlardier | 334d33ef44 | |
williamlardier | 989b0214d9 | |
williamlardier | 04d0730f97 | |
williamlardier | fbc642c022 | |
williamlardier | 104435f0b6 | |
williamlardier | a362ac202e | |
williamlardier | 1277e58150 | |
williamlardier | 7727ccf5f0 | |
williamlardier | 71860fc90c | |
williamlardier | e504b52de7 | |
Maha Benzekri | b369a47c4d | |
Maha Benzekri | b4fa81e832 | |
Maha Benzekri | 1e03d53879 | |
Maha Benzekri | 63e502d419 | |
Maha Benzekri | d2a31dc20a | |
Maha Benzekri | f24411875f | |
Maha Benzekri | 4fd7faa6a3 | |
Francois Ferrand | 118aaba702 | |
Francois Ferrand | e4442fdc52 | |
Francois Ferrand | 7fa199741f | |
Francois Ferrand | f7f95af78f | |
Francois Ferrand | 2dc053a784 | |
Francois Ferrand | cc9bb9047e | |
Francois Ferrand | b824fc0828 | |
Francois Ferrand | a2e6d91cf2 | |
Francois Ferrand | c1060853dd | |
Francois Ferrand | 227d6edd09 | |
bert-e | b4754c68ea | |
bert-e | 11aea5d93b | |
Nicolas Humbert | a22719ed47 | |
Nicolas Humbert | 41975d539d | |
bert-e | 8796bf0f44 | |
bert-e | 735fcd04ef | |
bert-e | 1dee707eb8 | |
Jonathan Gramain | 2c8d69c20a | |
bert-e | 9dc34f2155 | |
bert-e | 08a4c3ade3 | |
bert-e | 5435c14116 | |
bert-e | 38c44ea874 | |
bert-e | 5472d0da59 | |
bert-e | cdc0bb1128 | |
KillianG | 39cba3ee6c | |
KillianG | a00952712f | |
KillianG | a246e18e17 | |
KillianG | 3bb3a4d161 | |
bert-e | c6ba7f981e | |
bert-e | 762ae5a0ff | |
bert-e | 3205d117f5 | |
bert-e | 4cab3c84f3 | |
williamlardier | 0dcc93cdbe | |
williamlardier | 2f2f91d6e8 | |
bert-e | 1433973e5c | |
bert-e | 201170b1ed | |
bert-e | 242b2ec85a | |
bert-e | 3186a97113 | |
Will Toozs | 0118dfabbb | |
Will Toozs | ff40dfaadf | |
bert-e | 9c99a6980f | |
bert-e | d4e255781b | |
bert-e | 1afaaec0ac | |
bert-e | e20e458971 | |
bert-e | bef9220032 | |
bert-e | de20f1efdc | |
bert-e | b89d19c9f8 | |
Nicolas Humbert | 4dc9788629 | |
bert-e | 06dc042154 | |
bert-e | aa4643644a | |
Francois Ferrand | 4c7d3ae4bc | |
Francois Ferrand | 23883dae8b | |
Francois Ferrand | 531c83a359 | |
Francois Ferrand | b84fa851f7 | |
Francois Ferrand | 4cb1a879f7 | |
Francois Ferrand | 7ae55b20e7 | |
Hervé Dombya | 363afcd17f | |
Frédéric Meinnel | 1cf0250ce9 | |
Frédéric Meinnel | 20d0b38d0b | |
Frédéric Meinnel | 601619f200 | |
Frédéric Meinnel | a92e71fd50 | |
bert-e | 43f62b847c | |
bert-e | a031905bba | |
bert-e | cd2406b827 | |
bert-e | 62f707caff | |
bert-e | 848bf318fe | |
bert-e | 0beb48a1fd | |
Will Toozs | d274acd8ed | |
Will Toozs | e6d9e8fc35 | |
bert-e | 7bb004586d | |
bert-e | d48de67723 | |
bert-e | b141c59bb7 | |
bert-e | 0b79ecd942 | |
bert-e | 10ca6b98fa | |
bert-e | 171925732f | |
bert-e | 70e8b20af9 | |
bert-e | 0ec5f4fee5 | |
bert-e | e600677545 | |
bert-e | 72e5da10b7 | |
bert-e | 759817c5a0 | |
bert-e | 035c7e8d7f | |
bert-e | de27a5b88e | |
bert-e | a4cc5e45f3 | |
bert-e | 9a8b707e82 | |
bert-e | 002dbe0019 | |
bert-e | d803bdcadc | |
bert-e | 4f1b8f25b7 | |
bert-e | e969eeaa20 | |
bert-e | 2ee78bcf6a | |
bert-e | f31fe2f2bf | |
bert-e | ee47cece90 | |
Mickael Bourgois | 2d50a76923 | |
Mickael Bourgois | 6b4f10ae56 | |
williamlardier | dbda5f16a6 | |
Maha Benzekri | 2959c950dd | |
Maha Benzekri | 462ddf7ef1 | |
Jonathan Gramain | ea7b69e313 | |
Jonathan Gramain | 8ec1c2f2db | |
bert-e | 43f9606598 | |
bert-e | be34e5ad59 | |
Mickael Bourgois | 3ce869cea3 | |
Mickael Bourgois | b7960784db | |
bert-e | bf235f3335 | |
bert-e | 569c9f4368 | |
bert-e | 1a3cb8108c | |
bert-e | 042120b17e | |
bert-e | ba4593592d | |
bert-e | 6efdb627da | |
bert-e | e5b692f3db | |
bert-e | 548ae8cd12 | |
bert-e | 2a919af071 | |
bert-e | 5c300b8b6c | |
Maha Benzekri | 99068e7265 | |
Maha Benzekri | cd039d8133 | |
Maha Benzekri | 75b293df8d | |
Maha Benzekri | a855e38998 | |
Maha Benzekri | ffe4ea4afe | |
Maha Benzekri | a16cfad0fc | |
bert-e | 556163e3e9 | |
Maha Benzekri | 869d554e43 | |
Maha Benzekri | 2f8b228595 | |
Maha Benzekri | e44b7ed918 | |
Maha Benzekri | 3cb29f7f8e | |
Maha Benzekri | 4f08a4dff2 | |
Maha Benzekri | 15a1aa7965 | |
Maha Benzekri | 4470ee9125 | |
Francois Ferrand | d8c12597ea | |
Francois Ferrand | c8eb9025fa | |
Francois Ferrand | 57e0f71e6a | |
Francois Ferrand | f22f920ee2 | |
Maha Benzekri | ed1bb6301d | |
Maha Benzekri | 70dfa5b11b | |
Francois Ferrand | a4e6f9d034 | |
Maha Benzekri | cf94b9de6a | |
Maha Benzekri | da0492d2bb | |
Maha Benzekri | 979b9065ed | |
Maha Benzekri | d5a3923f74 | |
bert-e | bc291fe3a7 | |
bert-e | 8dc7432c51 | |
bert-e | 6f963bdcd9 | |
bert-e | cd9024fd32 | |
bert-e | dff7610060 | |
bert-e | 757c2537ef | |
bert-e | 4515b2adbf | |
bert-e | 50ffdd260b | |
bert-e | b5f22d8c68 | |
bert-e | 68ff54d49a | |
bert-e | 3fe5579c80 | |
bert-e | 3fdd2bce21 | |
bert-e | c9b512174f | |
bert-e | 7b48624cf7 | |
bert-e | 55b07def2e | |
bert-e | fcc9468b63 | |
bert-e | efc44a620d | |
bert-e | 1bc19b39d7 | |
bert-e | b5fa3a1fd3 | |
bert-e | c0fc958365 | |
bert-e | d3c74d2c16 | |
Kerkesni | 07eda89a3f | |
bert-e | 27b4066ca4 | |
bert-e | 2ee5b356fa | |
bert-e | f5d3433413 | |
bert-e | 62b4b9bc25 | |
bert-e | ec56c77881 | |
bert-e | d0abde3962 | |
bert-e | fdc682f2db | |
bert-e | b184606dc2 | |
Maha Benzekri | 9ce0f2c2b6 | |
Maha Benzekri | 43b4e0c713 | |
bert-e | 9185f16554 | |
bert-e | 2df9a57f9c | |
bert-e | 68535f83d6 | |
bert-e | 41d63650be | |
bert-e | 12185f7c3b | |
bert-e | 5f82ee2d0e | |
Taylor McKinnon | d72bc5c6b9 | |
Taylor McKinnon | 0e47810963 | |
bert-e | 3b36cef85f | |
Jonathan Gramain | 114b885c7f | |
williamlardier | 3b95c033d2 | |
williamlardier | 04091dc316 | |
williamlardier | 56023a80ed | |
bert-e | 2deaebd89a | |
bert-e | c706ccf9c6 | |
Francois Ferrand | 583ea8490f | |
bert-e | 85a9480793 | |
bert-e | be2f65b69e | |
bert-e | 1ee6d0a87d | |
bert-e | 224af9a5d2 | |
bert-e | 74f05377f0 | |
bert-e | 111e14cc89 | |
Florent Monjalet | 00b20f00d1 | |
Florent Monjalet | a91d53a12c | |
Florent Monjalet | 63d2637046 | |
Maha Benzekri | 5d416ad190 | |
Maha Benzekri | ff29cda03f | |
Florent Monjalet | cb8baf2dab | |
bert-e | 22f470c6eb | |
bert-e | e510473116 | |
Florent Monjalet | 17a6808fe4 | |
Florent Monjalet | df646e4802 | |
Florent Monjalet | 267770d256 | |
Florent Monjalet | 1b92dc2c05 | |
Florent Monjalet | f80bb2f34b | |
Florent Monjalet | 4f89b67bb9 | |
Florent Monjalet | 8b5630923c | |
Florent Monjalet | 9ff5e376e5 | |
Florent Monjalet | a9b5a2e3a4 | |
Florent Monjalet | 7e9ec22ae3 | |
bert-e | 9d4664ae06 | |
bert-e | 662265ba2e | |
Taylor McKinnon | 17e4f14f9c | |
Taylor McKinnon | 014b071536 | |
bert-e | 2d45f92ae1 | |
bert-e | 48452496fa | |
bert-e | 18bf6b8d4a | |
bert-e | 858c31a542 | |
bert-e | 19d3e0bc9d | |
bert-e | bac044dc8f | |
bert-e | 8c0f709014 | |
Francois Ferrand | ce92d33a5d | |
Kerkesni | 0381cce85c | |
Kerkesni | 20a08a2a4e | |
Kerkesni | ff73d8ab12 | |
Kerkesni | 1ee44bc6d3 | |
bert-e | 614e876536 | |
bert-e | b40a77d94b | |
bert-e | 3a3a73b756 | |
bert-e | 3f6e85590d | |
bert-e | bc009945d2 | |
bert-e | 3ac30d9bab | |
bert-e | 32204fbfbf | |
bert-e | 5a26e1a80d | |
bert-e | 507a2d4ff5 | |
bert-e | 1207a6fb70 | |
bert-e | 5883286864 | |
bert-e | 2a37e809d9 | |
bert-e | 86ce7691cd | |
bert-e | e466b5e92a | |
bert-e | a4bc10f730 | |
Nicolas Humbert | c480301e95 | |
Nicolas Humbert | 276be285cc | |
bert-e | 897d41392a | |
bert-e | f4e3a19d61 | |
williamlardier | 7c52fcbbb0 | |
bert-e | da52688a39 | |
bert-e | 1cb54a66f8 | |
bert-e | d9fffdad9e | |
williamlardier | 389c32f819 | |
Kerkesni | d26b8bcfcc | |
Kerkesni | e4634621ee | |
williamlardier | 0b58b3ad2a | |
bert-e | 652bf92536 | |
bert-e | 344ee8a014 | |
bert-e | b7e7f65d52 | |
bert-e | c5b7450a4d | |
Nicolas Humbert | 18c8d4ecac | |
Nicolas Humbert | c8150c6857 | |
bert-e | 399a2a53ab | |
Alexander Chan | bbad049b5f | |
bert-e | 2a4e2e1584 | |
bert-e | b304d05614 | |
bert-e | 004bd63368 | |
Nicolas Humbert | 960d736962 | |
KillianG | 32401c9a83 | |
KillianG | 5f05b676cc | |
KillianG | fd662a8c2c | |
bert-e | 5d54dd58be | |
Nicolas Humbert | 1bd0deafcf | |
Francois Ferrand | 7c788d3dbf | |
Nicolas Humbert | 50cb6a2bf1 | |
bert-e | 58f7bb2877 | |
Francois Ferrand | ea284508d7 | |
Francois Ferrand | 0981fa42f3 | |
Francois Ferrand | 7e63064a52 | |
Francois Ferrand | 71b21e40ca | |
Francois Ferrand | ff894bb545 | |
Francois Ferrand | ae9f24e1bb | |
bert-e | 2dc01ce3ed | |
Kerkesni | 9bd9bef6c7 | |
bert-e | a6a5c273d5 | |
Kerkesni | 6479076fec | |
bert-e | df45f481d0 | |
bert-e | cd8c589eba | |
williamlardier | daec2661ae | |
Francois Ferrand | 0f266371a0 | |
Francois Ferrand | 73e56963bf | |
Alexander Chan | fb11d0f42e | |
williamlardier | 9cbd9f7be8 | |
williamlardier | c2fc8873cb | |
Francois Ferrand | bee1ae04bf | |
Francois Ferrand | eb86552a57 | |
bert-e | f5d8f2fac5 | |
bert-e | 36e841b542 | |
williamlardier | 1d12a430a0 | |
williamlardier | bea27b4fb4 | |
williamlardier | 76405d9179 | |
Alexander Chan | 31b7f1e71c | |
Alexander Chan | 8674cac9f8 | |
KillianG | d5b666a246 | |
KillianG | 4360772971 | |
KillianG | 6e152e33d5 | |
KillianG | 94f34979a5 | |
bert-e | 4b0f165b46 | |
Nicolas Humbert | 3590377554 | |
bert-e | 8a08f97492 | |
bert-e | 448afa50e3 | |
bert-e | 50b738cfff | |
bert-e | 951a98fcaf | |
bert-e | 8ca770dcb7 | |
bert-e | 3585b8d5eb | |
bert-e | 0a1489ee46 | |
Xin LI | de5b4331e2 | |
bert-e | 46dff0321d | |
bert-e | ddc6ea72be | |
bert-e | d266ff4e9f | |
bert-e | 7dc2f07cb6 | |
Kerkesni | 6c22d87c55 | |
Kerkesni | 310f67d3a7 | |
Kerkesni | 49841c5e0e | |
Kerkesni | b5334baca8 | |
Kerkesni | e592671b54 | |
bert-e | 6e0b66849d | |
bert-e | 18a1bfd325 | |
bert-e | 2c999f4c10 | |
bert-e | bf7a643d45 | |
bert-e | 3f3bf0fdf0 | |
bert-e | 2a44949048 | |
bert-e | 6660626190 | |
williamlardier | 58fc0b7146 | |
williamlardier | 11e3d7ecb2 | |
williamlardier | 1bab851ce3 | |
bert-e | 0bc0341f33 | |
bert-e | b5b0f6482b | |
bert-e | 755f282f8e | |
bert-e | c4dc928de2 | |
Killian Gardahaut | a0087e8d77 | |
KillianG | 8e5bea56b6 | |
KillianG | 976e349036 | |
KillianG | de1c23ac1b | |
KillianG | 0b4d04a2a3 | |
KillianG | 049d396c8d | |
Naren | 5c04cbe6d1 | |
bert-e | 5cb63991a8 | |
Alexander Chan | c310cb3dd1 | |
bert-e | 22cda51944 | |
williamlardier | 408d0de732 | |
williamlardier | 83916c91fb | |
bert-e | 110b2a35ed | |
williamlardier | a8117ca037 | |
bert-e | 9145d1cf79 | |
bert-e | ae1b6dc3d1 | |
bert-e | b1304b5f7f | |
bert-e | 6b1f8c61ec | |
bert-e | 335bfabed1 | |
bert-e | 3398db3c0f | |
bert-e | 836e9fb22d | |
bert-e | ead7f5f7c2 | |
bert-e | c17059dc77 | |
bert-e | 8ace5b24a5 | |
bert-e | 39f7035dbd | |
williamlardier | bb62ed4fa7 | |
williamlardier | c95368858d | |
bert-e | d8ff1377fc | |
Jonathan Gramain | 28f4c5baee | |
bert-e | 0a8f846f4b | |
Jonathan Gramain | ac5de47ca1 | |
williamlardier | c147785464 | |
williamlardier | ca8c788757 | |
williamlardier | cb2af364bb | |
williamlardier | 1eb27d610b | |
williamlardier | 73b295c91d | |
williamlardier | 8186c84bf9 | |
williamlardier | 93ef2d0545 | |
williamlardier | d7d0a31bb1 | |
williamlardier | 4c69b82508 | |
williamlardier | ca13284da3 | |
williamlardier | c6ed75a1d7 | |
williamlardier | 402d0dea1a | |
williamlardier | 95faec1db0 | |
Jonathan Gramain | ca9d53f430 | |
bert-e | b1ee1f8ef7 | |
williamlardier | e882cb6781 | |
Francois Ferrand | cb7303636c | |
Francois Ferrand | 6d0f889c23 | |
Francois Ferrand | c13f2ae6a5 | |
bert-e | b6611c4711 | |
bert-e | ae4ece471b | |
williamlardier | 15b61cd947 | |
williamlardier | 91536c575f |
|
@ -1,5 +1,8 @@
|
||||||
{
|
{
|
||||||
"extends": "scality",
|
"extends": "scality",
|
||||||
|
"plugins": [
|
||||||
|
"mocha"
|
||||||
|
],
|
||||||
"rules": {
|
"rules": {
|
||||||
"import/extensions": "off",
|
"import/extensions": "off",
|
||||||
"lines-around-directive": "off",
|
"lines-around-directive": "off",
|
||||||
|
@ -42,7 +45,8 @@
|
||||||
"no-restricted-properties": "off",
|
"no-restricted-properties": "off",
|
||||||
"new-parens": "off",
|
"new-parens": "off",
|
||||||
"no-multi-spaces": "off",
|
"no-multi-spaces": "off",
|
||||||
"quote-props": "off"
|
"quote-props": "off",
|
||||||
|
"mocha/no-exclusive-tests": "error",
|
||||||
},
|
},
|
||||||
"parserOptions": {
|
"parserOptions": {
|
||||||
"ecmaVersion": 2020
|
"ecmaVersion": 2020
|
||||||
|
|
|
@ -16,14 +16,14 @@ runs:
|
||||||
run: |-
|
run: |-
|
||||||
set -exu;
|
set -exu;
|
||||||
mkdir -p /tmp/artifacts/${JOB_NAME}/;
|
mkdir -p /tmp/artifacts/${JOB_NAME}/;
|
||||||
- uses: actions/setup-node@v2
|
- uses: actions/setup-node@v4
|
||||||
with:
|
with:
|
||||||
node-version: '16'
|
node-version: '16'
|
||||||
cache: 'yarn'
|
cache: 'yarn'
|
||||||
- name: install dependencies
|
- name: install dependencies
|
||||||
shell: bash
|
shell: bash
|
||||||
run: yarn install --ignore-engines --frozen-lockfile --network-concurrency 1
|
run: yarn install --ignore-engines --frozen-lockfile --network-concurrency 1
|
||||||
- uses: actions/cache@v2
|
- uses: actions/cache@v3
|
||||||
with:
|
with:
|
||||||
path: ~/.cache/pip
|
path: ~/.cache/pip
|
||||||
key: ${{ runner.os }}-pip
|
key: ${{ runner.os }}-pip
|
||||||
|
@ -35,3 +35,9 @@ runs:
|
||||||
run: |
|
run: |
|
||||||
sudo apt-get install -y libdigest-hmac-perl
|
sudo apt-get install -y libdigest-hmac-perl
|
||||||
pip install 's3cmd==2.3.0'
|
pip install 's3cmd==2.3.0'
|
||||||
|
- name: fix sproxyd.conf permissions
|
||||||
|
shell: bash
|
||||||
|
run: sudo chown root:root .github/docker/sproxyd/conf/sproxyd0.conf
|
||||||
|
- name: ensure fuse kernel module is loaded (for sproxyd)
|
||||||
|
shell: bash
|
||||||
|
run: sudo modprobe fuse
|
||||||
|
|
|
@ -40,6 +40,11 @@ services:
|
||||||
- DEFAULT_BUCKET_KEY_FORMAT
|
- DEFAULT_BUCKET_KEY_FORMAT
|
||||||
- METADATA_MAX_CACHED_BUCKETS
|
- METADATA_MAX_CACHED_BUCKETS
|
||||||
- ENABLE_NULL_VERSION_COMPAT_MODE
|
- ENABLE_NULL_VERSION_COMPAT_MODE
|
||||||
|
- SCUBA_HOST
|
||||||
|
- SCUBA_PORT
|
||||||
|
- SCUBA_HEALTHCHECK_FREQUENCY
|
||||||
|
- S3QUOTA
|
||||||
|
- QUOTA_ENABLE_INFLIGHTS
|
||||||
env_file:
|
env_file:
|
||||||
- creds.env
|
- creds.env
|
||||||
depends_on:
|
depends_on:
|
||||||
|
@ -67,14 +72,21 @@ services:
|
||||||
pykmip:
|
pykmip:
|
||||||
network_mode: "host"
|
network_mode: "host"
|
||||||
profiles: ['pykmip']
|
profiles: ['pykmip']
|
||||||
image: registry.scality.com/cloudserver-dev/pykmip
|
image: ${PYKMIP_IMAGE:-ghcr.io/scality/cloudserver/pykmip}
|
||||||
volumes:
|
volumes:
|
||||||
- /tmp/artifacts/${JOB_NAME}:/artifacts
|
- /tmp/artifacts/${JOB_NAME}:/artifacts
|
||||||
mongo:
|
mongo:
|
||||||
network_mode: "host"
|
network_mode: "host"
|
||||||
profiles: ['mongo', 'ceph']
|
profiles: ['mongo', 'ceph']
|
||||||
image: scality/ci-mongo:3.6.8
|
image: ${MONGODB_IMAGE}
|
||||||
ceph:
|
ceph:
|
||||||
network_mode: "host"
|
network_mode: "host"
|
||||||
profiles: ['ceph']
|
profiles: ['ceph']
|
||||||
image: ghcr.io/scality/cloudserver/ci-ceph
|
image: ghcr.io/scality/cloudserver/ci-ceph
|
||||||
|
sproxyd:
|
||||||
|
network_mode: "host"
|
||||||
|
profiles: ['sproxyd']
|
||||||
|
image: sproxyd-standalone
|
||||||
|
build: ./sproxyd
|
||||||
|
user: 0:0
|
||||||
|
privileged: yes
|
||||||
|
|
|
@ -0,0 +1,28 @@
|
||||||
|
FROM mongo:5.0.21
|
||||||
|
|
||||||
|
ENV USER=scality \
|
||||||
|
HOME_DIR=/home/scality \
|
||||||
|
CONF_DIR=/conf \
|
||||||
|
DATA_DIR=/data
|
||||||
|
|
||||||
|
# Set up directories and permissions
|
||||||
|
RUN mkdir -p /data/db /data/configdb && chown -R mongodb:mongodb /data/db /data/configdb; \
|
||||||
|
mkdir /logs; \
|
||||||
|
adduser --uid 1000 --disabled-password --gecos --quiet --shell /bin/bash scality
|
||||||
|
|
||||||
|
# Set up environment variables and directories for scality user
|
||||||
|
RUN mkdir ${CONF_DIR} && \
|
||||||
|
chown -R ${USER} ${CONF_DIR} && \
|
||||||
|
chown -R ${USER} ${DATA_DIR}
|
||||||
|
|
||||||
|
# copy the mongo config file
|
||||||
|
COPY /conf/mongod.conf /conf/mongod.conf
|
||||||
|
COPY /conf/mongo-run.sh /conf/mongo-run.sh
|
||||||
|
COPY /conf/initReplicaSet /conf/initReplicaSet.js
|
||||||
|
|
||||||
|
EXPOSE 27017/tcp
|
||||||
|
EXPOSE 27018
|
||||||
|
|
||||||
|
# Set up CMD
|
||||||
|
ENTRYPOINT ["bash", "/conf/mongo-run.sh"]
|
||||||
|
CMD ["bash", "/conf/mongo-run.sh"]
|
|
@ -0,0 +1,4 @@
|
||||||
|
rs.initiate({
|
||||||
|
_id: "rs0",
|
||||||
|
members: [{ _id: 0, host: "127.0.0.1:27018" }]
|
||||||
|
});
|
|
@ -0,0 +1,10 @@
|
||||||
|
#!/bin/bash
|
||||||
|
set -exo pipefail
|
||||||
|
|
||||||
|
init_RS() {
|
||||||
|
sleep 5
|
||||||
|
mongo --port 27018 /conf/initReplicaSet.js
|
||||||
|
}
|
||||||
|
init_RS &
|
||||||
|
|
||||||
|
mongod --bind_ip_all --config=/conf/mongod.conf
|
|
@ -0,0 +1,15 @@
|
||||||
|
storage:
|
||||||
|
journal:
|
||||||
|
enabled: true
|
||||||
|
engine: wiredTiger
|
||||||
|
dbPath: "/data/db"
|
||||||
|
processManagement:
|
||||||
|
fork: false
|
||||||
|
net:
|
||||||
|
port: 27018
|
||||||
|
bindIp: 0.0.0.0
|
||||||
|
replication:
|
||||||
|
replSetName: "rs0"
|
||||||
|
enableMajorityReadConcern: true
|
||||||
|
security:
|
||||||
|
authorization: disabled
|
|
@ -0,0 +1,3 @@
|
||||||
|
FROM ghcr.io/scality/federation/sproxyd:7.10.6.8
|
||||||
|
ADD ./conf/supervisord.conf ./conf/nginx.conf ./conf/fastcgi_params ./conf/sproxyd0.conf /conf/
|
||||||
|
RUN chown root:root /conf/sproxyd0.conf
|
|
@ -0,0 +1,26 @@
|
||||||
|
fastcgi_param QUERY_STRING $query_string;
|
||||||
|
fastcgi_param REQUEST_METHOD $request_method;
|
||||||
|
fastcgi_param CONTENT_TYPE $content_type;
|
||||||
|
fastcgi_param CONTENT_LENGTH $content_length;
|
||||||
|
|
||||||
|
#fastcgi_param SCRIPT_NAME $fastcgi_script_name;
|
||||||
|
fastcgi_param SCRIPT_NAME /var/www;
|
||||||
|
fastcgi_param PATH_INFO $document_uri;
|
||||||
|
|
||||||
|
fastcgi_param REQUEST_URI $request_uri;
|
||||||
|
fastcgi_param DOCUMENT_URI $document_uri;
|
||||||
|
fastcgi_param DOCUMENT_ROOT $document_root;
|
||||||
|
fastcgi_param SERVER_PROTOCOL $server_protocol;
|
||||||
|
fastcgi_param HTTPS $https if_not_empty;
|
||||||
|
|
||||||
|
fastcgi_param GATEWAY_INTERFACE CGI/1.1;
|
||||||
|
fastcgi_param SERVER_SOFTWARE nginx/$nginx_version;
|
||||||
|
|
||||||
|
fastcgi_param REMOTE_ADDR $remote_addr;
|
||||||
|
fastcgi_param REMOTE_PORT $remote_port;
|
||||||
|
fastcgi_param SERVER_ADDR $server_addr;
|
||||||
|
fastcgi_param SERVER_PORT $server_port;
|
||||||
|
fastcgi_param SERVER_NAME $server_name;
|
||||||
|
|
||||||
|
# PHP only, required if PHP was built with --enable-force-cgi-redirect
|
||||||
|
fastcgi_param REDIRECT_STATUS 200;
|
|
@ -0,0 +1,88 @@
|
||||||
|
worker_processes 1;
|
||||||
|
error_log /logs/error.log;
|
||||||
|
user root root;
|
||||||
|
events {
|
||||||
|
worker_connections 1000;
|
||||||
|
reuse_port on;
|
||||||
|
multi_accept on;
|
||||||
|
}
|
||||||
|
worker_rlimit_nofile 20000;
|
||||||
|
http {
|
||||||
|
root /var/www/;
|
||||||
|
upstream sproxyds {
|
||||||
|
least_conn;
|
||||||
|
keepalive 40;
|
||||||
|
server 127.0.0.1:20000;
|
||||||
|
}
|
||||||
|
server {
|
||||||
|
client_max_body_size 0;
|
||||||
|
client_body_timeout 150;
|
||||||
|
client_header_timeout 150;
|
||||||
|
postpone_output 0;
|
||||||
|
client_body_postpone_size 0;
|
||||||
|
keepalive_requests 1100;
|
||||||
|
keepalive_timeout 300s;
|
||||||
|
server_tokens off;
|
||||||
|
default_type application/octet-stream;
|
||||||
|
gzip off;
|
||||||
|
tcp_nodelay on;
|
||||||
|
tcp_nopush on;
|
||||||
|
sendfile on;
|
||||||
|
listen 81;
|
||||||
|
server_name localhost;
|
||||||
|
rewrite ^/arc/(.*)$ /dc1/$1 permanent;
|
||||||
|
location ~* ^/proxy/(.*)$ {
|
||||||
|
rewrite ^/proxy/(.*)$ /$1 last;
|
||||||
|
}
|
||||||
|
allow 127.0.0.1;
|
||||||
|
|
||||||
|
deny all;
|
||||||
|
set $usermd '-';
|
||||||
|
set $sentusermd '-';
|
||||||
|
set $elapsed_ms '-';
|
||||||
|
set $now '-';
|
||||||
|
log_by_lua '
|
||||||
|
if not(ngx.var.http_x_scal_usermd == nil) and string.len(ngx.var.http_x_scal_usermd) > 2 then
|
||||||
|
ngx.var.usermd = string.sub(ngx.decode_base64(ngx.var.http_x_scal_usermd),1,-3)
|
||||||
|
end
|
||||||
|
if not(ngx.var.sent_http_x_scal_usermd == nil) and string.len(ngx.var.sent_http_x_scal_usermd) > 2 then
|
||||||
|
ngx.var.sentusermd = string.sub(ngx.decode_base64(ngx.var.sent_http_x_scal_usermd),1,-3)
|
||||||
|
end
|
||||||
|
local elapsed_ms = tonumber(ngx.var.request_time)
|
||||||
|
if not ( elapsed_ms == nil) then
|
||||||
|
elapsed_ms = elapsed_ms * 1000
|
||||||
|
ngx.var.elapsed_ms = tostring(elapsed_ms)
|
||||||
|
end
|
||||||
|
local time = tonumber(ngx.var.msec) * 1000
|
||||||
|
ngx.var.now = time
|
||||||
|
';
|
||||||
|
log_format irm '{ "time":"$now","connection":"$connection","request":"$connection_requests","hrtime":"$msec",'
|
||||||
|
'"httpMethod":"$request_method","httpURL":"$uri","elapsed_ms":$elapsed_ms,'
|
||||||
|
'"httpCode":$status,"requestLength":$request_length,"bytesSent":$bytes_sent,'
|
||||||
|
'"contentLength":"$content_length","sentContentLength":"$sent_http_content_length",'
|
||||||
|
'"contentType":"$content_type","s3Address":"$remote_addr",'
|
||||||
|
'"requestUserMd":"$usermd","responseUserMd":"$sentusermd",'
|
||||||
|
'"ringKeyVersion":"$sent_http_x_scal_version","ringStatus":"$sent_http_x_scal_ring_status",'
|
||||||
|
'"s3Port":"$remote_port","sproxydStatus":"$upstream_status","req_id":"$http_x_scal_request_uids",'
|
||||||
|
'"ifMatch":"$http_if_match","ifNoneMatch":"$http_if_none_match",'
|
||||||
|
'"range":"$http_range","contentRange":"$sent_http_content_range","nginxPID":$PID,'
|
||||||
|
'"sproxydAddress":"$upstream_addr","sproxydResponseTime_s":"$upstream_response_time" }';
|
||||||
|
access_log /dev/stdout irm;
|
||||||
|
error_log /dev/stdout error;
|
||||||
|
location / {
|
||||||
|
proxy_request_buffering off;
|
||||||
|
fastcgi_request_buffering off;
|
||||||
|
fastcgi_no_cache 1;
|
||||||
|
fastcgi_cache_bypass 1;
|
||||||
|
fastcgi_buffering off;
|
||||||
|
fastcgi_ignore_client_abort on;
|
||||||
|
fastcgi_keep_conn on;
|
||||||
|
include fastcgi_params;
|
||||||
|
fastcgi_pass sproxyds;
|
||||||
|
fastcgi_next_upstream error timeout;
|
||||||
|
fastcgi_send_timeout 285s;
|
||||||
|
fastcgi_read_timeout 285s;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
|
@ -0,0 +1,12 @@
|
||||||
|
{
|
||||||
|
"general": {
|
||||||
|
"ring": "DATA",
|
||||||
|
"port": 20000,
|
||||||
|
"syslog_facility": "local0"
|
||||||
|
},
|
||||||
|
"ring_driver:0": {
|
||||||
|
"alias": "dc1",
|
||||||
|
"type": "local",
|
||||||
|
"queue_path": "/tmp/ring-objs"
|
||||||
|
},
|
||||||
|
}
|
|
@ -0,0 +1,43 @@
|
||||||
|
[supervisord]
|
||||||
|
nodaemon = true
|
||||||
|
loglevel = info
|
||||||
|
logfile = %(ENV_LOG_DIR)s/supervisord.log
|
||||||
|
pidfile = %(ENV_SUP_RUN_DIR)s/supervisord.pid
|
||||||
|
logfile_maxbytes = 20MB
|
||||||
|
logfile_backups = 2
|
||||||
|
|
||||||
|
[unix_http_server]
|
||||||
|
file = %(ENV_SUP_RUN_DIR)s/supervisor.sock
|
||||||
|
|
||||||
|
[rpcinterface:supervisor]
|
||||||
|
supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
|
||||||
|
|
||||||
|
[supervisorctl]
|
||||||
|
serverurl = unix://%(ENV_SUP_RUN_DIR)s/supervisor.sock
|
||||||
|
|
||||||
|
[program:nginx]
|
||||||
|
directory=%(ENV_SUP_RUN_DIR)s
|
||||||
|
command=bash -c "/usr/sbin/nginx -c %(ENV_CONF_DIR)s/nginx.conf -g 'daemon off;'"
|
||||||
|
stdout_logfile = %(ENV_LOG_DIR)s/%(program_name)s-%(process_num)s.log
|
||||||
|
stderr_logfile = %(ENV_LOG_DIR)s/%(program_name)s-%(process_num)s-stderr.log
|
||||||
|
stdout_logfile_maxbytes=100MB
|
||||||
|
stdout_logfile_backups=7
|
||||||
|
stderr_logfile_maxbytes=100MB
|
||||||
|
stderr_logfile_backups=7
|
||||||
|
autorestart=true
|
||||||
|
autostart=true
|
||||||
|
user=root
|
||||||
|
|
||||||
|
[program:sproxyd]
|
||||||
|
directory=%(ENV_SUP_RUN_DIR)s
|
||||||
|
process_name=%(program_name)s-%(process_num)s
|
||||||
|
numprocs=1
|
||||||
|
numprocs_start=0
|
||||||
|
command=/usr/bin/sproxyd -dlw -V127 -c %(ENV_CONF_DIR)s/sproxyd%(process_num)s.conf -P /run%(process_num)s
|
||||||
|
stdout_logfile = %(ENV_LOG_DIR)s/%(program_name)s-%(process_num)s.log
|
||||||
|
stdout_logfile_maxbytes=100MB
|
||||||
|
stdout_logfile_backups=7
|
||||||
|
redirect_stderr=true
|
||||||
|
autorestart=true
|
||||||
|
autostart=true
|
||||||
|
user=root
|
|
@ -20,13 +20,16 @@ jobs:
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v2
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
- name: Render and test ${{ matrix.tests.name }}
|
- name: Render and test ${{ matrix.tests.name }}
|
||||||
uses: scality/action-prom-render-test@1.0.1
|
uses: scality/action-prom-render-test@1.0.3
|
||||||
with:
|
with:
|
||||||
alert_file_path: monitoring/alerts.yaml
|
alert_file_path: monitoring/alerts.yaml
|
||||||
test_file_path: ${{ matrix.tests.file }}
|
test_file_path: ${{ matrix.tests.file }}
|
||||||
alert_inputs: >-
|
alert_inputs: |
|
||||||
namespace=zenko,service=artesca-data-connector-s3api-metrics,replicas=3
|
namespace=zenko
|
||||||
|
service=artesca-data-connector-s3api-metrics
|
||||||
|
reportJob=artesca-data-ops-report-handler
|
||||||
|
replicas=3
|
||||||
github_token: ${{ secrets.GITHUB_TOKEN }}
|
github_token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|
|
@ -14,12 +14,12 @@ jobs:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout code
|
- name: Checkout code
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
- name: Initialize CodeQL
|
- name: Initialize CodeQL
|
||||||
uses: github/codeql-action/init@v2
|
uses: github/codeql-action/init@v3
|
||||||
with:
|
with:
|
||||||
languages: javascript, python, ruby
|
languages: javascript, python, ruby
|
||||||
|
|
||||||
- name: Build and analyze
|
- name: Build and analyze
|
||||||
uses: github/codeql-action/analyze@v2
|
uses: github/codeql-action/analyze@v3
|
||||||
|
|
|
@ -10,7 +10,7 @@ jobs:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: 'Checkout Repository'
|
- name: 'Checkout Repository'
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
- name: 'Dependency Review'
|
- name: 'Dependency Review'
|
||||||
uses: actions/dependency-review-action@v3
|
uses: actions/dependency-review-action@v4
|
||||||
|
|
|
@ -10,58 +10,69 @@ on:
|
||||||
required: true
|
required: true
|
||||||
|
|
||||||
env:
|
env:
|
||||||
REGISTRY_NAME: registry.scality.com
|
|
||||||
PROJECT_NAME: ${{ github.event.repository.name }}
|
PROJECT_NAME: ${{ github.event.repository.name }}
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
build-federation-image:
|
build-federation-image:
|
||||||
uses: scality/workflows/.github/workflows/docker-build.yaml@v1
|
runs-on: ubuntu-20.04
|
||||||
secrets: inherit
|
steps:
|
||||||
with:
|
- name: Checkout
|
||||||
push: true
|
uses: actions/checkout@v4
|
||||||
registry: registry.scality.com
|
- name: Set up Docker Buildx
|
||||||
namespace: ${{ github.event.repository.name }}
|
uses: docker/setup-buildx-action@v3
|
||||||
name: ${{ github.event.repository.name }}
|
- name: Login to GitHub Registry
|
||||||
context: .
|
uses: docker/login-action@v3
|
||||||
file: images/svc-base/Dockerfile
|
with:
|
||||||
tag: ${{ github.event.inputs.tag }}-svc-base
|
registry: ghcr.io
|
||||||
|
username: ${{ github.repository_owner }}
|
||||||
|
password: ${{ github.token }}
|
||||||
|
- name: Build and push image for federation
|
||||||
|
uses: docker/build-push-action@v5
|
||||||
|
with:
|
||||||
|
push: true
|
||||||
|
context: .
|
||||||
|
file: images/svc-base/Dockerfile
|
||||||
|
tags: |
|
||||||
|
ghcr.io/${{ github.repository }}:${{ github.event.inputs.tag }}-svc-base
|
||||||
|
cache-from: type=gha,scope=federation
|
||||||
|
cache-to: type=gha,mode=max,scope=federation
|
||||||
|
|
||||||
release:
|
release:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v2
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
- name: Set up Docker Buildk
|
- name: Set up Docker Buildk
|
||||||
uses: docker/setup-buildx-action@v1
|
uses: docker/setup-buildx-action@v3
|
||||||
|
|
||||||
- name: Login to Registry
|
- name: Login to Registry
|
||||||
uses: docker/login-action@v1
|
uses: docker/login-action@v3
|
||||||
with:
|
with:
|
||||||
registry: ${{ env.REGISTRY_NAME }}
|
registry: ghcr.io
|
||||||
username: ${{ secrets.REGISTRY_LOGIN }}
|
username: ${{ github.repository_owner }}
|
||||||
password: ${{ secrets.REGISTRY_PASSWORD }}
|
password: ${{ github.token }}
|
||||||
|
|
||||||
- name: Push dashboards into the production namespace
|
- name: Push dashboards into the production namespace
|
||||||
run: |
|
run: |
|
||||||
oras push ${{ env.REGISTRY_NAME }}/${{ env.PROJECT_NAME }}/${{ env.PROJECT_NAME }}-dashboards:${{ github.event.inputs.tag }} \
|
oras push ghcr.io/${{ github.repository }}/${{ env.PROJECT_NAME }}-dashboards:${{ github.event.inputs.tag }} \
|
||||||
dashboard.json:application/grafana-dashboard+json \
|
dashboard.json:application/grafana-dashboard+json \
|
||||||
alerts.yaml:application/prometheus-alerts+yaml
|
alerts.yaml:application/prometheus-alerts+yaml
|
||||||
working-directory: monitoring
|
working-directory: monitoring
|
||||||
|
|
||||||
- name: Build and push
|
- name: Build and push
|
||||||
uses: docker/build-push-action@v2
|
uses: docker/build-push-action@v5
|
||||||
with:
|
with:
|
||||||
context: .
|
context: .
|
||||||
push: true
|
push: true
|
||||||
tags: ${{ env.REGISTRY_NAME }}/${{ env.PROJECT_NAME }}/${{ env.PROJECT_NAME }}:${{ github.event.inputs.tag }}
|
tags: ghcr.io/${{ github.repository }}:${{ github.event.inputs.tag }}
|
||||||
cache-from: type=gha
|
cache-from: type=gha
|
||||||
cache-to: type=gha,mode=max
|
cache-to: type=gha,mode=max
|
||||||
|
|
||||||
- name: Create Release
|
- name: Create Release
|
||||||
uses: softprops/action-gh-release@v1
|
uses: softprops/action-gh-release@v2
|
||||||
env:
|
env:
|
||||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
GITHUB_TOKEN: ${{ github.token }}
|
||||||
with:
|
with:
|
||||||
name: Release ${{ github.event.inputs.tag }}
|
name: Release ${{ github.event.inputs.tag }}
|
||||||
tag_name: ${{ github.event.inputs.tag }}
|
tag_name: ${{ github.event.inputs.tag }}
|
||||||
|
|
|
@ -2,6 +2,8 @@
|
||||||
name: tests
|
name: tests
|
||||||
|
|
||||||
on:
|
on:
|
||||||
|
workflow_dispatch:
|
||||||
|
|
||||||
push:
|
push:
|
||||||
branches-ignore:
|
branches-ignore:
|
||||||
- 'development/**'
|
- 'development/**'
|
||||||
|
@ -65,23 +67,24 @@ env:
|
||||||
ENABLE_LOCAL_CACHE: "true"
|
ENABLE_LOCAL_CACHE: "true"
|
||||||
REPORT_TOKEN: "report-token-1"
|
REPORT_TOKEN: "report-token-1"
|
||||||
REMOTE_MANAGEMENT_DISABLE: "1"
|
REMOTE_MANAGEMENT_DISABLE: "1"
|
||||||
|
# https://github.com/git-lfs/git-lfs/issues/5749
|
||||||
|
GIT_CLONE_PROTECTION_ACTIVE: 'false'
|
||||||
jobs:
|
jobs:
|
||||||
linting-coverage:
|
linting-coverage:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v2
|
uses: actions/checkout@v4
|
||||||
- uses: actions/setup-node@v2
|
- uses: actions/setup-node@v4
|
||||||
with:
|
with:
|
||||||
node-version: '16'
|
node-version: '16'
|
||||||
cache: yarn
|
cache: yarn
|
||||||
- name: install dependencies
|
- name: install dependencies
|
||||||
run: yarn install --frozen-lockfile --network-concurrency 1
|
run: yarn install --frozen-lockfile --network-concurrency 1
|
||||||
- uses: actions/setup-python@v4
|
- uses: actions/setup-python@v5
|
||||||
with:
|
with:
|
||||||
python-version: '3.9'
|
python-version: '3.9'
|
||||||
- uses: actions/cache@v2
|
- uses: actions/cache@v4
|
||||||
with:
|
with:
|
||||||
path: ~/.cache/pip
|
path: ~/.cache/pip
|
||||||
key: ${{ runner.os }}-pip
|
key: ${{ runner.os }}-pip
|
||||||
|
@ -114,7 +117,7 @@ jobs:
|
||||||
find . -name "*junit*.xml" -exec cp {} artifacts/junit/ ";"
|
find . -name "*junit*.xml" -exec cp {} artifacts/junit/ ";"
|
||||||
if: always()
|
if: always()
|
||||||
- name: Upload files to artifacts
|
- name: Upload files to artifacts
|
||||||
uses: scality/action-artifacts@v2
|
uses: scality/action-artifacts@v4
|
||||||
with:
|
with:
|
||||||
method: upload
|
method: upload
|
||||||
url: https://artifacts.scality.net
|
url: https://artifacts.scality.net
|
||||||
|
@ -125,61 +128,88 @@ jobs:
|
||||||
|
|
||||||
build:
|
build:
|
||||||
runs-on: ubuntu-20.04
|
runs-on: ubuntu-20.04
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
|
packages: write
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v2
|
uses: actions/checkout@v4
|
||||||
- name: Set up Docker Buildx
|
- name: Set up Docker Buildx
|
||||||
uses: docker/setup-buildx-action@v1.6.0
|
uses: docker/setup-buildx-action@v3
|
||||||
- name: Login to GitHub Registry
|
- name: Login to GitHub Registry
|
||||||
uses: docker/login-action@v1.10.0
|
uses: docker/login-action@v3
|
||||||
with:
|
with:
|
||||||
registry: ghcr.io
|
registry: ghcr.io
|
||||||
username: ${{ github.repository_owner }}
|
username: ${{ github.repository_owner }}
|
||||||
password: ${{ secrets.GITHUB_TOKEN }}
|
password: ${{ github.token }}
|
||||||
- name: Login to Registry
|
|
||||||
uses: docker/login-action@v1
|
|
||||||
with:
|
|
||||||
registry: registry.scality.com
|
|
||||||
username: ${{ secrets.REGISTRY_LOGIN }}
|
|
||||||
password: ${{ secrets.REGISTRY_PASSWORD }}
|
|
||||||
- name: Build and push cloudserver image
|
- name: Build and push cloudserver image
|
||||||
uses: docker/build-push-action@v3
|
uses: docker/build-push-action@v5
|
||||||
with:
|
with:
|
||||||
push: true
|
push: true
|
||||||
context: .
|
context: .
|
||||||
provenance: false
|
provenance: false
|
||||||
tags: |
|
tags: |
|
||||||
ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
|
ghcr.io/${{ github.repository }}:${{ github.sha }}
|
||||||
registry.scality.com/cloudserver-dev/cloudserver:${{ github.sha }}
|
labels: |
|
||||||
|
git.repository=${{ github.repository }}
|
||||||
|
git.commit-sha=${{ github.sha }}
|
||||||
cache-from: type=gha,scope=cloudserver
|
cache-from: type=gha,scope=cloudserver
|
||||||
cache-to: type=gha,mode=max,scope=cloudserver
|
cache-to: type=gha,mode=max,scope=cloudserver
|
||||||
|
- name: Build and push pykmip image
|
||||||
|
uses: docker/build-push-action@v5
|
||||||
|
with:
|
||||||
|
push: true
|
||||||
|
context: .github/pykmip
|
||||||
|
tags: |
|
||||||
|
ghcr.io/${{ github.repository }}/pykmip:${{ github.sha }}
|
||||||
|
labels: |
|
||||||
|
git.repository=${{ github.repository }}
|
||||||
|
git.commit-sha=${{ github.sha }}
|
||||||
|
cache-from: type=gha,scope=pykmip
|
||||||
|
cache-to: type=gha,mode=max,scope=pykmip
|
||||||
|
- name: Build and push MongoDB
|
||||||
|
uses: docker/build-push-action@v5
|
||||||
|
with:
|
||||||
|
push: true
|
||||||
|
context: .github/docker/mongodb
|
||||||
|
tags: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
|
||||||
|
cache-from: type=gha,scope=mongodb
|
||||||
|
cache-to: type=gha,mode=max,scope=mongodb
|
||||||
|
|
||||||
multiple-backend:
|
multiple-backend:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
needs: build
|
needs: build
|
||||||
env:
|
env:
|
||||||
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
|
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
|
||||||
|
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
|
||||||
S3BACKEND: mem
|
S3BACKEND: mem
|
||||||
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
|
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
|
||||||
S3DATA: multiple
|
S3DATA: multiple
|
||||||
JOB_NAME: ${{ github.job }}
|
JOB_NAME: ${{ github.job }}
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v4
|
||||||
|
- name: Login to Registry
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
registry: ghcr.io
|
||||||
|
username: ${{ github.repository_owner }}
|
||||||
|
password: ${{ github.token }}
|
||||||
- name: Setup CI environment
|
- name: Setup CI environment
|
||||||
uses: ./.github/actions/setup-ci
|
uses: ./.github/actions/setup-ci
|
||||||
- name: Setup CI services
|
- name: Setup CI services
|
||||||
run: docker compose up -d
|
run: docker compose --profile sproxyd up -d
|
||||||
working-directory: .github/docker
|
working-directory: .github/docker
|
||||||
- name: Run multiple backend test
|
- name: Run multiple backend test
|
||||||
run: |-
|
run: |-
|
||||||
set -o pipefail;
|
set -o pipefail;
|
||||||
bash wait_for_local_port.bash 8000 40
|
bash wait_for_local_port.bash 8000 40
|
||||||
|
bash wait_for_local_port.bash 81 40
|
||||||
yarn run multiple_backend_test | tee /tmp/artifacts/${{ github.job }}/tests.log
|
yarn run multiple_backend_test | tee /tmp/artifacts/${{ github.job }}/tests.log
|
||||||
env:
|
env:
|
||||||
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
|
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
|
||||||
- name: Upload logs to artifacts
|
- name: Upload logs to artifacts
|
||||||
uses: scality/action-artifacts@v3
|
uses: scality/action-artifacts@v4
|
||||||
with:
|
with:
|
||||||
method: upload
|
method: upload
|
||||||
url: https://artifacts.scality.net
|
url: https://artifacts.scality.net
|
||||||
|
@ -198,11 +228,12 @@ jobs:
|
||||||
S3KMS: file
|
S3KMS: file
|
||||||
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
|
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
|
||||||
DEFAULT_BUCKET_KEY_FORMAT: v0
|
DEFAULT_BUCKET_KEY_FORMAT: v0
|
||||||
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
|
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
|
||||||
|
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
|
||||||
JOB_NAME: ${{ github.job }}
|
JOB_NAME: ${{ github.job }}
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v4
|
||||||
- name: Setup CI environment
|
- name: Setup CI environment
|
||||||
uses: ./.github/actions/setup-ci
|
uses: ./.github/actions/setup-ci
|
||||||
- name: Setup CI services
|
- name: Setup CI services
|
||||||
|
@ -216,7 +247,7 @@ jobs:
|
||||||
env:
|
env:
|
||||||
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
|
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
|
||||||
- name: Upload logs to artifacts
|
- name: Upload logs to artifacts
|
||||||
uses: scality/action-artifacts@v3
|
uses: scality/action-artifacts@v4
|
||||||
with:
|
with:
|
||||||
method: upload
|
method: upload
|
||||||
url: https://artifacts.scality.net
|
url: https://artifacts.scality.net
|
||||||
|
@ -236,11 +267,12 @@ jobs:
|
||||||
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
|
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
|
||||||
DEFAULT_BUCKET_KEY_FORMAT: v1
|
DEFAULT_BUCKET_KEY_FORMAT: v1
|
||||||
METADATA_MAX_CACHED_BUCKETS: 1
|
METADATA_MAX_CACHED_BUCKETS: 1
|
||||||
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
|
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
|
||||||
|
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
|
||||||
JOB_NAME: ${{ github.job }}
|
JOB_NAME: ${{ github.job }}
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v4
|
||||||
- name: Setup CI environment
|
- name: Setup CI environment
|
||||||
uses: ./.github/actions/setup-ci
|
uses: ./.github/actions/setup-ci
|
||||||
- name: Setup CI services
|
- name: Setup CI services
|
||||||
|
@ -255,7 +287,7 @@ jobs:
|
||||||
env:
|
env:
|
||||||
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
|
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
|
||||||
- name: Upload logs to artifacts
|
- name: Upload logs to artifacts
|
||||||
uses: scality/action-artifacts@v3
|
uses: scality/action-artifacts@v4
|
||||||
with:
|
with:
|
||||||
method: upload
|
method: upload
|
||||||
url: https://artifacts.scality.net
|
url: https://artifacts.scality.net
|
||||||
|
@ -275,12 +307,13 @@ jobs:
|
||||||
env:
|
env:
|
||||||
S3BACKEND: file
|
S3BACKEND: file
|
||||||
S3VAULT: mem
|
S3VAULT: mem
|
||||||
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
|
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
|
||||||
|
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
|
||||||
MPU_TESTING: "yes"
|
MPU_TESTING: "yes"
|
||||||
JOB_NAME: ${{ matrix.job-name }}
|
JOB_NAME: ${{ matrix.job-name }}
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v4
|
||||||
- name: Setup CI environment
|
- name: Setup CI environment
|
||||||
uses: ./.github/actions/setup-ci
|
uses: ./.github/actions/setup-ci
|
||||||
- name: Setup matrix job artifacts directory
|
- name: Setup matrix job artifacts directory
|
||||||
|
@ -297,7 +330,7 @@ jobs:
|
||||||
bash wait_for_local_port.bash 8000 40
|
bash wait_for_local_port.bash 8000 40
|
||||||
yarn run ft_test | tee /tmp/artifacts/${{ matrix.job-name }}/tests.log
|
yarn run ft_test | tee /tmp/artifacts/${{ matrix.job-name }}/tests.log
|
||||||
- name: Upload logs to artifacts
|
- name: Upload logs to artifacts
|
||||||
uses: scality/action-artifacts@v3
|
uses: scality/action-artifacts@v4
|
||||||
with:
|
with:
|
||||||
method: upload
|
method: upload
|
||||||
url: https://artifacts.scality.net
|
url: https://artifacts.scality.net
|
||||||
|
@ -311,13 +344,14 @@ jobs:
|
||||||
needs: build
|
needs: build
|
||||||
env:
|
env:
|
||||||
ENABLE_UTAPI_V2: t
|
ENABLE_UTAPI_V2: t
|
||||||
S3BACKEND: mem
|
S3BACKEND: mem
|
||||||
BUCKET_DENY_FILTER: utapi-event-filter-deny-bucket
|
BUCKET_DENY_FILTER: utapi-event-filter-deny-bucket
|
||||||
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
|
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
|
||||||
|
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
|
||||||
JOB_NAME: ${{ github.job }}
|
JOB_NAME: ${{ github.job }}
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v4
|
||||||
- name: Setup CI environment
|
- name: Setup CI environment
|
||||||
uses: ./.github/actions/setup-ci
|
uses: ./.github/actions/setup-ci
|
||||||
- name: Setup CI services
|
- name: Setup CI services
|
||||||
|
@ -329,7 +363,51 @@ jobs:
|
||||||
bash wait_for_local_port.bash 8000 40
|
bash wait_for_local_port.bash 8000 40
|
||||||
yarn run test_utapi_v2 | tee /tmp/artifacts/${{ github.job }}/tests.log
|
yarn run test_utapi_v2 | tee /tmp/artifacts/${{ github.job }}/tests.log
|
||||||
- name: Upload logs to artifacts
|
- name: Upload logs to artifacts
|
||||||
uses: scality/action-artifacts@v3
|
uses: scality/action-artifacts@v4
|
||||||
|
with:
|
||||||
|
method: upload
|
||||||
|
url: https://artifacts.scality.net
|
||||||
|
user: ${{ secrets.ARTIFACTS_USER }}
|
||||||
|
password: ${{ secrets.ARTIFACTS_PASSWORD }}
|
||||||
|
source: /tmp/artifacts
|
||||||
|
if: always()
|
||||||
|
|
||||||
|
quota-tests:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
needs: build
|
||||||
|
strategy:
|
||||||
|
matrix:
|
||||||
|
inflights:
|
||||||
|
- name: "With Inflights"
|
||||||
|
value: "true"
|
||||||
|
- name: "Without Inflights"
|
||||||
|
value: "false"
|
||||||
|
env:
|
||||||
|
S3METADATA: mongodb
|
||||||
|
S3BACKEND: mem
|
||||||
|
S3QUOTA: scuba
|
||||||
|
QUOTA_ENABLE_INFLIGHTS: ${{ matrix.inflights.value }}
|
||||||
|
SCUBA_HOST: localhost
|
||||||
|
SCUBA_PORT: 8100
|
||||||
|
SCUBA_HEALTHCHECK_FREQUENCY: 100
|
||||||
|
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
|
||||||
|
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
|
||||||
|
JOB_NAME: ${{ github.job }}
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
- name: Setup CI environment
|
||||||
|
uses: ./.github/actions/setup-ci
|
||||||
|
- name: Setup CI services
|
||||||
|
run: docker compose --profile mongo up -d
|
||||||
|
working-directory: .github/docker
|
||||||
|
- name: Run quota tests
|
||||||
|
run: |-
|
||||||
|
set -ex -o pipefail;
|
||||||
|
bash wait_for_local_port.bash 8000 40
|
||||||
|
yarn run test_quota | tee /tmp/artifacts/${{ github.job }}/tests.log
|
||||||
|
- name: Upload logs to artifacts
|
||||||
|
uses: scality/action-artifacts@v4
|
||||||
with:
|
with:
|
||||||
method: upload
|
method: upload
|
||||||
url: https://artifacts.scality.net
|
url: https://artifacts.scality.net
|
||||||
|
@ -345,11 +423,13 @@ jobs:
|
||||||
S3BACKEND: file
|
S3BACKEND: file
|
||||||
S3VAULT: mem
|
S3VAULT: mem
|
||||||
MPU_TESTING: "yes"
|
MPU_TESTING: "yes"
|
||||||
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
|
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
|
||||||
|
PYKMIP_IMAGE: ghcr.io/${{ github.repository }}/pykmip:${{ github.sha }}
|
||||||
|
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
|
||||||
JOB_NAME: ${{ github.job }}
|
JOB_NAME: ${{ github.job }}
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v4
|
||||||
- name: Setup CI environment
|
- name: Setup CI environment
|
||||||
uses: ./.github/actions/setup-ci
|
uses: ./.github/actions/setup-ci
|
||||||
- name: Copy KMIP certs
|
- name: Copy KMIP certs
|
||||||
|
@ -365,7 +445,7 @@ jobs:
|
||||||
bash wait_for_local_port.bash 5696 40
|
bash wait_for_local_port.bash 5696 40
|
||||||
yarn run ft_kmip | tee /tmp/artifacts/${{ github.job }}/tests.log
|
yarn run ft_kmip | tee /tmp/artifacts/${{ github.job }}/tests.log
|
||||||
- name: Upload logs to artifacts
|
- name: Upload logs to artifacts
|
||||||
uses: scality/action-artifacts@v3
|
uses: scality/action-artifacts@v4
|
||||||
with:
|
with:
|
||||||
method: upload
|
method: upload
|
||||||
url: https://artifacts.scality.net
|
url: https://artifacts.scality.net
|
||||||
|
@ -373,7 +453,7 @@ jobs:
|
||||||
password: ${{ secrets.ARTIFACTS_PASSWORD }}
|
password: ${{ secrets.ARTIFACTS_PASSWORD }}
|
||||||
source: /tmp/artifacts
|
source: /tmp/artifacts
|
||||||
if: always()
|
if: always()
|
||||||
|
|
||||||
ceph-backend-test:
|
ceph-backend-test:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
needs: build
|
needs: build
|
||||||
|
@ -384,17 +464,18 @@ jobs:
|
||||||
CI_CEPH: 'true'
|
CI_CEPH: 'true'
|
||||||
MPU_TESTING: "yes"
|
MPU_TESTING: "yes"
|
||||||
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigCeph.json
|
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigCeph.json
|
||||||
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
|
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
|
||||||
|
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
|
||||||
JOB_NAME: ${{ github.job }}
|
JOB_NAME: ${{ github.job }}
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v4
|
||||||
- name: Login to GitHub Registry
|
- name: Login to GitHub Registry
|
||||||
uses: docker/login-action@v1.10.0
|
uses: docker/login-action@v3
|
||||||
with:
|
with:
|
||||||
registry: ghcr.io
|
registry: ghcr.io
|
||||||
username: ${{ github.repository_owner }}
|
username: ${{ github.repository_owner }}
|
||||||
password: ${{ secrets.GITHUB_TOKEN }}
|
password: ${{ github.token }}
|
||||||
- name: Setup CI environment
|
- name: Setup CI environment
|
||||||
uses: ./.github/actions/setup-ci
|
uses: ./.github/actions/setup-ci
|
||||||
- uses: ruby/setup-ruby@v1
|
- uses: ruby/setup-ruby@v1
|
||||||
|
@ -442,7 +523,7 @@ jobs:
|
||||||
S3VAULT: mem
|
S3VAULT: mem
|
||||||
S3METADATA: mongodb
|
S3METADATA: mongodb
|
||||||
- name: Upload logs to artifacts
|
- name: Upload logs to artifacts
|
||||||
uses: scality/action-artifacts@v3
|
uses: scality/action-artifacts@v4
|
||||||
with:
|
with:
|
||||||
method: upload
|
method: upload
|
||||||
url: https://artifacts.scality.net
|
url: https://artifacts.scality.net
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
ARG NODE_VERSION=16.17.1-bullseye-slim
|
ARG NODE_VERSION=16.20-bullseye-slim
|
||||||
|
|
||||||
FROM node:${NODE_VERSION} as builder
|
FROM node:${NODE_VERSION} as builder
|
||||||
|
|
||||||
|
@ -23,6 +23,7 @@ RUN apt-get update \
|
||||||
|
|
||||||
ENV PYTHON=python3
|
ENV PYTHON=python3
|
||||||
COPY package.json yarn.lock /usr/src/app/
|
COPY package.json yarn.lock /usr/src/app/
|
||||||
|
RUN npm install typescript -g
|
||||||
RUN yarn install --production --ignore-optional --frozen-lockfile --ignore-engines --network-concurrency 1
|
RUN yarn install --production --ignore-optional --frozen-lockfile --ignore-engines --network-concurrency 1
|
||||||
|
|
||||||
################################################################################
|
################################################################################
|
||||||
|
@ -42,6 +43,7 @@ EXPOSE 8002
|
||||||
RUN apt-get update && \
|
RUN apt-get update && \
|
||||||
apt-get install -y --no-install-recommends \
|
apt-get install -y --no-install-recommends \
|
||||||
jq \
|
jq \
|
||||||
|
tini \
|
||||||
&& rm -rf /var/lib/apt/lists/*
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
WORKDIR /usr/src/app
|
WORKDIR /usr/src/app
|
||||||
|
@ -53,6 +55,6 @@ COPY --from=builder /usr/src/app/node_modules ./node_modules/
|
||||||
|
|
||||||
VOLUME ["/usr/src/app/localData","/usr/src/app/localMetadata"]
|
VOLUME ["/usr/src/app/localData","/usr/src/app/localMetadata"]
|
||||||
|
|
||||||
ENTRYPOINT ["/usr/src/app/docker-entrypoint.sh"]
|
ENTRYPOINT ["tini", "--", "/usr/src/app/docker-entrypoint.sh"]
|
||||||
|
|
||||||
CMD [ "yarn", "start" ]
|
CMD [ "yarn", "start" ]
|
||||||
|
|
175
README.md
175
README.md
|
@ -1,10 +1,7 @@
|
||||||
# Zenko CloudServer
|
# Zenko CloudServer with Vitastor Backend
|
||||||
|
|
||||||
![Zenko CloudServer logo](res/scality-cloudserver-logo.png)
|
![Zenko CloudServer logo](res/scality-cloudserver-logo.png)
|
||||||
|
|
||||||
[![Docker Pulls][badgedocker]](https://hub.docker.com/r/zenko/cloudserver)
|
|
||||||
[![Docker Pulls][badgetwitter]](https://twitter.com/zenko)
|
|
||||||
|
|
||||||
## Overview
|
## Overview
|
||||||
|
|
||||||
CloudServer (formerly S3 Server) is an open-source Amazon S3-compatible
|
CloudServer (formerly S3 Server) is an open-source Amazon S3-compatible
|
||||||
|
@ -14,137 +11,71 @@ Scality’s Open Source Multi-Cloud Data Controller.
|
||||||
CloudServer provides a single AWS S3 API interface to access multiple
|
CloudServer provides a single AWS S3 API interface to access multiple
|
||||||
backend data storage both on-premise or public in the cloud.
|
backend data storage both on-premise or public in the cloud.
|
||||||
|
|
||||||
CloudServer is useful for Developers, either to run as part of a
|
This repository contains a fork of CloudServer with [Vitastor](https://git.yourcmc.ru/vitalif/vitastor)
|
||||||
continous integration test environment to emulate the AWS S3 service locally
|
backend support.
|
||||||
or as an abstraction layer to develop object storage enabled
|
|
||||||
application on the go.
|
|
||||||
|
|
||||||
## Learn more at [www.zenko.io/cloudserver](https://www.zenko.io/cloudserver/)
|
## Quick Start with Vitastor
|
||||||
|
|
||||||
## [May I offer you some lovely documentation?](http://s3-server.readthedocs.io/en/latest/)
|
Vitastor Backend is in experimental status, however you can already try to
|
||||||
|
run it and write or read something, or even mount it with [GeeseFS](https://github.com/yandex-cloud/geesefs),
|
||||||
|
it works too 😊.
|
||||||
|
|
||||||
## Docker
|
Installation instructions:
|
||||||
|
|
||||||
[Run your Zenko CloudServer with Docker](https://hub.docker.com/r/zenko/cloudserver/)
|
### Install Vitastor
|
||||||
|
|
||||||
## Contributing
|
Refer to [Vitastor Quick Start Manual](https://git.yourcmc.ru/vitalif/vitastor/src/branch/master/docs/intro/quickstart.en.md).
|
||||||
|
|
||||||
In order to contribute, please follow the
|
### Install Zenko with Vitastor Backend
|
||||||
[Contributing Guidelines](
|
|
||||||
https://github.com/scality/Guidelines/blob/master/CONTRIBUTING.md).
|
|
||||||
|
|
||||||
## Installation
|
- Clone this repository: `git clone https://git.yourcmc.ru/vitalif/zenko-cloudserver-vitastor`
|
||||||
|
- Install dependencies: `npm install --omit dev` or just `npm install`
|
||||||
|
- Clone Vitastor repository: `git clone https://git.yourcmc.ru/vitalif/vitastor`
|
||||||
|
- Build Vitastor node.js binding by running `npm install` in `node-binding` subdirectory of Vitastor repository.
|
||||||
|
You need `node-gyp` and `vitastor-client-dev` (Vitastor client library) for it to succeed.
|
||||||
|
- Symlink Vitastor module to Zenko: `ln -s /path/to/vitastor/node-binding /path/to/zenko/node_modules/vitastor`
|
||||||
|
|
||||||
### Dependencies
|
### Install and Configure MongoDB
|
||||||
|
|
||||||
Building and running the Zenko CloudServer requires node.js 10.x and yarn v1.17.x
|
Refer to [MongoDB Manual](https://www.mongodb.com/docs/manual/installation/).
|
||||||
. Up-to-date versions can be found at
|
|
||||||
[Nodesource](https://github.com/nodesource/distributions).
|
|
||||||
|
|
||||||
### Clone source code
|
### Setup Zenko
|
||||||
|
|
||||||
```shell
|
- Create a separate pool for S3 object data in your Vitastor cluster: `vitastor-cli create-pool s3-data`
|
||||||
git clone https://github.com/scality/S3.git
|
- Retrieve ID of the new pool from `vitastor-cli ls-pools --detail s3-data`
|
||||||
|
- In another pool, create an image for storing Vitastor volume metadata: `vitastor-cli create -s 10G s3-volume-meta`
|
||||||
|
- Copy `config.json.vitastor` to `config.json`, adjust it to match your domain
|
||||||
|
- Copy `authdata.json.example` to `authdata.json` - this is where you set S3 access & secret keys,
|
||||||
|
and also adjust them if you want to. Scality seems to use a separate auth service "Scality Vault" for
|
||||||
|
access keys, but it's not published, so let's use a file for now.
|
||||||
|
- Copy `locationConfig.json.vitastor` to `locationConfig.json` - this is where you set Vitastor cluster access data.
|
||||||
|
You should put correct values for `pool_id` (pool ID from the second step) and `metadata_image` (from the third step)
|
||||||
|
in this file.
|
||||||
|
|
||||||
|
Note: `locationConfig.json` in this version corresponds to storage classes (like STANDARD, COLD, etc)
|
||||||
|
instead of "locations" (zones like us-east-1) as it was in original Zenko CloudServer.
|
||||||
|
|
||||||
|
### Start Zenko
|
||||||
|
|
||||||
|
Start the S3 server with: `node index.js`
|
||||||
|
|
||||||
|
If you use default settings, Zenko CloudServer starts on port 8000.
|
||||||
|
The default access key is `accessKey1` with a secret key of `verySecretKey1`.
|
||||||
|
|
||||||
|
Now you can access your S3 with `s3cmd` or `geesefs`:
|
||||||
|
|
||||||
|
```
|
||||||
|
s3cmd --access_key=accessKey1 --secret_key=verySecretKey1 --host=http://localhost:8000 mb s3://testbucket
|
||||||
```
|
```
|
||||||
|
|
||||||
### Install js dependencies
|
```
|
||||||
|
AWS_ACCESS_KEY_ID=accessKey1 \
|
||||||
Go to the ./S3 folder,
|
AWS_SECRET_ACCESS_KEY=verySecretKey1 \
|
||||||
|
geesefs --endpoint http://localhost:8000 testbucket mountdir
|
||||||
```shell
|
|
||||||
yarn install --frozen-lockfile
|
|
||||||
```
|
```
|
||||||
|
|
||||||
If you get an error regarding installation of the diskUsage module,
|
# Author & License
|
||||||
please install g++.
|
|
||||||
|
|
||||||
If you get an error regarding level-down bindings, try clearing your yarn cache:
|
- [Zenko CloudServer](https://s3-server.readthedocs.io/en/latest/) author is Scality, licensed under [Apache License, version 2.0](https://www.apache.org/licenses/LICENSE-2.0)
|
||||||
|
- [Vitastor](https://git.yourcmc.ru/vitalif/vitastor/) and Zenko Vitastor backend author is Vitaliy Filippov, licensed under [VNPL-1.1](https://git.yourcmc.ru/vitalif/vitastor/src/branch/master/VNPL-1.1.txt)
|
||||||
```shell
|
(a "network copyleft" license based on AGPL/SSPL, but worded in a better way)
|
||||||
yarn cache clean
|
|
||||||
```
|
|
||||||
|
|
||||||
## Run it with a file backend
|
|
||||||
|
|
||||||
```shell
|
|
||||||
yarn start
|
|
||||||
```
|
|
||||||
|
|
||||||
This starts a Zenko CloudServer on port 8000. Two additional ports 9990 and
|
|
||||||
9991 are also open locally for internal transfer of metadata and data,
|
|
||||||
respectively.
|
|
||||||
|
|
||||||
The default access key is accessKey1 with
|
|
||||||
a secret key of verySecretKey1.
|
|
||||||
|
|
||||||
By default the metadata files will be saved in the
|
|
||||||
localMetadata directory and the data files will be saved
|
|
||||||
in the localData directory within the ./S3 directory on your
|
|
||||||
machine. These directories have been pre-created within the
|
|
||||||
repository. If you would like to save the data or metadata in
|
|
||||||
different locations of your choice, you must specify them with absolute paths.
|
|
||||||
So, when starting the server:
|
|
||||||
|
|
||||||
```shell
|
|
||||||
mkdir -m 700 $(pwd)/myFavoriteDataPath
|
|
||||||
mkdir -m 700 $(pwd)/myFavoriteMetadataPath
|
|
||||||
export S3DATAPATH="$(pwd)/myFavoriteDataPath"
|
|
||||||
export S3METADATAPATH="$(pwd)/myFavoriteMetadataPath"
|
|
||||||
yarn start
|
|
||||||
```
|
|
||||||
|
|
||||||
## Run it with multiple data backends
|
|
||||||
|
|
||||||
```shell
|
|
||||||
export S3DATA='multiple'
|
|
||||||
yarn start
|
|
||||||
```
|
|
||||||
|
|
||||||
This starts a Zenko CloudServer on port 8000.
|
|
||||||
The default access key is accessKey1 with
|
|
||||||
a secret key of verySecretKey1.
|
|
||||||
|
|
||||||
With multiple backends, you have the ability to
|
|
||||||
choose where each object will be saved by setting
|
|
||||||
the following header with a locationConstraint on
|
|
||||||
a PUT request:
|
|
||||||
|
|
||||||
```shell
|
|
||||||
'x-amz-meta-scal-location-constraint':'myLocationConstraint'
|
|
||||||
```
|
|
||||||
|
|
||||||
If no header is sent with a PUT object request, the
|
|
||||||
location constraint of the bucket will determine
|
|
||||||
where the data is saved. If the bucket has no location
|
|
||||||
constraint, the endpoint of the PUT request will be
|
|
||||||
used to determine location.
|
|
||||||
|
|
||||||
See the Configuration section in our documentation
|
|
||||||
[here](http://s3-server.readthedocs.io/en/latest/GETTING_STARTED/#configuration)
|
|
||||||
to learn how to set location constraints.
|
|
||||||
|
|
||||||
## Run it with an in-memory backend
|
|
||||||
|
|
||||||
```shell
|
|
||||||
yarn run mem_backend
|
|
||||||
```
|
|
||||||
|
|
||||||
This starts a Zenko CloudServer on port 8000.
|
|
||||||
The default access key is accessKey1 with
|
|
||||||
a secret key of verySecretKey1.
|
|
||||||
|
|
||||||
## Run it with Vault user management
|
|
||||||
|
|
||||||
Note: Vault is proprietary and must be accessed separately.
|
|
||||||
|
|
||||||
```shell
|
|
||||||
export S3VAULT=vault
|
|
||||||
yarn start
|
|
||||||
```
|
|
||||||
|
|
||||||
This starts a Zenko CloudServer using Vault for user management.
|
|
||||||
|
|
||||||
[badgetwitter]: https://img.shields.io/twitter/follow/zenko.svg?style=social&label=Follow
|
|
||||||
[badgedocker]: https://img.shields.io/docker/pulls/scality/s3server.svg
|
|
||||||
[badgepub]: https://circleci.com/gh/scality/S3.svg?style=svg
|
|
||||||
[badgepriv]: http://ci.ironmann.io/gh/scality/S3.svg?style=svg&circle-token=1f105b7518b53853b5b7cf72302a3f75d8c598ae
|
|
||||||
|
|
|
@ -1,46 +0,0 @@
|
||||||
#!/usr/bin/env node
|
|
||||||
'use strict'; // eslint-disable-line strict
|
|
||||||
|
|
||||||
const {
|
|
||||||
startWSManagementClient,
|
|
||||||
startPushConnectionHealthCheckServer,
|
|
||||||
} = require('../lib/management/push');
|
|
||||||
|
|
||||||
const logger = require('../lib/utilities/logger');
|
|
||||||
|
|
||||||
const {
|
|
||||||
PUSH_ENDPOINT: pushEndpoint,
|
|
||||||
INSTANCE_ID: instanceId,
|
|
||||||
MANAGEMENT_TOKEN: managementToken,
|
|
||||||
} = process.env;
|
|
||||||
|
|
||||||
if (!pushEndpoint) {
|
|
||||||
logger.error('missing push endpoint env var');
|
|
||||||
process.exit(1);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!instanceId) {
|
|
||||||
logger.error('missing instance id env var');
|
|
||||||
process.exit(1);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!managementToken) {
|
|
||||||
logger.error('missing management token env var');
|
|
||||||
process.exit(1);
|
|
||||||
}
|
|
||||||
|
|
||||||
startPushConnectionHealthCheckServer(err => {
|
|
||||||
if (err) {
|
|
||||||
logger.error('could not start healthcheck server', { error: err });
|
|
||||||
process.exit(1);
|
|
||||||
}
|
|
||||||
const url = `${pushEndpoint}/${instanceId}/ws?metrics=1`;
|
|
||||||
startWSManagementClient(url, managementToken, err => {
|
|
||||||
if (err) {
|
|
||||||
logger.error('connection failed, exiting', { error: err });
|
|
||||||
process.exit(1);
|
|
||||||
}
|
|
||||||
logger.info('no more connection, exiting');
|
|
||||||
process.exit(0);
|
|
||||||
});
|
|
||||||
});
|
|
|
@ -1,46 +0,0 @@
|
||||||
#!/usr/bin/env node
|
|
||||||
'use strict'; // eslint-disable-line strict
|
|
||||||
|
|
||||||
const {
|
|
||||||
startWSManagementClient,
|
|
||||||
startPushConnectionHealthCheckServer,
|
|
||||||
} = require('../lib/management/push');
|
|
||||||
|
|
||||||
const logger = require('../lib/utilities/logger');
|
|
||||||
|
|
||||||
const {
|
|
||||||
PUSH_ENDPOINT: pushEndpoint,
|
|
||||||
INSTANCE_ID: instanceId,
|
|
||||||
MANAGEMENT_TOKEN: managementToken,
|
|
||||||
} = process.env;
|
|
||||||
|
|
||||||
if (!pushEndpoint) {
|
|
||||||
logger.error('missing push endpoint env var');
|
|
||||||
process.exit(1);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!instanceId) {
|
|
||||||
logger.error('missing instance id env var');
|
|
||||||
process.exit(1);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!managementToken) {
|
|
||||||
logger.error('missing management token env var');
|
|
||||||
process.exit(1);
|
|
||||||
}
|
|
||||||
|
|
||||||
startPushConnectionHealthCheckServer(err => {
|
|
||||||
if (err) {
|
|
||||||
logger.error('could not start healthcheck server', { error: err });
|
|
||||||
process.exit(1);
|
|
||||||
}
|
|
||||||
const url = `${pushEndpoint}/${instanceId}/ws?proxy=1`;
|
|
||||||
startWSManagementClient(url, managementToken, err => {
|
|
||||||
if (err) {
|
|
||||||
logger.error('connection failed, exiting', { error: err });
|
|
||||||
process.exit(1);
|
|
||||||
}
|
|
||||||
logger.info('no more connection, exiting');
|
|
||||||
process.exit(0);
|
|
||||||
});
|
|
||||||
});
|
|
|
@ -4,6 +4,7 @@
|
||||||
"metricsPort": 8002,
|
"metricsPort": 8002,
|
||||||
"metricsListenOn": [],
|
"metricsListenOn": [],
|
||||||
"replicationGroupId": "RG001",
|
"replicationGroupId": "RG001",
|
||||||
|
"workers": 4,
|
||||||
"restEndpoints": {
|
"restEndpoints": {
|
||||||
"localhost": "us-east-1",
|
"localhost": "us-east-1",
|
||||||
"127.0.0.1": "us-east-1",
|
"127.0.0.1": "us-east-1",
|
||||||
|
@ -101,6 +102,14 @@
|
||||||
"readPreference": "primary",
|
"readPreference": "primary",
|
||||||
"database": "metadata"
|
"database": "metadata"
|
||||||
},
|
},
|
||||||
|
"authdata": "authdata.json",
|
||||||
|
"backends": {
|
||||||
|
"auth": "file",
|
||||||
|
"data": "file",
|
||||||
|
"metadata": "mongodb",
|
||||||
|
"kms": "file",
|
||||||
|
"quota": "none"
|
||||||
|
},
|
||||||
"externalBackends": {
|
"externalBackends": {
|
||||||
"aws_s3": {
|
"aws_s3": {
|
||||||
"httpAgent": {
|
"httpAgent": {
|
|
@ -0,0 +1,71 @@
|
||||||
|
{
|
||||||
|
"port": 8000,
|
||||||
|
"listenOn": [],
|
||||||
|
"metricsPort": 8002,
|
||||||
|
"metricsListenOn": [],
|
||||||
|
"replicationGroupId": "RG001",
|
||||||
|
"restEndpoints": {
|
||||||
|
"localhost": "STANDARD",
|
||||||
|
"127.0.0.1": "STANDARD",
|
||||||
|
"yourhostname.ru": "STANDARD"
|
||||||
|
},
|
||||||
|
"websiteEndpoints": [
|
||||||
|
"static.yourhostname.ru"
|
||||||
|
],
|
||||||
|
"replicationEndpoints": [ {
|
||||||
|
"site": "zenko",
|
||||||
|
"servers": ["127.0.0.1:8000"],
|
||||||
|
"default": true
|
||||||
|
} ],
|
||||||
|
"log": {
|
||||||
|
"logLevel": "info",
|
||||||
|
"dumpLevel": "error"
|
||||||
|
},
|
||||||
|
"healthChecks": {
|
||||||
|
"allowFrom": ["127.0.0.1/8", "::1"]
|
||||||
|
},
|
||||||
|
"backends": {
|
||||||
|
"metadata": "mongodb"
|
||||||
|
},
|
||||||
|
"mongodb": {
|
||||||
|
"replicaSetHosts": "127.0.0.1:27017",
|
||||||
|
"writeConcern": "majority",
|
||||||
|
"replicaSet": "rs0",
|
||||||
|
"readPreference": "primary",
|
||||||
|
"database": "s3",
|
||||||
|
"authCredentials": {
|
||||||
|
"username": "s3",
|
||||||
|
"password": ""
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"externalBackends": {
|
||||||
|
"aws_s3": {
|
||||||
|
"httpAgent": {
|
||||||
|
"keepAlive": false,
|
||||||
|
"keepAliveMsecs": 1000,
|
||||||
|
"maxFreeSockets": 256,
|
||||||
|
"maxSockets": null
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"gcp": {
|
||||||
|
"httpAgent": {
|
||||||
|
"keepAlive": true,
|
||||||
|
"keepAliveMsecs": 1000,
|
||||||
|
"maxFreeSockets": 256,
|
||||||
|
"maxSockets": null
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"requests": {
|
||||||
|
"viaProxy": false,
|
||||||
|
"trustedProxyCIDRs": [],
|
||||||
|
"extractClientIPFromHeader": ""
|
||||||
|
},
|
||||||
|
"bucketNotificationDestinations": [
|
||||||
|
{
|
||||||
|
"resource": "target1",
|
||||||
|
"type": "dummy",
|
||||||
|
"host": "localhost:6000"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
10
constants.js
10
constants.js
|
@ -116,7 +116,7 @@ const constants = {
|
||||||
],
|
],
|
||||||
|
|
||||||
// user metadata header to set object locationConstraint
|
// user metadata header to set object locationConstraint
|
||||||
objectLocationConstraintHeader: 'x-amz-meta-scal-location-constraint',
|
objectLocationConstraintHeader: 'x-amz-storage-class',
|
||||||
lastModifiedHeader: 'x-amz-meta-x-scal-last-modified',
|
lastModifiedHeader: 'x-amz-meta-x-scal-last-modified',
|
||||||
legacyLocations: ['sproxyd', 'legacy'],
|
legacyLocations: ['sproxyd', 'legacy'],
|
||||||
// declare here all existing service accounts and their properties
|
// declare here all existing service accounts and their properties
|
||||||
|
@ -130,7 +130,7 @@ const constants = {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
/* eslint-disable camelcase */
|
/* eslint-disable camelcase */
|
||||||
externalBackends: { aws_s3: true, azure: true, gcp: true, pfs: true, dmf: true },
|
externalBackends: { aws_s3: true, azure: true, gcp: true, pfs: true, dmf: true, azure_archive: true },
|
||||||
// some of the available data backends (if called directly rather
|
// some of the available data backends (if called directly rather
|
||||||
// than through the multiple backend gateway) need a key provided
|
// than through the multiple backend gateway) need a key provided
|
||||||
// as a string as first parameter of the get/delete methods.
|
// as a string as first parameter of the get/delete methods.
|
||||||
|
@ -205,9 +205,6 @@ const constants = {
|
||||||
],
|
],
|
||||||
allowedUtapiEventFilterStates: ['allow', 'deny'],
|
allowedUtapiEventFilterStates: ['allow', 'deny'],
|
||||||
allowedRestoreObjectRequestTierValues: ['Standard'],
|
allowedRestoreObjectRequestTierValues: ['Standard'],
|
||||||
validStorageClasses: [
|
|
||||||
'STANDARD',
|
|
||||||
],
|
|
||||||
lifecycleListing: {
|
lifecycleListing: {
|
||||||
CURRENT_TYPE: 'current',
|
CURRENT_TYPE: 'current',
|
||||||
NON_CURRENT_TYPE: 'noncurrent',
|
NON_CURRENT_TYPE: 'noncurrent',
|
||||||
|
@ -243,6 +240,9 @@ const constants = {
|
||||||
'objectPutPart',
|
'objectPutPart',
|
||||||
'completeMultipartUpload',
|
'completeMultipartUpload',
|
||||||
],
|
],
|
||||||
|
// if requester is not bucket owner, bucket policy actions should be denied with
|
||||||
|
// MethodNotAllowed error
|
||||||
|
onlyOwnerAllowed: ['bucketDeletePolicy', 'bucketGetPolicy', 'bucketPutPolicy'],
|
||||||
};
|
};
|
||||||
|
|
||||||
module.exports = constants;
|
module.exports = constants;
|
||||||
|
|
|
@ -2,11 +2,12 @@
|
||||||
|
|
||||||
## Docker Image Generation
|
## Docker Image Generation
|
||||||
|
|
||||||
Docker images are hosted on [registry.scality.com](registry.scality.com).
|
Docker images are hosted on [ghcri.io](https://github.com/orgs/scality/packages).
|
||||||
CloudServer has two namespaces there:
|
CloudServer has a few images there:
|
||||||
|
|
||||||
* Production Namespace: registry.scality.com/cloudserver
|
* Cloudserver container image: ghcr.io/scality/cloudserver
|
||||||
* Dev Namespace: registry.scality.com/cloudserver-dev
|
* Dashboard oras image: ghcr.io/scality/cloudserver/cloudser-dashboard
|
||||||
|
* Policies oras image: ghcr.io/scality/cloudserver/cloudser-dashboard
|
||||||
|
|
||||||
With every CI build, the CI will push images, tagging the
|
With every CI build, the CI will push images, tagging the
|
||||||
content with the developer branch's short SHA-1 commit hash.
|
content with the developer branch's short SHA-1 commit hash.
|
||||||
|
@ -18,8 +19,8 @@ Tagged versions of cloudserver will be stored in the production namespace.
|
||||||
## How to Pull Docker Images
|
## How to Pull Docker Images
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
docker pull registry.scality.com/cloudserver-dev/cloudserver:<commit hash>
|
docker pull ghcr.io/scality/cloudserver:<commit hash>
|
||||||
docker pull registry.scality.com/cloudserver/cloudserver:<tag>
|
docker pull ghcr.io/scality/cloudserver:<tag>
|
||||||
```
|
```
|
||||||
|
|
||||||
## Release Process
|
## Release Process
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
FROM registry.scality.com/federation/nodesvc-base:7.10.6.0
|
FROM ghcr.io/scality/federation/nodesvc-base:7.10.6.0
|
||||||
|
|
||||||
ENV S3_CONFIG_FILE=${CONF_DIR}/config.json
|
ENV S3_CONFIG_FILE=${CONF_DIR}/config.json
|
||||||
ENV S3_LOCATION_FILE=${CONF_DIR}/locationConfig.json
|
ENV S3_LOCATION_FILE=${CONF_DIR}/locationConfig.json
|
||||||
|
@ -14,8 +14,10 @@ RUN rm -f ~/.gitconfig && \
|
||||||
git config --global --add safe.directory . && \
|
git config --global --add safe.directory . && \
|
||||||
git lfs install && \
|
git lfs install && \
|
||||||
GIT_LFS_SKIP_SMUDGE=1 && \
|
GIT_LFS_SKIP_SMUDGE=1 && \
|
||||||
|
yarn global add typescript && \
|
||||||
yarn install --frozen-lockfile --production --network-concurrency 1 && \
|
yarn install --frozen-lockfile --production --network-concurrency 1 && \
|
||||||
yarn cache clean --all
|
yarn cache clean --all && \
|
||||||
|
yarn global remove typescript
|
||||||
|
|
||||||
# run symlinking separately to avoid yarn installation errors
|
# run symlinking separately to avoid yarn installation errors
|
||||||
# we might have to check if the symlinking is really needed!
|
# we might have to check if the symlinking is really needed!
|
||||||
|
|
12
index.js
12
index.js
|
@ -1,10 +1,10 @@
|
||||||
'use strict'; // eslint-disable-line strict
|
'use strict'; // eslint-disable-line strict
|
||||||
|
|
||||||
/**
|
require('werelogs').stderrUtils.catchAndTimestampStderr(
|
||||||
* Catch uncaught exceptions and add timestamp to aid debugging
|
undefined,
|
||||||
*/
|
// Do not exit as workers have their own listener that will exit
|
||||||
process.on('uncaughtException', err => {
|
// But primary don't have another listener
|
||||||
process.stderr.write(`${new Date().toISOString()}: Uncaught exception: \n${err.stack}`);
|
require('cluster').isPrimary ? 1 : null,
|
||||||
});
|
);
|
||||||
|
|
||||||
require('./lib/server.js')();
|
require('./lib/server.js')();
|
||||||
|
|
406
lib/Config.js
406
lib/Config.js
|
@ -8,15 +8,17 @@ const crypto = require('crypto');
|
||||||
const { v4: uuidv4 } = require('uuid');
|
const { v4: uuidv4 } = require('uuid');
|
||||||
const cronParser = require('cron-parser');
|
const cronParser = require('cron-parser');
|
||||||
const joi = require('@hapi/joi');
|
const joi = require('@hapi/joi');
|
||||||
|
const { s3routes, auth: arsenalAuth, s3middleware } = require('arsenal');
|
||||||
const { isValidBucketName } = require('arsenal').s3routes.routesUtils;
|
const { isValidBucketName } = s3routes.routesUtils;
|
||||||
const validateAuthConfig = require('arsenal').auth.inMemory.validateAuthConfig;
|
const validateAuthConfig = arsenalAuth.inMemory.validateAuthConfig;
|
||||||
const { buildAuthDataAccount } = require('./auth/in_memory/builder');
|
const { buildAuthDataAccount } = require('./auth/in_memory/builder');
|
||||||
const validExternalBackends = require('../constants').externalBackends;
|
const validExternalBackends = require('../constants').externalBackends;
|
||||||
const { azureAccountNameRegex, base64Regex,
|
const { azureAccountNameRegex, base64Regex,
|
||||||
allowedUtapiEventFilterFields, allowedUtapiEventFilterStates,
|
allowedUtapiEventFilterFields, allowedUtapiEventFilterStates,
|
||||||
} = require('../constants');
|
} = require('../constants');
|
||||||
const { utapiVersion } = require('utapi');
|
const { utapiVersion } = require('utapi');
|
||||||
|
const { scaleMsPerDay } = s3middleware.objectUtils;
|
||||||
|
|
||||||
const constants = require('../constants');
|
const constants = require('../constants');
|
||||||
|
|
||||||
// config paths
|
// config paths
|
||||||
|
@ -105,6 +107,47 @@ function parseSproxydConfig(configSproxyd) {
|
||||||
return joi.attempt(configSproxyd, joiSchema, 'bad config');
|
return joi.attempt(configSproxyd, joiSchema, 'bad config');
|
||||||
}
|
}
|
||||||
|
|
||||||
|
function parseRedisConfig(redisConfig) {
|
||||||
|
const joiSchema = joi.object({
|
||||||
|
password: joi.string().allow(''),
|
||||||
|
host: joi.string(),
|
||||||
|
port: joi.number(),
|
||||||
|
retry: joi.object({
|
||||||
|
connectBackoff: joi.object({
|
||||||
|
min: joi.number().required(),
|
||||||
|
max: joi.number().required(),
|
||||||
|
jitter: joi.number().required(),
|
||||||
|
factor: joi.number().required(),
|
||||||
|
deadline: joi.number().required(),
|
||||||
|
}),
|
||||||
|
}),
|
||||||
|
// sentinel config
|
||||||
|
sentinels: joi.alternatives().try(
|
||||||
|
joi.string()
|
||||||
|
.pattern(/^[a-zA-Z0-9.-]+:[0-9]+(,[a-zA-Z0-9.-]+:[0-9]+)*$/)
|
||||||
|
.custom(hosts => hosts.split(',').map(item => {
|
||||||
|
const [host, port] = item.split(':');
|
||||||
|
return { host, port: Number.parseInt(port, 10) };
|
||||||
|
})),
|
||||||
|
joi.array().items(
|
||||||
|
joi.object({
|
||||||
|
host: joi.string().required(),
|
||||||
|
port: joi.number().required(),
|
||||||
|
})
|
||||||
|
).min(1),
|
||||||
|
),
|
||||||
|
name: joi.string(),
|
||||||
|
sentinelPassword: joi.string().allow(''),
|
||||||
|
})
|
||||||
|
.and('host', 'port')
|
||||||
|
.and('sentinels', 'name')
|
||||||
|
.xor('host', 'sentinels')
|
||||||
|
.without('sentinels', ['host', 'port'])
|
||||||
|
.without('host', ['sentinels', 'sentinelPassword']);
|
||||||
|
|
||||||
|
return joi.attempt(redisConfig, joiSchema, 'bad config');
|
||||||
|
}
|
||||||
|
|
||||||
function restEndpointsAssert(restEndpoints, locationConstraints) {
|
function restEndpointsAssert(restEndpoints, locationConstraints) {
|
||||||
assert(typeof restEndpoints === 'object',
|
assert(typeof restEndpoints === 'object',
|
||||||
'bad config: restEndpoints must be an object of endpoints');
|
'bad config: restEndpoints must be an object of endpoints');
|
||||||
|
@ -237,6 +280,60 @@ function hdClientLocationConstraintAssert(configHd) {
|
||||||
return hdclientFields;
|
return hdclientFields;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
function azureArchiveLocationConstraintAssert(locationObj) {
|
||||||
|
const checkedFields = [
|
||||||
|
'azureContainerName',
|
||||||
|
'azureStorageEndpoint',
|
||||||
|
];
|
||||||
|
if (Object.keys(locationObj.details).length === 0 ||
|
||||||
|
!checkedFields.every(field => field in locationObj.details)) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
const {
|
||||||
|
azureContainerName,
|
||||||
|
azureStorageEndpoint,
|
||||||
|
} = locationObj.details;
|
||||||
|
const stringFields = [
|
||||||
|
azureContainerName,
|
||||||
|
azureStorageEndpoint,
|
||||||
|
];
|
||||||
|
stringFields.forEach(field => {
|
||||||
|
assert(typeof field === 'string',
|
||||||
|
`bad config: ${field} must be a string`);
|
||||||
|
});
|
||||||
|
|
||||||
|
let hasAuthMethod = false;
|
||||||
|
if (locationObj.details.sasToken !== undefined) {
|
||||||
|
assert(typeof locationObj.details.sasToken === 'string',
|
||||||
|
`bad config: ${locationObj.details.sasToken} must be a string`);
|
||||||
|
hasAuthMethod = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (locationObj.details.azureStorageAccountName !== undefined &&
|
||||||
|
locationObj.details.azureStorageAccessKey !== undefined) {
|
||||||
|
assert(typeof locationObj.details.azureStorageAccountName === 'string',
|
||||||
|
`bad config: ${locationObj.details.azureStorageAccountName} must be a string`);
|
||||||
|
assert(typeof locationObj.details.azureStorageAccessKey === 'string',
|
||||||
|
`bad config: ${locationObj.details.azureStorageAccessKey} must be a string`);
|
||||||
|
assert(!hasAuthMethod, 'Multiple authentication methods are not allowed');
|
||||||
|
hasAuthMethod = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (locationObj.details.tenantId !== undefined &&
|
||||||
|
locationObj.details.clientId !== undefined &&
|
||||||
|
locationObj.details.clientKey !== undefined) {
|
||||||
|
assert(typeof locationObj.details.tenantId === 'string',
|
||||||
|
`bad config: ${locationObj.details.tenantId} must be a string`);
|
||||||
|
assert(typeof locationObj.details.clientId === 'string',
|
||||||
|
`bad config: ${locationObj.details.clientId} must be a string`);
|
||||||
|
assert(typeof locationObj.details.clientKey === 'string',
|
||||||
|
`bad config: ${locationObj.details.clientKey} must be a string`);
|
||||||
|
assert(!hasAuthMethod, 'Multiple authentication methods are not allowed');
|
||||||
|
hasAuthMethod = true;
|
||||||
|
}
|
||||||
|
assert(hasAuthMethod, 'Missing authentication method');
|
||||||
|
}
|
||||||
|
|
||||||
function dmfLocationConstraintAssert(locationObj) {
|
function dmfLocationConstraintAssert(locationObj) {
|
||||||
const checkedFields = [
|
const checkedFields = [
|
||||||
'endpoint',
|
'endpoint',
|
||||||
|
@ -280,7 +377,7 @@ function dmfLocationConstraintAssert(locationObj) {
|
||||||
function locationConstraintAssert(locationConstraints) {
|
function locationConstraintAssert(locationConstraints) {
|
||||||
const supportedBackends =
|
const supportedBackends =
|
||||||
['mem', 'file', 'scality',
|
['mem', 'file', 'scality',
|
||||||
'mongodb', 'dmf'].concat(Object.keys(validExternalBackends));
|
'mongodb', 'dmf', 'azure_archive', 'vitastor'].concat(Object.keys(validExternalBackends));
|
||||||
assert(typeof locationConstraints === 'object',
|
assert(typeof locationConstraints === 'object',
|
||||||
'bad config: locationConstraints must be an object');
|
'bad config: locationConstraints must be an object');
|
||||||
Object.keys(locationConstraints).forEach(l => {
|
Object.keys(locationConstraints).forEach(l => {
|
||||||
|
@ -391,6 +488,9 @@ function locationConstraintAssert(locationConstraints) {
|
||||||
if (locationConstraints[l].type === 'dmf') {
|
if (locationConstraints[l].type === 'dmf') {
|
||||||
dmfLocationConstraintAssert(locationConstraints[l]);
|
dmfLocationConstraintAssert(locationConstraints[l]);
|
||||||
}
|
}
|
||||||
|
if (locationConstraints[l].type === 'azure_archive') {
|
||||||
|
azureArchiveLocationConstraintAssert(locationConstraints[l]);
|
||||||
|
}
|
||||||
if (locationConstraints[l].type === 'pfs') {
|
if (locationConstraints[l].type === 'pfs') {
|
||||||
assert(typeof details.pfsDaemonEndpoint === 'object',
|
assert(typeof details.pfsDaemonEndpoint === 'object',
|
||||||
'bad config: pfsDaemonEndpoint is mandatory and must be an object');
|
'bad config: pfsDaemonEndpoint is mandatory and must be an object');
|
||||||
|
@ -402,27 +502,23 @@ function locationConstraintAssert(locationConstraints) {
|
||||||
locationConstraints[l].details.connector.hdclient);
|
locationConstraints[l].details.connector.hdclient);
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
assert(Object.keys(locationConstraints)
|
|
||||||
.includes('us-east-1'), 'bad locationConfig: must ' +
|
|
||||||
'include us-east-1 as a locationConstraint');
|
|
||||||
}
|
}
|
||||||
|
|
||||||
function parseUtapiReindex(config) {
|
function parseUtapiReindex(config) {
|
||||||
const {
|
const {
|
||||||
enabled,
|
enabled,
|
||||||
schedule,
|
schedule,
|
||||||
sentinel,
|
redis,
|
||||||
bucketd,
|
bucketd,
|
||||||
onlyCountLatestWhenObjectLocked,
|
onlyCountLatestWhenObjectLocked,
|
||||||
} = config;
|
} = config;
|
||||||
assert(typeof enabled === 'boolean',
|
assert(typeof enabled === 'boolean',
|
||||||
'bad config: utapi.reindex.enabled must be a boolean');
|
'bad config: utapi.reindex.enabled must be a boolean');
|
||||||
assert(typeof sentinel === 'object',
|
|
||||||
'bad config: utapi.reindex.sentinel must be an object');
|
const parsedRedis = parseRedisConfig(redis);
|
||||||
assert(typeof sentinel.port === 'number',
|
assert(Array.isArray(parsedRedis.sentinels),
|
||||||
'bad config: utapi.reindex.sentinel.port must be a number');
|
'bad config: utapi reindex redis config requires a list of sentinels');
|
||||||
assert(typeof sentinel.name === 'string',
|
|
||||||
'bad config: utapi.reindex.sentinel.name must be a string');
|
|
||||||
assert(typeof bucketd === 'object',
|
assert(typeof bucketd === 'object',
|
||||||
'bad config: utapi.reindex.bucketd must be an object');
|
'bad config: utapi.reindex.bucketd must be an object');
|
||||||
assert(typeof bucketd.port === 'number',
|
assert(typeof bucketd.port === 'number',
|
||||||
|
@ -440,6 +536,13 @@ function parseUtapiReindex(config) {
|
||||||
'bad config: utapi.reindex.schedule must be a valid ' +
|
'bad config: utapi.reindex.schedule must be a valid ' +
|
||||||
`cron schedule. ${e.message}.`);
|
`cron schedule. ${e.message}.`);
|
||||||
}
|
}
|
||||||
|
return {
|
||||||
|
enabled,
|
||||||
|
schedule,
|
||||||
|
redis: parsedRedis,
|
||||||
|
bucketd,
|
||||||
|
onlyCountLatestWhenObjectLocked,
|
||||||
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
function requestsConfigAssert(requestsConfig) {
|
function requestsConfigAssert(requestsConfig) {
|
||||||
|
@ -527,7 +630,6 @@ class Config extends EventEmitter {
|
||||||
// Read config automatically
|
// Read config automatically
|
||||||
this._getLocationConfig();
|
this._getLocationConfig();
|
||||||
this._getConfig();
|
this._getConfig();
|
||||||
this._configureBackends();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
_getLocationConfig() {
|
_getLocationConfig() {
|
||||||
|
@ -739,11 +841,11 @@ class Config extends EventEmitter {
|
||||||
this.websiteEndpoints = config.websiteEndpoints;
|
this.websiteEndpoints = config.websiteEndpoints;
|
||||||
}
|
}
|
||||||
|
|
||||||
this.clusters = false;
|
this.workers = false;
|
||||||
if (config.clusters !== undefined) {
|
if (config.workers !== undefined) {
|
||||||
assert(Number.isInteger(config.clusters) && config.clusters > 0,
|
assert(Number.isInteger(config.workers) && config.workers > 0,
|
||||||
'bad config: clusters must be a positive integer');
|
'bad config: workers must be a positive integer');
|
||||||
this.clusters = config.clusters;
|
this.workers = config.workers;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (config.usEastBehavior !== undefined) {
|
if (config.usEastBehavior !== undefined) {
|
||||||
|
@ -981,8 +1083,7 @@ class Config extends EventEmitter {
|
||||||
assert(typeof config.localCache.port === 'number',
|
assert(typeof config.localCache.port === 'number',
|
||||||
'config: bad port for localCache. port must be a number');
|
'config: bad port for localCache. port must be a number');
|
||||||
if (config.localCache.password !== undefined) {
|
if (config.localCache.password !== undefined) {
|
||||||
assert(
|
assert(typeof config.localCache.password === 'string',
|
||||||
this._verifyRedisPassword(config.localCache.password),
|
|
||||||
'config: vad password for localCache. password must' +
|
'config: vad password for localCache. password must' +
|
||||||
' be a string');
|
' be a string');
|
||||||
}
|
}
|
||||||
|
@ -1008,56 +1109,46 @@ class Config extends EventEmitter {
|
||||||
}
|
}
|
||||||
|
|
||||||
if (config.redis) {
|
if (config.redis) {
|
||||||
if (config.redis.sentinels) {
|
this.redis = parseRedisConfig(config.redis);
|
||||||
this.redis = { sentinels: [], name: null };
|
}
|
||||||
|
if (config.scuba) {
|
||||||
assert(typeof config.redis.name === 'string',
|
this.scuba = {};
|
||||||
'bad config: redis sentinel name must be a string');
|
if (config.scuba.host) {
|
||||||
this.redis.name = config.redis.name;
|
assert(typeof config.scuba.host === 'string',
|
||||||
assert(Array.isArray(config.redis.sentinels) ||
|
'bad config: scuba host must be a string');
|
||||||
typeof config.redis.sentinels === 'string',
|
this.scuba.host = config.scuba.host;
|
||||||
'bad config: redis sentinels must be an array or string');
|
|
||||||
|
|
||||||
if (typeof config.redis.sentinels === 'string') {
|
|
||||||
config.redis.sentinels.split(',').forEach(item => {
|
|
||||||
const [host, port] = item.split(':');
|
|
||||||
this.redis.sentinels.push({ host,
|
|
||||||
port: Number.parseInt(port, 10) });
|
|
||||||
});
|
|
||||||
} else if (Array.isArray(config.redis.sentinels)) {
|
|
||||||
config.redis.sentinels.forEach(item => {
|
|
||||||
const { host, port } = item;
|
|
||||||
assert(typeof host === 'string',
|
|
||||||
'bad config: redis sentinel host must be a string');
|
|
||||||
assert(typeof port === 'number',
|
|
||||||
'bad config: redis sentinel port must be a number');
|
|
||||||
this.redis.sentinels.push({ host, port });
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
if (config.redis.sentinelPassword !== undefined) {
|
|
||||||
assert(
|
|
||||||
this._verifyRedisPassword(config.redis.sentinelPassword));
|
|
||||||
this.redis.sentinelPassword = config.redis.sentinelPassword;
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// check for standalone configuration
|
|
||||||
this.redis = {};
|
|
||||||
assert(typeof config.redis.host === 'string',
|
|
||||||
'bad config: redis.host must be a string');
|
|
||||||
assert(typeof config.redis.port === 'number',
|
|
||||||
'bad config: redis.port must be a number');
|
|
||||||
this.redis.host = config.redis.host;
|
|
||||||
this.redis.port = config.redis.port;
|
|
||||||
}
|
}
|
||||||
if (config.redis.password !== undefined) {
|
if (config.scuba.port) {
|
||||||
assert(
|
assert(Number.isInteger(config.scuba.port)
|
||||||
this._verifyRedisPassword(config.redis.password),
|
&& config.scuba.port > 0,
|
||||||
'bad config: invalid password for redis. password must ' +
|
'bad config: scuba port must be a positive integer');
|
||||||
'be a string');
|
this.scuba.port = config.scuba.port;
|
||||||
this.redis.password = config.redis.password;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if (process.env.SCUBA_HOST && process.env.SCUBA_PORT) {
|
||||||
|
assert(typeof process.env.SCUBA_HOST === 'string',
|
||||||
|
'bad config: scuba host must be a string');
|
||||||
|
assert(Number.isInteger(Number(process.env.SCUBA_PORT))
|
||||||
|
&& Number(process.env.SCUBA_PORT) > 0,
|
||||||
|
'bad config: scuba port must be a positive integer');
|
||||||
|
this.scuba = {
|
||||||
|
host: process.env.SCUBA_HOST,
|
||||||
|
port: Number(process.env.SCUBA_PORT),
|
||||||
|
};
|
||||||
|
}
|
||||||
|
if (this.scuba) {
|
||||||
|
this.quotaEnabled = true;
|
||||||
|
}
|
||||||
|
const maxStaleness = Number(process.env.QUOTA_MAX_STALENESS_MS) ||
|
||||||
|
config.quota?.maxStatenessMS ||
|
||||||
|
24 * 60 * 60 * 1000;
|
||||||
|
assert(Number.isInteger(maxStaleness), 'bad config: maxStalenessMS must be an integer');
|
||||||
|
const enableInflights = process.env.QUOTA_ENABLE_INFLIGHTS === 'true' ||
|
||||||
|
config.quota?.enableInflights || false;
|
||||||
|
this.quota = {
|
||||||
|
maxStaleness,
|
||||||
|
enableInflights,
|
||||||
|
};
|
||||||
if (config.utapi) {
|
if (config.utapi) {
|
||||||
this.utapi = { component: 's3' };
|
this.utapi = { component: 's3' };
|
||||||
if (config.utapi.host) {
|
if (config.utapi.host) {
|
||||||
|
@ -1086,50 +1177,8 @@ class Config extends EventEmitter {
|
||||||
assert(config.redis, 'missing required property of utapi ' +
|
assert(config.redis, 'missing required property of utapi ' +
|
||||||
'configuration: redis');
|
'configuration: redis');
|
||||||
if (config.utapi.redis) {
|
if (config.utapi.redis) {
|
||||||
if (config.utapi.redis.sentinels) {
|
this.utapi.redis = parseRedisConfig(config.utapi.redis);
|
||||||
this.utapi.redis = { sentinels: [], name: null };
|
if (this.utapi.redis.retry === undefined) {
|
||||||
|
|
||||||
assert(typeof config.utapi.redis.name === 'string',
|
|
||||||
'bad config: redis sentinel name must be a string');
|
|
||||||
this.utapi.redis.name = config.utapi.redis.name;
|
|
||||||
|
|
||||||
assert(Array.isArray(config.utapi.redis.sentinels),
|
|
||||||
'bad config: redis sentinels must be an array');
|
|
||||||
config.utapi.redis.sentinels.forEach(item => {
|
|
||||||
const { host, port } = item;
|
|
||||||
assert(typeof host === 'string',
|
|
||||||
'bad config: redis sentinel host must be a string');
|
|
||||||
assert(typeof port === 'number',
|
|
||||||
'bad config: redis sentinel port must be a number');
|
|
||||||
this.utapi.redis.sentinels.push({ host, port });
|
|
||||||
});
|
|
||||||
} else {
|
|
||||||
// check for standalone configuration
|
|
||||||
this.utapi.redis = {};
|
|
||||||
assert(typeof config.utapi.redis.host === 'string',
|
|
||||||
'bad config: redis.host must be a string');
|
|
||||||
assert(typeof config.utapi.redis.port === 'number',
|
|
||||||
'bad config: redis.port must be a number');
|
|
||||||
this.utapi.redis.host = config.utapi.redis.host;
|
|
||||||
this.utapi.redis.port = config.utapi.redis.port;
|
|
||||||
}
|
|
||||||
if (config.utapi.redis.retry !== undefined) {
|
|
||||||
if (config.utapi.redis.retry.connectBackoff !== undefined) {
|
|
||||||
const { min, max, jitter, factor, deadline } = config.utapi.redis.retry.connectBackoff;
|
|
||||||
assert.strictEqual(typeof min, 'number',
|
|
||||||
'utapi.redis.retry.connectBackoff: min must be a number');
|
|
||||||
assert.strictEqual(typeof max, 'number',
|
|
||||||
'utapi.redis.retry.connectBackoff: max must be a number');
|
|
||||||
assert.strictEqual(typeof jitter, 'number',
|
|
||||||
'utapi.redis.retry.connectBackoff: jitter must be a number');
|
|
||||||
assert.strictEqual(typeof factor, 'number',
|
|
||||||
'utapi.redis.retry.connectBackoff: factor must be a number');
|
|
||||||
assert.strictEqual(typeof deadline, 'number',
|
|
||||||
'utapi.redis.retry.connectBackoff: deadline must be a number');
|
|
||||||
}
|
|
||||||
|
|
||||||
this.utapi.redis.retry = config.utapi.redis.retry;
|
|
||||||
} else {
|
|
||||||
this.utapi.redis.retry = {
|
this.utapi.redis.retry = {
|
||||||
connectBackoff: {
|
connectBackoff: {
|
||||||
min: 10,
|
min: 10,
|
||||||
|
@ -1140,22 +1189,6 @@ class Config extends EventEmitter {
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
if (config.utapi.redis.password !== undefined) {
|
|
||||||
assert(
|
|
||||||
this._verifyRedisPassword(config.utapi.redis.password),
|
|
||||||
'config: invalid password for utapi redis. password' +
|
|
||||||
' must be a string');
|
|
||||||
this.utapi.redis.password = config.utapi.redis.password;
|
|
||||||
}
|
|
||||||
if (config.utapi.redis.sentinelPassword !== undefined) {
|
|
||||||
assert(
|
|
||||||
this._verifyRedisPassword(
|
|
||||||
config.utapi.redis.sentinelPassword),
|
|
||||||
'config: invalid password for utapi redis. password' +
|
|
||||||
' must be a string');
|
|
||||||
this.utapi.redis.sentinelPassword =
|
|
||||||
config.utapi.redis.sentinelPassword;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
if (config.utapi.metrics) {
|
if (config.utapi.metrics) {
|
||||||
this.utapi.metrics = config.utapi.metrics;
|
this.utapi.metrics = config.utapi.metrics;
|
||||||
|
@ -1225,8 +1258,7 @@ class Config extends EventEmitter {
|
||||||
}
|
}
|
||||||
|
|
||||||
if (config.utapi && config.utapi.reindex) {
|
if (config.utapi && config.utapi.reindex) {
|
||||||
parseUtapiReindex(config.utapi.reindex);
|
this.utapi.reindex = parseUtapiReindex(config.utapi.reindex);
|
||||||
this.utapi.reindex = config.utapi.reindex;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1271,6 +1303,8 @@ class Config extends EventEmitter {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
this.authdata = config.authdata || 'authdata.json';
|
||||||
|
|
||||||
this.kms = {};
|
this.kms = {};
|
||||||
if (config.kms) {
|
if (config.kms) {
|
||||||
assert(typeof config.kms.userName === 'string');
|
assert(typeof config.kms.userName === 'string');
|
||||||
|
@ -1490,25 +1524,6 @@ class Config extends EventEmitter {
|
||||||
this.outboundProxy.certs = certObj.certs;
|
this.outboundProxy.certs = certObj.certs;
|
||||||
}
|
}
|
||||||
|
|
||||||
this.managementAgent = {};
|
|
||||||
this.managementAgent.port = 8010;
|
|
||||||
this.managementAgent.host = 'localhost';
|
|
||||||
if (config.managementAgent !== undefined) {
|
|
||||||
if (config.managementAgent.port !== undefined) {
|
|
||||||
assert(Number.isInteger(config.managementAgent.port)
|
|
||||||
&& config.managementAgent.port > 0,
|
|
||||||
'bad config: managementAgent port must be a positive ' +
|
|
||||||
'integer');
|
|
||||||
this.managementAgent.port = config.managementAgent.port;
|
|
||||||
}
|
|
||||||
if (config.managementAgent.host !== undefined) {
|
|
||||||
assert.strictEqual(typeof config.managementAgent.host, 'string',
|
|
||||||
'bad config: management agent host must ' +
|
|
||||||
'be a string');
|
|
||||||
this.managementAgent.host = config.managementAgent.host;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ephemeral token to protect the reporting endpoint:
|
// Ephemeral token to protect the reporting endpoint:
|
||||||
// try inherited from parent first, then hardcoded in conf file,
|
// try inherited from parent first, then hardcoded in conf file,
|
||||||
// then create a fresh one as last resort.
|
// then create a fresh one as last resort.
|
||||||
|
@ -1574,6 +1589,7 @@ class Config extends EventEmitter {
|
||||||
// Version of the configuration we're running under
|
// Version of the configuration we're running under
|
||||||
this.overlayVersion = config.overlayVersion || 0;
|
this.overlayVersion = config.overlayVersion || 0;
|
||||||
|
|
||||||
|
this._setTimeOptions();
|
||||||
this.multiObjectDeleteConcurrency = constants.multiObjectDeleteConcurrency;
|
this.multiObjectDeleteConcurrency = constants.multiObjectDeleteConcurrency;
|
||||||
const extractedNumber = Number.parseInt(config.multiObjectDeleteConcurrency, 10);
|
const extractedNumber = Number.parseInt(config.multiObjectDeleteConcurrency, 10);
|
||||||
if (!isNaN(extractedNumber) && extractedNumber > 0 && extractedNumber < 1000) {
|
if (!isNaN(extractedNumber) && extractedNumber > 0 && extractedNumber < 1000) {
|
||||||
|
@ -1597,43 +1613,83 @@ class Config extends EventEmitter {
|
||||||
'bad config: maxScannedLifecycleListingEntries must be greater than 2');
|
'bad config: maxScannedLifecycleListingEntries must be greater than 2');
|
||||||
this.maxScannedLifecycleListingEntries = config.maxScannedLifecycleListingEntries;
|
this.maxScannedLifecycleListingEntries = config.maxScannedLifecycleListingEntries;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
this._configureBackends(config);
|
||||||
|
}
|
||||||
|
|
||||||
|
_setTimeOptions() {
|
||||||
|
// NOTE: EXPIRE_ONE_DAY_EARLIER and TRANSITION_ONE_DAY_EARLIER are deprecated in favor of
|
||||||
|
// TIME_PROGRESSION_FACTOR which decreases the weight attributed to a day in order to among other things
|
||||||
|
// expedite the lifecycle of objects.
|
||||||
|
|
||||||
|
// moves lifecycle expiration deadlines 1 day earlier, mostly for testing
|
||||||
|
const expireOneDayEarlier = process.env.EXPIRE_ONE_DAY_EARLIER === 'true';
|
||||||
|
// moves lifecycle transition deadlines 1 day earlier, mostly for testing
|
||||||
|
const transitionOneDayEarlier = process.env.TRANSITION_ONE_DAY_EARLIER === 'true';
|
||||||
|
// decreases the weight attributed to a day in order to expedite the lifecycle of objects.
|
||||||
|
const timeProgressionFactor = Number.parseInt(process.env.TIME_PROGRESSION_FACTOR, 10) || 1;
|
||||||
|
|
||||||
|
const isIncompatible = (expireOneDayEarlier || transitionOneDayEarlier) && (timeProgressionFactor > 1);
|
||||||
|
assert(!isIncompatible, 'The environment variables "EXPIRE_ONE_DAY_EARLIER" or ' +
|
||||||
|
'"TRANSITION_ONE_DAY_EARLIER" are not compatible with the "TIME_PROGRESSION_FACTOR" variable.');
|
||||||
|
|
||||||
|
// The scaledMsPerDay value is initially set to the number of milliseconds per day
|
||||||
|
// (24 * 60 * 60 * 1000) as the default value.
|
||||||
|
// However, during testing, if the timeProgressionFactor is defined and greater than 1,
|
||||||
|
// the scaledMsPerDay value is decreased. This adjustment allows for simulating actions occurring
|
||||||
|
// earlier in time.
|
||||||
|
const scaledMsPerDay = scaleMsPerDay(timeProgressionFactor);
|
||||||
|
|
||||||
|
this.timeOptions = {
|
||||||
|
expireOneDayEarlier,
|
||||||
|
transitionOneDayEarlier,
|
||||||
|
timeProgressionFactor,
|
||||||
|
scaledMsPerDay,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
getTimeOptions() {
|
||||||
|
return this.timeOptions;
|
||||||
}
|
}
|
||||||
|
|
||||||
_getAuthData() {
|
_getAuthData() {
|
||||||
return require(findConfigFile(process.env.S3AUTH_CONFIG || 'authdata.json'));
|
return JSON.parse(fs.readFileSync(findConfigFile(process.env.S3AUTH_CONFIG || this.authdata), { encoding: 'utf-8' }));
|
||||||
}
|
}
|
||||||
|
|
||||||
_configureBackends() {
|
_configureBackends(config) {
|
||||||
|
const backends = config.backends || {};
|
||||||
/**
|
/**
|
||||||
* Configure the backends for Authentication, Data and Metadata.
|
* Configure the backends for Authentication, Data and Metadata.
|
||||||
*/
|
*/
|
||||||
let auth = 'mem';
|
let auth = backends.auth || 'mem';
|
||||||
let data = 'multiple';
|
let data = backends.data || 'multiple';
|
||||||
let metadata = 'file';
|
let metadata = backends.metadata || 'file';
|
||||||
let kms = 'file';
|
let kms = backends.kms || 'file';
|
||||||
|
let quota = backends.quota || 'none';
|
||||||
if (process.env.S3BACKEND) {
|
if (process.env.S3BACKEND) {
|
||||||
const validBackends = ['mem', 'file', 'scality', 'cdmi'];
|
const validBackends = ['mem', 'file', 'scality', 'cdmi'];
|
||||||
assert(validBackends.indexOf(process.env.S3BACKEND) > -1,
|
assert(validBackends.indexOf(process.env.S3BACKEND) > -1,
|
||||||
'bad environment variable: S3BACKEND environment variable ' +
|
'bad environment variable: S3BACKEND environment variable ' +
|
||||||
'should be one of mem/file/scality/cdmi'
|
'should be one of mem/file/scality/cdmi'
|
||||||
);
|
);
|
||||||
auth = process.env.S3BACKEND;
|
auth = process.env.S3BACKEND == 'scality' ? 'scality' : 'mem';
|
||||||
data = process.env.S3BACKEND;
|
data = process.env.S3BACKEND;
|
||||||
metadata = process.env.S3BACKEND;
|
metadata = process.env.S3BACKEND;
|
||||||
kms = process.env.S3BACKEND;
|
kms = process.env.S3BACKEND;
|
||||||
}
|
}
|
||||||
if (process.env.S3VAULT) {
|
if (process.env.S3VAULT) {
|
||||||
auth = process.env.S3VAULT;
|
auth = process.env.S3VAULT;
|
||||||
|
auth = (auth === 'file' || auth === 'mem' || auth === 'cdmi' ? 'mem' : auth);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (auth === 'file' || auth === 'mem' || auth === 'cdmi') {
|
if (auth === 'file' || auth === 'mem' || auth === 'cdmi') {
|
||||||
// Auth only checks for 'mem' since mem === file
|
// Auth only checks for 'mem' since mem === file
|
||||||
auth = 'mem';
|
|
||||||
let authData;
|
let authData;
|
||||||
if (process.env.SCALITY_ACCESS_KEY_ID &&
|
if (process.env.SCALITY_ACCESS_KEY_ID &&
|
||||||
process.env.SCALITY_SECRET_ACCESS_KEY) {
|
process.env.SCALITY_SECRET_ACCESS_KEY) {
|
||||||
authData = buildAuthDataAccount(
|
authData = buildAuthDataAccount(
|
||||||
process.env.SCALITY_ACCESS_KEY_ID,
|
process.env.SCALITY_ACCESS_KEY_ID,
|
||||||
process.env.SCALITY_SECRET_ACCESS_KEY);
|
process.env.SCALITY_SECRET_ACCESS_KEY);
|
||||||
} else {
|
} else {
|
||||||
authData = this._getAuthData();
|
authData = this._getAuthData();
|
||||||
}
|
}
|
||||||
|
@ -1641,7 +1697,7 @@ class Config extends EventEmitter {
|
||||||
throw new Error('bad config: invalid auth config file.');
|
throw new Error('bad config: invalid auth config file.');
|
||||||
}
|
}
|
||||||
this.authData = authData;
|
this.authData = authData;
|
||||||
} else if (auth === 'multiple') {
|
} else if (auth === 'multiple') {
|
||||||
const authData = this._getAuthData();
|
const authData = this._getAuthData();
|
||||||
if (validateAuthConfig(authData)) {
|
if (validateAuthConfig(authData)) {
|
||||||
throw new Error('bad config: invalid auth config file.');
|
throw new Error('bad config: invalid auth config file.');
|
||||||
|
@ -1656,9 +1712,9 @@ class Config extends EventEmitter {
|
||||||
'should be one of mem/file/scality/multiple'
|
'should be one of mem/file/scality/multiple'
|
||||||
);
|
);
|
||||||
data = process.env.S3DATA;
|
data = process.env.S3DATA;
|
||||||
}
|
if (data === 'scality' || data === 'multiple') {
|
||||||
if (data === 'scality' || data === 'multiple') {
|
data = 'multiple';
|
||||||
data = 'multiple';
|
}
|
||||||
}
|
}
|
||||||
assert(this.locationConstraints !== undefined &&
|
assert(this.locationConstraints !== undefined &&
|
||||||
this.restEndpoints !== undefined,
|
this.restEndpoints !== undefined,
|
||||||
|
@ -1671,18 +1727,18 @@ class Config extends EventEmitter {
|
||||||
if (process.env.S3KMS) {
|
if (process.env.S3KMS) {
|
||||||
kms = process.env.S3KMS;
|
kms = process.env.S3KMS;
|
||||||
}
|
}
|
||||||
|
if (process.env.S3QUOTA) {
|
||||||
|
quota = process.env.S3QUOTA;
|
||||||
|
}
|
||||||
this.backends = {
|
this.backends = {
|
||||||
auth,
|
auth,
|
||||||
data,
|
data,
|
||||||
metadata,
|
metadata,
|
||||||
kms,
|
kms,
|
||||||
|
quota,
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
_verifyRedisPassword(password) {
|
|
||||||
return typeof password === 'string';
|
|
||||||
}
|
|
||||||
|
|
||||||
setAuthDataAccounts(accounts) {
|
setAuthDataAccounts(accounts) {
|
||||||
this.authData.accounts = accounts;
|
this.authData.accounts = accounts;
|
||||||
this.emit('authdata-update');
|
this.emit('authdata-update');
|
||||||
|
@ -1805,10 +1861,19 @@ class Config extends EventEmitter {
|
||||||
.update(instanceId)
|
.update(instanceId)
|
||||||
.digest('hex');
|
.digest('hex');
|
||||||
}
|
}
|
||||||
|
|
||||||
|
isQuotaEnabled() {
|
||||||
|
return !!this.quotaEnabled;
|
||||||
|
}
|
||||||
|
|
||||||
|
isQuotaInflightEnabled() {
|
||||||
|
return this.quota.enableInflights;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
module.exports = {
|
module.exports = {
|
||||||
parseSproxydConfig,
|
parseSproxydConfig,
|
||||||
|
parseRedisConfig,
|
||||||
locationConstraintAssert,
|
locationConstraintAssert,
|
||||||
ConfigObject: Config,
|
ConfigObject: Config,
|
||||||
config: new Config(),
|
config: new Config(),
|
||||||
|
@ -1816,4 +1881,5 @@ module.exports = {
|
||||||
bucketNotifAssert,
|
bucketNotifAssert,
|
||||||
azureGetStorageAccountName,
|
azureGetStorageAccountName,
|
||||||
azureGetLocationCredentials,
|
azureGetLocationCredentials,
|
||||||
|
azureArchiveLocationConstraintAssert,
|
||||||
};
|
};
|
||||||
|
|
|
@ -7,6 +7,7 @@ const bucketDeleteEncryption = require('./bucketDeleteEncryption');
|
||||||
const bucketDeleteWebsite = require('./bucketDeleteWebsite');
|
const bucketDeleteWebsite = require('./bucketDeleteWebsite');
|
||||||
const bucketDeleteLifecycle = require('./bucketDeleteLifecycle');
|
const bucketDeleteLifecycle = require('./bucketDeleteLifecycle');
|
||||||
const bucketDeletePolicy = require('./bucketDeletePolicy');
|
const bucketDeletePolicy = require('./bucketDeletePolicy');
|
||||||
|
const bucketDeleteQuota = require('./bucketDeleteQuota');
|
||||||
const { bucketGet } = require('./bucketGet');
|
const { bucketGet } = require('./bucketGet');
|
||||||
const bucketGetACL = require('./bucketGetACL');
|
const bucketGetACL = require('./bucketGetACL');
|
||||||
const bucketGetCors = require('./bucketGetCors');
|
const bucketGetCors = require('./bucketGetCors');
|
||||||
|
@ -17,6 +18,7 @@ const bucketGetLifecycle = require('./bucketGetLifecycle');
|
||||||
const bucketGetNotification = require('./bucketGetNotification');
|
const bucketGetNotification = require('./bucketGetNotification');
|
||||||
const bucketGetObjectLock = require('./bucketGetObjectLock');
|
const bucketGetObjectLock = require('./bucketGetObjectLock');
|
||||||
const bucketGetPolicy = require('./bucketGetPolicy');
|
const bucketGetPolicy = require('./bucketGetPolicy');
|
||||||
|
const bucketGetQuota = require('./bucketGetQuota');
|
||||||
const bucketGetEncryption = require('./bucketGetEncryption');
|
const bucketGetEncryption = require('./bucketGetEncryption');
|
||||||
const bucketHead = require('./bucketHead');
|
const bucketHead = require('./bucketHead');
|
||||||
const { bucketPut } = require('./bucketPut');
|
const { bucketPut } = require('./bucketPut');
|
||||||
|
@ -33,6 +35,7 @@ const bucketPutNotification = require('./bucketPutNotification');
|
||||||
const bucketPutEncryption = require('./bucketPutEncryption');
|
const bucketPutEncryption = require('./bucketPutEncryption');
|
||||||
const bucketPutPolicy = require('./bucketPutPolicy');
|
const bucketPutPolicy = require('./bucketPutPolicy');
|
||||||
const bucketPutObjectLock = require('./bucketPutObjectLock');
|
const bucketPutObjectLock = require('./bucketPutObjectLock');
|
||||||
|
const bucketUpdateQuota = require('./bucketUpdateQuota');
|
||||||
const bucketGetReplication = require('./bucketGetReplication');
|
const bucketGetReplication = require('./bucketGetReplication');
|
||||||
const bucketDeleteReplication = require('./bucketDeleteReplication');
|
const bucketDeleteReplication = require('./bucketDeleteReplication');
|
||||||
const corsPreflight = require('./corsPreflight');
|
const corsPreflight = require('./corsPreflight');
|
||||||
|
@ -44,7 +47,7 @@ const metadataSearch = require('./metadataSearch');
|
||||||
const { multiObjectDelete } = require('./multiObjectDelete');
|
const { multiObjectDelete } = require('./multiObjectDelete');
|
||||||
const multipartDelete = require('./multipartDelete');
|
const multipartDelete = require('./multipartDelete');
|
||||||
const objectCopy = require('./objectCopy');
|
const objectCopy = require('./objectCopy');
|
||||||
const objectDelete = require('./objectDelete');
|
const { objectDelete } = require('./objectDelete');
|
||||||
const objectDeleteTagging = require('./objectDeleteTagging');
|
const objectDeleteTagging = require('./objectDeleteTagging');
|
||||||
const objectGet = require('./objectGet');
|
const objectGet = require('./objectGet');
|
||||||
const objectGetACL = require('./objectGetACL');
|
const objectGetACL = require('./objectGetACL');
|
||||||
|
@ -82,6 +85,10 @@ const api = {
|
||||||
// Attach the apiMethod method to the request, so it can used by monitoring in the server
|
// Attach the apiMethod method to the request, so it can used by monitoring in the server
|
||||||
// eslint-disable-next-line no-param-reassign
|
// eslint-disable-next-line no-param-reassign
|
||||||
request.apiMethod = apiMethod;
|
request.apiMethod = apiMethod;
|
||||||
|
// Array of end of API callbacks, used to perform some logic
|
||||||
|
// at the end of an API.
|
||||||
|
// eslint-disable-next-line no-param-reassign
|
||||||
|
request.finalizerHooks = [];
|
||||||
|
|
||||||
const actionLog = monitoringMap[apiMethod];
|
const actionLog = monitoringMap[apiMethod];
|
||||||
if (!actionLog &&
|
if (!actionLog &&
|
||||||
|
@ -190,14 +197,17 @@ const api = {
|
||||||
|
|
||||||
return async.waterfall([
|
return async.waterfall([
|
||||||
next => auth.server.doAuth(
|
next => auth.server.doAuth(
|
||||||
request, log, (err, userInfo, authorizationResults, streamingV4Params) => {
|
request, log, (err, userInfo, authorizationResults, streamingV4Params, infos) => {
|
||||||
if (err) {
|
if (err) {
|
||||||
|
// VaultClient returns standard errors, but the route requires
|
||||||
|
// Arsenal errors
|
||||||
|
const arsenalError = err.metadata ? err : errors[err.code] || errors.InternalError;
|
||||||
log.trace('authentication error', { error: err });
|
log.trace('authentication error', { error: err });
|
||||||
return next(err);
|
return next(arsenalError);
|
||||||
}
|
}
|
||||||
return next(null, userInfo, authorizationResults, streamingV4Params);
|
return next(null, userInfo, authorizationResults, streamingV4Params, infos);
|
||||||
}, 's3', requestContexts),
|
}, 's3', requestContexts),
|
||||||
(userInfo, authorizationResults, streamingV4Params, next) => {
|
(userInfo, authorizationResults, streamingV4Params, infos, next) => {
|
||||||
const authNames = { accountName: userInfo.getAccountDisplayName() };
|
const authNames = { accountName: userInfo.getAccountDisplayName() };
|
||||||
if (userInfo.isRequesterAnIAMUser()) {
|
if (userInfo.isRequesterAnIAMUser()) {
|
||||||
authNames.userName = userInfo.getIAMdisplayName();
|
authNames.userName = userInfo.getIAMdisplayName();
|
||||||
|
@ -207,7 +217,7 @@ const api = {
|
||||||
}
|
}
|
||||||
log.addDefaultFields(authNames);
|
log.addDefaultFields(authNames);
|
||||||
if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') {
|
if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') {
|
||||||
return next(null, userInfo, authorizationResults, streamingV4Params);
|
return next(null, userInfo, authorizationResults, streamingV4Params, infos);
|
||||||
}
|
}
|
||||||
// issue 100 Continue to the client
|
// issue 100 Continue to the client
|
||||||
writeContinue(request, response);
|
writeContinue(request, response);
|
||||||
|
@ -238,12 +248,12 @@ const api = {
|
||||||
}
|
}
|
||||||
// Convert array of post buffers into one string
|
// Convert array of post buffers into one string
|
||||||
request.post = Buffer.concat(post, postLength).toString();
|
request.post = Buffer.concat(post, postLength).toString();
|
||||||
return next(null, userInfo, authorizationResults, streamingV4Params);
|
return next(null, userInfo, authorizationResults, streamingV4Params, infos);
|
||||||
});
|
});
|
||||||
return undefined;
|
return undefined;
|
||||||
},
|
},
|
||||||
// Tag condition keys require information from CloudServer for evaluation
|
// Tag condition keys require information from CloudServer for evaluation
|
||||||
(userInfo, authorizationResults, streamingV4Params, next) => tagConditionKeyAuth(
|
(userInfo, authorizationResults, streamingV4Params, infos, next) => tagConditionKeyAuth(
|
||||||
authorizationResults,
|
authorizationResults,
|
||||||
request,
|
request,
|
||||||
requestContexts,
|
requestContexts,
|
||||||
|
@ -254,13 +264,14 @@ const api = {
|
||||||
log.trace('tag authentication error', { error: err });
|
log.trace('tag authentication error', { error: err });
|
||||||
return next(err);
|
return next(err);
|
||||||
}
|
}
|
||||||
return next(null, userInfo, authResultsWithTags, streamingV4Params);
|
return next(null, userInfo, authResultsWithTags, streamingV4Params, infos);
|
||||||
},
|
},
|
||||||
),
|
),
|
||||||
], (err, userInfo, authorizationResults, streamingV4Params) => {
|
], (err, userInfo, authorizationResults, streamingV4Params, infos) => {
|
||||||
if (err) {
|
if (err) {
|
||||||
return callback(err);
|
return callback(err);
|
||||||
}
|
}
|
||||||
|
request.accountQuotas = infos?.accountQuota;
|
||||||
if (authorizationResults) {
|
if (authorizationResults) {
|
||||||
const checkedResults = checkAuthResults(authorizationResults);
|
const checkedResults = checkAuthResults(authorizationResults);
|
||||||
if (checkedResults instanceof Error) {
|
if (checkedResults instanceof Error) {
|
||||||
|
@ -277,19 +288,23 @@ const api = {
|
||||||
return acc;
|
return acc;
|
||||||
}, {});
|
}, {});
|
||||||
}
|
}
|
||||||
|
const methodCallback = (err, ...results) => async.forEachLimit(request.finalizerHooks, 5,
|
||||||
|
(hook, done) => hook(err, done),
|
||||||
|
() => callback(err, ...results));
|
||||||
|
|
||||||
if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') {
|
if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') {
|
||||||
request._response = response;
|
request._response = response;
|
||||||
return this[apiMethod](userInfo, request, streamingV4Params,
|
return this[apiMethod](userInfo, request, streamingV4Params,
|
||||||
log, callback, authorizationResults);
|
log, methodCallback, authorizationResults);
|
||||||
}
|
}
|
||||||
if (apiMethod === 'objectCopy' || apiMethod === 'objectPutCopyPart') {
|
if (apiMethod === 'objectCopy' || apiMethod === 'objectPutCopyPart') {
|
||||||
return this[apiMethod](userInfo, request, sourceBucket,
|
return this[apiMethod](userInfo, request, sourceBucket,
|
||||||
sourceObject, sourceVersionId, log, callback);
|
sourceObject, sourceVersionId, log, methodCallback);
|
||||||
}
|
}
|
||||||
if (apiMethod === 'objectGet') {
|
if (apiMethod === 'objectGet') {
|
||||||
return this[apiMethod](userInfo, request, returnTagCount, log, callback);
|
return this[apiMethod](userInfo, request, returnTagCount, log, callback);
|
||||||
}
|
}
|
||||||
return this[apiMethod](userInfo, request, log, callback);
|
return this[apiMethod](userInfo, request, log, methodCallback);
|
||||||
});
|
});
|
||||||
},
|
},
|
||||||
bucketDelete,
|
bucketDelete,
|
||||||
|
@ -316,11 +331,14 @@ const api = {
|
||||||
bucketPutReplication,
|
bucketPutReplication,
|
||||||
bucketGetReplication,
|
bucketGetReplication,
|
||||||
bucketDeleteReplication,
|
bucketDeleteReplication,
|
||||||
|
bucketDeleteQuota,
|
||||||
bucketPutLifecycle,
|
bucketPutLifecycle,
|
||||||
|
bucketUpdateQuota,
|
||||||
bucketGetLifecycle,
|
bucketGetLifecycle,
|
||||||
bucketDeleteLifecycle,
|
bucketDeleteLifecycle,
|
||||||
bucketPutPolicy,
|
bucketPutPolicy,
|
||||||
bucketGetPolicy,
|
bucketGetPolicy,
|
||||||
|
bucketGetQuota,
|
||||||
bucketDeletePolicy,
|
bucketDeletePolicy,
|
||||||
bucketPutObjectLock,
|
bucketPutObjectLock,
|
||||||
bucketPutNotification,
|
bucketPutNotification,
|
||||||
|
|
|
@ -52,7 +52,7 @@ function prepareRequestContexts(apiMethod, request, sourceBucket,
|
||||||
apiMethod, 's3');
|
apiMethod, 's3');
|
||||||
}
|
}
|
||||||
|
|
||||||
if (apiMethod === 'multiObjectDelete' || apiMethod === 'bucketPut') {
|
if (apiMethod === 'bucketPut') {
|
||||||
return null;
|
return null;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -65,7 +65,17 @@ function prepareRequestContexts(apiMethod, request, sourceBucket,
|
||||||
|
|
||||||
const requestContexts = [];
|
const requestContexts = [];
|
||||||
|
|
||||||
if (apiMethodAfterVersionCheck === 'objectCopy'
|
if (apiMethod === 'multiObjectDelete') {
|
||||||
|
// MultiObjectDelete does not require any authorization when evaluating
|
||||||
|
// the API. Instead, we authorize each object passed.
|
||||||
|
// But in order to get any relevant information from the authorization service
|
||||||
|
// for example, the account quota, we must send a request context object
|
||||||
|
// with no `specificResource`. We expect the result to be an implicit deny.
|
||||||
|
// In the API, we then ignore these authorization results, and we can use
|
||||||
|
// any information returned, e.g., the quota.
|
||||||
|
const requestContextMultiObjectDelete = generateRequestContext('objectDelete');
|
||||||
|
requestContexts.push(requestContextMultiObjectDelete);
|
||||||
|
} else if (apiMethodAfterVersionCheck === 'objectCopy'
|
||||||
|| apiMethodAfterVersionCheck === 'objectPutCopyPart') {
|
|| apiMethodAfterVersionCheck === 'objectPutCopyPart') {
|
||||||
const objectGetAction = sourceVersionId ? 'objectGetVersion' :
|
const objectGetAction = sourceVersionId ? 'objectGetVersion' :
|
||||||
'objectGet';
|
'objectGet';
|
||||||
|
|
|
@ -2,11 +2,13 @@
|
||||||
* Code based on Yutaka Oishi (Fujifilm) contributions
|
* Code based on Yutaka Oishi (Fujifilm) contributions
|
||||||
* Date: 11 Sep 2020
|
* Date: 11 Sep 2020
|
||||||
*/
|
*/
|
||||||
const ObjectMDArchive = require('arsenal').models.ObjectMDArchive;
|
const { ObjectMDArchive } = require('arsenal').models;
|
||||||
const errors = require('arsenal').errors;
|
const errors = require('arsenal').errors;
|
||||||
const { config } = require('../../../Config');
|
const { config } = require('../../../Config');
|
||||||
const { locationConstraints } = config;
|
const { locationConstraints } = config;
|
||||||
|
|
||||||
|
const { scaledMsPerDay } = config.getTimeOptions();
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Get response header "x-amz-restore"
|
* Get response header "x-amz-restore"
|
||||||
* Be called by objectHead.js
|
* Be called by objectHead.js
|
||||||
|
@ -32,7 +34,6 @@ function getAmzRestoreResHeader(objMD) {
|
||||||
return undefined;
|
return undefined;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Check if restore can be done.
|
* Check if restore can be done.
|
||||||
*
|
*
|
||||||
|
@ -41,6 +42,23 @@ function getAmzRestoreResHeader(objMD) {
|
||||||
* @return {ArsenalError|undefined} - undefined if the conditions for RestoreObject are fulfilled
|
* @return {ArsenalError|undefined} - undefined if the conditions for RestoreObject are fulfilled
|
||||||
*/
|
*/
|
||||||
function _validateStartRestore(objectMD, log) {
|
function _validateStartRestore(objectMD, log) {
|
||||||
|
if (objectMD.archive?.restoreCompletedAt) {
|
||||||
|
if (new Date(objectMD.archive?.restoreWillExpireAt) < new Date(Date.now())) {
|
||||||
|
// return InvalidObjectState error if the restored object is expired
|
||||||
|
// but restore info md of this object has not yet been cleared
|
||||||
|
log.debug('The restored object already expired.',
|
||||||
|
{
|
||||||
|
archive: objectMD.archive,
|
||||||
|
method: '_validateStartRestore',
|
||||||
|
});
|
||||||
|
return errors.InvalidObjectState;
|
||||||
|
}
|
||||||
|
|
||||||
|
// If object is already restored, no further check is needed
|
||||||
|
// Furthermore, we cannot check if the location is cold, as the `dataStoreName` would have
|
||||||
|
// been reset.
|
||||||
|
return undefined;
|
||||||
|
}
|
||||||
const isLocationCold = locationConstraints[objectMD.dataStoreName]?.isCold;
|
const isLocationCold = locationConstraints[objectMD.dataStoreName]?.isCold;
|
||||||
if (!isLocationCold) {
|
if (!isLocationCold) {
|
||||||
// return InvalidObjectState error if the object is not in cold storage,
|
// return InvalidObjectState error if the object is not in cold storage,
|
||||||
|
@ -52,18 +70,7 @@ function _validateStartRestore(objectMD, log) {
|
||||||
});
|
});
|
||||||
return errors.InvalidObjectState;
|
return errors.InvalidObjectState;
|
||||||
}
|
}
|
||||||
if (objectMD.archive?.restoreCompletedAt
|
if (objectMD.archive?.restoreRequestedAt) {
|
||||||
&& new Date(objectMD.archive?.restoreWillExpireAt) < new Date(Date.now())) {
|
|
||||||
// return InvalidObjectState error if the restored object is expired
|
|
||||||
// but restore info md of this object has not yet been cleared
|
|
||||||
log.debug('The restored object already expired.',
|
|
||||||
{
|
|
||||||
archive: objectMD.archive,
|
|
||||||
method: '_validateStartRestore',
|
|
||||||
});
|
|
||||||
return errors.InvalidObjectState;
|
|
||||||
}
|
|
||||||
if (objectMD.archive?.restoreRequestedAt && !objectMD.archive?.restoreCompletedAt) {
|
|
||||||
// return RestoreAlreadyInProgress error if the object is currently being restored
|
// return RestoreAlreadyInProgress error if the object is currently being restored
|
||||||
// check if archive.restoreRequestAt exists and archive.restoreCompletedAt not yet exists
|
// check if archive.restoreRequestAt exists and archive.restoreCompletedAt not yet exists
|
||||||
log.debug('The object is currently being restored.',
|
log.debug('The object is currently being restored.',
|
||||||
|
@ -120,22 +127,36 @@ function validatePutVersionId(objMD, versionId, log) {
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Check if the object is already restored
|
* Check if the object is already restored, and update the expiration date accordingly:
|
||||||
|
* > After restoring an archived object, you can update the restoration period by reissuing the
|
||||||
|
* > request with a new period. Amazon S3 updates the restoration period relative to the current
|
||||||
|
* > time.
|
||||||
*
|
*
|
||||||
* @param {ObjectMD} objectMD - object metadata
|
* @param {ObjectMD} objectMD - object metadata
|
||||||
* @param {object} log - werelogs logger
|
* @param {object} log - werelogs logger
|
||||||
* @return {boolean} - true if the object is already restored
|
* @return {boolean} - true if the object is already restored
|
||||||
*/
|
*/
|
||||||
function isObjectAlreadyRestored(objectMD, log) {
|
function _updateObjectExpirationDate(objectMD, log) {
|
||||||
// check if restoreCompletedAt field exists
|
// Check if restoreCompletedAt field exists
|
||||||
// and archive.restoreWillExpireAt > current time
|
// Normally, we should check `archive.restoreWillExpireAt > current time`; however this is
|
||||||
const isObjectAlreadyRestored = objectMD.archive?.restoreCompletedAt
|
// checked earlier in the process, so checking again here would create weird states
|
||||||
&& new Date(objectMD.archive?.restoreWillExpireAt) >= new Date(Date.now());
|
const isObjectAlreadyRestored = !!objectMD.archive.restoreCompletedAt;
|
||||||
log.debug('The restore status of the object.',
|
log.debug('The restore status of the object.', {
|
||||||
{
|
isObjectAlreadyRestored,
|
||||||
isObjectAlreadyRestored,
|
method: 'isObjectAlreadyRestored'
|
||||||
method: 'isObjectAlreadyRestored'
|
});
|
||||||
});
|
if (isObjectAlreadyRestored) {
|
||||||
|
const expiryDate = new Date(objectMD.archive.restoreRequestedAt);
|
||||||
|
expiryDate.setTime(expiryDate.getTime() + (objectMD.archive.restoreRequestedDays * scaledMsPerDay));
|
||||||
|
|
||||||
|
/* eslint-disable no-param-reassign */
|
||||||
|
objectMD.archive.restoreWillExpireAt = expiryDate;
|
||||||
|
objectMD['x-amz-restore'] = {
|
||||||
|
'ongoing-request': false,
|
||||||
|
'expiry-date': expiryDate,
|
||||||
|
};
|
||||||
|
/* eslint-enable no-param-reassign */
|
||||||
|
}
|
||||||
return isObjectAlreadyRestored;
|
return isObjectAlreadyRestored;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -195,12 +216,32 @@ function startRestore(objectMD, restoreParam, log, cb) {
|
||||||
if (updateResultError) {
|
if (updateResultError) {
|
||||||
return cb(updateResultError);
|
return cb(updateResultError);
|
||||||
}
|
}
|
||||||
return cb(null, isObjectAlreadyRestored(objectMD, log));
|
const isObjectAlreadyRestored = _updateObjectExpirationDate(objectMD, log);
|
||||||
|
return cb(null, isObjectAlreadyRestored);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* checks if object data is available or if it's in cold storage
|
||||||
|
* @param {ObjectMD} objMD Object metadata
|
||||||
|
* @returns {ArsenalError|null} error if object data is not available
|
||||||
|
*/
|
||||||
|
function verifyColdObjectAvailable(objMD) {
|
||||||
|
// return error when object is cold
|
||||||
|
if (objMD.archive &&
|
||||||
|
// Object is in cold backend
|
||||||
|
(!objMD.archive.restoreRequestedAt ||
|
||||||
|
// Object is being restored
|
||||||
|
(objMD.archive.restoreRequestedAt && !objMD.archive.restoreCompletedAt))) {
|
||||||
|
const err = errors.InvalidObjectState
|
||||||
|
.customizeDescription('The operation is not valid for the object\'s storage class');
|
||||||
|
return err;
|
||||||
|
}
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
module.exports = {
|
module.exports = {
|
||||||
startRestore,
|
startRestore,
|
||||||
getAmzRestoreResHeader,
|
getAmzRestoreResHeader,
|
||||||
validatePutVersionId,
|
validatePutVersionId,
|
||||||
|
verifyColdObjectAvailable,
|
||||||
};
|
};
|
||||||
|
|
|
@ -52,6 +52,7 @@ function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
|
||||||
* credentialScope (to be used for streaming v4 auth if applicable)
|
* credentialScope (to be used for streaming v4 auth if applicable)
|
||||||
* @param {(object|null)} overheadField - fields to be included in metadata overhead
|
* @param {(object|null)} overheadField - fields to be included in metadata overhead
|
||||||
* @param {RequestLogger} log - logger instance
|
* @param {RequestLogger} log - logger instance
|
||||||
|
* @param {string} originOp - Origin operation
|
||||||
* @param {function} callback - callback function
|
* @param {function} callback - callback function
|
||||||
* @return {undefined} and call callback with (err, result) -
|
* @return {undefined} and call callback with (err, result) -
|
||||||
* result.contentMD5 - content md5 of new object or version
|
* result.contentMD5 - content md5 of new object or version
|
||||||
|
@ -59,7 +60,7 @@ function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
|
||||||
*/
|
*/
|
||||||
function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
|
function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
|
||||||
canonicalID, cipherBundle, request, isDeleteMarker, streamingV4Params,
|
canonicalID, cipherBundle, request, isDeleteMarker, streamingV4Params,
|
||||||
overheadField, log, callback) {
|
overheadField, log, originOp, callback) {
|
||||||
const putVersionId = request.headers['x-scal-s3-version-id'];
|
const putVersionId = request.headers['x-scal-s3-version-id'];
|
||||||
const isPutVersion = putVersionId || putVersionId === '';
|
const isPutVersion = putVersionId || putVersionId === '';
|
||||||
|
|
||||||
|
@ -142,7 +143,7 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
|
||||||
removeAWSChunked(request.headers['content-encoding']);
|
removeAWSChunked(request.headers['content-encoding']);
|
||||||
metadataStoreParams.expires = request.headers.expires;
|
metadataStoreParams.expires = request.headers.expires;
|
||||||
metadataStoreParams.tagging = request.headers['x-amz-tagging'];
|
metadataStoreParams.tagging = request.headers['x-amz-tagging'];
|
||||||
metadataStoreParams.originOp = 's3:ObjectCreated:Put';
|
metadataStoreParams.originOp = originOp;
|
||||||
const defaultObjectLockConfiguration
|
const defaultObjectLockConfiguration
|
||||||
= bucketMD.getObjectLockConfiguration();
|
= bucketMD.getObjectLockConfiguration();
|
||||||
if (defaultObjectLockConfiguration) {
|
if (defaultObjectLockConfiguration) {
|
||||||
|
@ -157,7 +158,7 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
|
||||||
// eslint-disable-next-line no-param-reassign
|
// eslint-disable-next-line no-param-reassign
|
||||||
request.headers[constants.objectLocationConstraintHeader] =
|
request.headers[constants.objectLocationConstraintHeader] =
|
||||||
objMD[constants.objectLocationConstraintHeader];
|
objMD[constants.objectLocationConstraintHeader];
|
||||||
metadataStoreParams.originOp = 's3:ObjectRemoved:DeleteMarkerCreated';
|
metadataStoreParams.originOp = originOp;
|
||||||
}
|
}
|
||||||
|
|
||||||
const backendInfoObj =
|
const backendInfoObj =
|
||||||
|
|
|
@ -4,23 +4,25 @@ const {
|
||||||
LifecycleDateTime,
|
LifecycleDateTime,
|
||||||
LifecycleUtils,
|
LifecycleUtils,
|
||||||
} = require('arsenal').s3middleware.lifecycleHelpers;
|
} = require('arsenal').s3middleware.lifecycleHelpers;
|
||||||
|
const { config } = require('../../../Config');
|
||||||
|
|
||||||
// moves lifecycle transition deadlines 1 day earlier, mostly for testing
|
const {
|
||||||
const transitionOneDayEarlier = process.env.TRANSITION_ONE_DAY_EARLIER === 'true';
|
expireOneDayEarlier,
|
||||||
// moves lifecycle expiration deadlines 1 day earlier, mostly for testing
|
transitionOneDayEarlier,
|
||||||
const expireOneDayEarlier = process.env.EXPIRE_ONE_DAY_EARLIER === 'true';
|
timeProgressionFactor,
|
||||||
|
scaledMsPerDay,
|
||||||
|
} = config.getTimeOptions();
|
||||||
|
|
||||||
const lifecycleDateTime = new LifecycleDateTime({
|
const lifecycleDateTime = new LifecycleDateTime({
|
||||||
transitionOneDayEarlier,
|
transitionOneDayEarlier,
|
||||||
expireOneDayEarlier,
|
expireOneDayEarlier,
|
||||||
|
timeProgressionFactor,
|
||||||
});
|
});
|
||||||
|
|
||||||
const lifecycleUtils = new LifecycleUtils(supportedLifecycleRules, lifecycleDateTime);
|
const lifecycleUtils = new LifecycleUtils(supportedLifecycleRules, lifecycleDateTime, timeProgressionFactor);
|
||||||
|
|
||||||
const oneDay = 24 * 60 * 60 * 1000; // Milliseconds in a day.
|
|
||||||
|
|
||||||
/**
 * Computes the date that lies `expDays` (scaled) days after an object's
 * timestamp.
 * @param {string|Date} objDate - date understood by datetime.getTimestamp
 * @param {number} expDays - number of days to add
 * @param {LifecycleDateTime} datetime - helper exposing getTimestamp()
 * @returns {Date} the resulting date
 */
function calculateDate(objDate, expDays, datetime) {
    const baseMs = datetime.getTimestamp(objDate);
    const offsetMs = expDays * scaledMsPerDay;
    return new Date(baseMs + offsetMs);
}
|
||||||
|
|
||||||
function formatExpirationHeader(date, id) {
|
function formatExpirationHeader(date, id) {
|
||||||
|
|
|
@ -5,6 +5,7 @@ const { config } = require('../../../Config');
|
||||||
const vault = require('../../../auth/vault');
|
const vault = require('../../../auth/vault');
|
||||||
const { evaluateBucketPolicyWithIAM } = require('../authorization/permissionChecks');
|
const { evaluateBucketPolicyWithIAM } = require('../authorization/permissionChecks');
|
||||||
|
|
||||||
|
const { scaledMsPerDay } = config.getTimeOptions();
|
||||||
/**
|
/**
|
||||||
* Calculates retain until date for the locked object version
|
* Calculates retain until date for the locked object version
|
||||||
* @param {object} retention - includes days or years retention period
|
* @param {object} retention - includes days or years retention period
|
||||||
|
@ -20,8 +21,9 @@ function calculateRetainUntilDate(retention) {
|
||||||
const date = moment();
|
const date = moment();
|
||||||
// Calculate the number of days to retain the lock on the object
|
// Calculate the number of days to retain the lock on the object
|
||||||
const retainUntilDays = days || years * 365;
|
const retainUntilDays = days || years * 365;
|
||||||
|
const retainUntilDaysInMs = retainUntilDays * scaledMsPerDay;
|
||||||
const retainUntilDate
|
const retainUntilDate
|
||||||
= date.add(retainUntilDays, 'days');
|
= date.add(retainUntilDaysInMs, 'ms');
|
||||||
return retainUntilDate.toISOString();
|
return retainUntilDate.toISOString();
|
||||||
}
|
}
|
||||||
/**
|
/**
|
||||||
|
|
|
@ -8,7 +8,7 @@ const { pushMetric } = require('../../../utapi/utilities');
|
||||||
const { decodeVersionId } = require('./versioning');
|
const { decodeVersionId } = require('./versioning');
|
||||||
const collectCorsHeaders = require('../../../utilities/collectCorsHeaders');
|
const collectCorsHeaders = require('../../../utilities/collectCorsHeaders');
|
||||||
const { parseRestoreRequestXml } = s3middleware.objectRestore;
|
const { parseRestoreRequestXml } = s3middleware.objectRestore;
|
||||||
|
const { processBytesToWrite, validateQuotas } = require('../quotas/quotaUtils');
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Check if tier is supported
|
* Check if tier is supported
|
||||||
|
@ -59,6 +59,14 @@ function objectRestore(metadata, mdUtils, userInfo, request, log, callback) {
|
||||||
objectKey,
|
objectKey,
|
||||||
versionId: decodedVidResult,
|
versionId: decodedVidResult,
|
||||||
requestType: request.apiMethods || 'restoreObject',
|
requestType: request.apiMethods || 'restoreObject',
|
||||||
|
/**
|
||||||
|
* Restoring an object might not cause any impact on
|
||||||
|
* the storage, if the object is already restored: in
|
||||||
|
* this case, the duration is extended. We disable the
|
||||||
|
* quota evaluation and trigger it manually.
|
||||||
|
*/
|
||||||
|
checkQuota: false,
|
||||||
|
request,
|
||||||
};
|
};
|
||||||
|
|
||||||
return async.waterfall([
|
return async.waterfall([
|
||||||
|
@ -116,6 +124,16 @@ function objectRestore(metadata, mdUtils, userInfo, request, log, callback) {
|
||||||
return next(err, bucketMD, objectMD);
|
return next(err, bucketMD, objectMD);
|
||||||
});
|
});
|
||||||
},
|
},
|
||||||
|
function evaluateQuotas(bucketMD, objectMD, next) {
|
||||||
|
if (isObjectRestored) {
|
||||||
|
return next(null, bucketMD, objectMD);
|
||||||
|
}
|
||||||
|
const actions = Array.isArray(mdValueParams.requestType) ?
|
||||||
|
mdValueParams.requestType : [mdValueParams.requestType];
|
||||||
|
const bytes = processBytesToWrite(request.apiMethod, bucketMD, mdValueParams.versionId, 0, objectMD);
|
||||||
|
return validateQuotas(request, bucketMD, request.accountQuotas, actions, request.apiMethod, bytes,
|
||||||
|
false, log, err => next(err, bucketMD, objectMD));
|
||||||
|
},
|
||||||
function updateObjectMD(bucketMD, objectMD, next) {
|
function updateObjectMD(bucketMD, objectMD, next) {
|
||||||
const params = objectMD.versionId ? { versionId: objectMD.versionId } : {};
|
const params = objectMD.versionId ? { versionId: objectMD.versionId } : {};
|
||||||
metadata.putObjectMD(bucketMD.getName(), objectKey, objectMD, params,
|
metadata.putObjectMD(bucketMD.getName(), objectKey, objectMD, params,
|
||||||
|
|
|
@ -4,7 +4,7 @@ const async = require('async');
|
||||||
const metadata = require('../../../metadata/wrapper');
|
const metadata = require('../../../metadata/wrapper');
|
||||||
const { config } = require('../../../Config');
|
const { config } = require('../../../Config');
|
||||||
|
|
||||||
const oneDay = 24 * 60 * 60 * 1000;
|
const { scaledMsPerDay } = config.getTimeOptions();
|
||||||
|
|
||||||
const versionIdUtils = versioning.VersionID;
|
const versionIdUtils = versioning.VersionID;
|
||||||
// Use Arsenal function to generate a version ID used internally by metadata
|
// Use Arsenal function to generate a version ID used internally by metadata
|
||||||
|
@ -460,6 +460,47 @@ function preprocessingVersioningDelete(bucketName, bucketMD, objectMD, reqVersio
|
||||||
return options;
|
return options;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
 * Carries object metadata over when the object is restored from cold
 * storage, skipping the specific entries that must not survive the restore.
 * @param {object} objMD - obj metadata
 * @param {object} metadataStoreParams - custom built object containing resource details.
 * @return {undefined}
 */
function restoreMetadata(objMD, metadataStoreParams) {
    /* eslint-disable no-param-reassign */
    const skippedUserMD = ['x-amz-meta-scal-s3-restore-attempt'];
    // Copy user metadata, except the internal restore-attempt entry.
    for (const key of Object.keys(objMD)) {
        if (key.startsWith('x-amz-meta-') && !skippedUserMD.includes(key)) {
            metadataStoreParams.metaHeaders[key] = objMD[key];
        }
    }

    if (objMD['x-amz-website-redirect-location']) {
        metadataStoreParams.headers = metadataStoreParams.headers || {};
        metadataStoreParams.headers['x-amz-website-redirect-location'] = objMD['x-amz-website-redirect-location'];
    }

    if (objMD.replicationInfo) {
        metadataStoreParams.replicationInfo = objMD.replicationInfo;
    }

    if (objMD.legalHold) {
        metadataStoreParams.legalHold = objMD.legalHold;
    }

    if (objMD.acl) {
        metadataStoreParams.acl = objMD.acl;
    }

    // Preserve the original timestamps and tags on the restored object.
    metadataStoreParams.creationTime = objMD['creation-time'];
    metadataStoreParams.lastModifiedDate = objMD['last-modified'];
    metadataStoreParams.taggingCopy = objMD.tags;
}
|
||||||
|
|
||||||
/** overwritingVersioning - return versioning information for S3 to handle
|
/** overwritingVersioning - return versioning information for S3 to handle
|
||||||
* storing version metadata with a specific version id.
|
* storing version metadata with a specific version id.
|
||||||
* @param {object} objMD - obj metadata
|
* @param {object} objMD - obj metadata
|
||||||
|
@ -471,10 +512,8 @@ function preprocessingVersioningDelete(bucketName, bucketMD, objectMD, reqVersio
|
||||||
* version id of the null version
|
* version id of the null version
|
||||||
*/
|
*/
|
||||||
function overwritingVersioning(objMD, metadataStoreParams) {
|
function overwritingVersioning(objMD, metadataStoreParams) {
|
||||||
/* eslint-disable no-param-reassign */
|
|
||||||
metadataStoreParams.creationTime = objMD['creation-time'];
|
|
||||||
metadataStoreParams.lastModifiedDate = objMD['last-modified'];
|
|
||||||
metadataStoreParams.updateMicroVersionId = true;
|
metadataStoreParams.updateMicroVersionId = true;
|
||||||
|
metadataStoreParams.amzStorageClass = objMD['x-amz-storage-class'];
|
||||||
|
|
||||||
// set correct originOp
|
// set correct originOp
|
||||||
metadataStoreParams.originOp = 's3:ObjectRestore:Completed';
|
metadataStoreParams.originOp = 's3:ObjectRestore:Completed';
|
||||||
|
@ -487,7 +526,7 @@ function overwritingVersioning(objMD, metadataStoreParams) {
|
||||||
restoreRequestedAt: objMD.archive?.restoreRequestedAt,
|
restoreRequestedAt: objMD.archive?.restoreRequestedAt,
|
||||||
restoreRequestedDays: objMD.archive?.restoreRequestedDays,
|
restoreRequestedDays: objMD.archive?.restoreRequestedDays,
|
||||||
restoreCompletedAt: new Date(now),
|
restoreCompletedAt: new Date(now),
|
||||||
restoreWillExpireAt: new Date(now + (days * oneDay)),
|
restoreWillExpireAt: new Date(now + (days * scaledMsPerDay)),
|
||||||
};
|
};
|
||||||
|
|
||||||
/* eslint-enable no-param-reassign */
|
/* eslint-enable no-param-reassign */
|
||||||
|
@ -503,6 +542,8 @@ function overwritingVersioning(objMD, metadataStoreParams) {
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
restoreMetadata(objMD, metadataStoreParams);
|
||||||
|
|
||||||
return options;
|
return options;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -0,0 +1,314 @@
|
||||||
|
const async = require('async');
|
||||||
|
const { errors } = require('arsenal');
|
||||||
|
const monitoring = require('../../../utilities/monitoringHandler');
|
||||||
|
const {
|
||||||
|
actionNeedQuotaCheckCopy,
|
||||||
|
actionNeedQuotaCheck,
|
||||||
|
actionWithDataDeletion,
|
||||||
|
} = require('arsenal').policies;
|
||||||
|
const { config } = require('../../../Config');
|
||||||
|
const QuotaService = require('../../../quotas/quotas');
|
||||||
|
|
||||||
|
/**
 * Process the bytes to write based on the request and object metadata
 * @param {string} apiMethod - api method
 * @param {BucketInfo} bucket - bucket info
 * @param {string} versionId - version id of the object
 * @param {number} contentLength - content length of the object
 * @param {object} objMD - object metadata
 * @param {object} destObjMD - destination object metadata
 * @return {number} processed content length
 */
function processBytesToWrite(apiMethod, bucket, versionId, contentLength, objMD, destObjMD = null) {
    // Parse the source object's stored size on demand.
    const objBytes = () => Number.parseInt(objMD['content-length'], 10);
    let bytes = contentLength;
    if (apiMethod === 'objectRestore') {
        // Restore brings the archived object's full size back.
        bytes = objBytes();
    } else if (!bytes && objMD?.['content-length']) {
        const isCopy = apiMethod === 'objectCopy' || apiMethod === 'objectPutCopyPart';
        if (isCopy) {
            if (!destObjMD || bucket.isVersioningEnabled()) {
                // Plain copy (or versioned target): full source size is added.
                bytes = objBytes();
            } else if (!bucket.isVersioningEnabled()) {
                // Copy replaces the target on an unversioned bucket: store the diff.
                bytes = objBytes() - Number.parseInt(destObjMD['content-length'], 10);
            }
        } else if (!bucket.isVersioningEnabled() || (bucket.isVersioningEnabled() && versionId)) {
            // Deletion (unversioned, or a specific version): bytes are reclaimed.
            bytes = -objBytes();
        }
    } else if (bytes && objMD?.['content-length'] && !bucket.isVersioningEnabled()) {
        // Overwrite on an unversioned bucket: only the difference counts.
        bytes -= objBytes();
    }
    // NaN or undefined collapse to 0 so quota math stays numeric.
    return bytes || 0;
}
|
||||||
|
|
||||||
|
/**
 * Checks whether a utilization metric is too old to be trusted.
 *
 * @param {Object} metric - The metric object to check (may carry a `date`).
 * @param {string} resourceType - The type of the resource.
 * @param {string} resourceName - The name of the resource.
 * @param {string} action - The action being performed.
 * @param {number} inflight - The number of inflight requests.
 * @param {Object} log - The logger object.
 * @returns {boolean} true if the metric is stale, false otherwise.
 */
function isMetricStale(metric, resourceType, resourceName, action, inflight, log) {
    if (!metric.date) {
        // No timestamp on the metric: treat it as fresh.
        return false;
    }
    const ageMs = Date.now() - new Date(metric.date).getTime();
    if (ageMs <= QuotaService.maxStaleness) {
        return false;
    }
    // Stale metrics must not block requests: log, count, and let it through.
    log.warn('Stale metrics from the quota service, allowing the request', {
        resourceType,
        resourceName,
        action,
        inflight,
    });
    monitoring.requestWithQuotaMetricsUnavailable.inc();
    return true;
}
|
||||||
|
|
||||||
|
/**
 * Evaluates quotas for a bucket and an account and update inflight count.
 *
 * @param {number} bucketQuota - The quota limit for the bucket.
 * @param {number} accountQuota - The quota limit for the account.
 * @param {object} bucket - The bucket object.
 * @param {object} account - The account object.
 * @param {number} inflight - The number of inflight requests.
 * @param {number} inflightForCheck - The number of inflight requests for checking quotas.
 * @param {string} action - The action being performed.
 * @param {object} log - The logger object.
 * @param {function} callback - The callback function to be called when evaluation is complete;
 *   receives (err, bucketQuotaExceeded, accountQuotaExceeded).
 * @returns {object} - The result of the evaluation.
 */
function _evaluateQuotas(
    bucketQuota,
    accountQuota,
    bucket,
    account,
    inflight,
    inflightForCheck,
    action,
    log,
    callback,
) {
    let bucketQuotaExceeded = false;
    let accountQuotaExceeded = false;
    // Bucket metric key is "<bucketName>_<creationTimeMs>" (scoped by
    // creation date; presumably to separate recreated same-name buckets
    // — confirm against the quota service's key scheme).
    const creationDate = new Date(bucket.getCreationDate()).getTime();
    // Bucket and account checks run concurrently; each only runs when the
    // corresponding quota is configured (> 0).
    return async.parallel({
        bucketQuota: parallelDone => {
            if (bucketQuota > 0) {
                return QuotaService.getUtilizationMetrics('bucket',
                    `${bucket.getName()}_${creationDate}`, null, {
                        action,
                        inflight,
                    }, (err, bucketMetrics) => {
                        // Negative inflight is a deletion/cleanup: the count
                        // was updated, but the limit is not compared.
                        if (err || inflight < 0) {
                            return parallelDone(err);
                        }
                        // Stale metrics never block the request.
                        if (!isMetricStale(bucketMetrics, 'bucket', bucket.getName(), action, inflight, log) &&
                            bucketMetrics.bytesTotal + inflightForCheck > bucketQuota) {
                            log.debug('Bucket quota exceeded', {
                                bucket: bucket.getName(),
                                action,
                                inflight,
                                quota: bucketQuota,
                                bytesTotal: bucketMetrics.bytesTotal,
                            });
                            bucketQuotaExceeded = true;
                        }
                        return parallelDone();
                    });
            }
            // No bucket quota configured: nothing to evaluate.
            return parallelDone();
        },
        accountQuota: parallelDone => {
            if (accountQuota > 0 && account?.account) {
                return QuotaService.getUtilizationMetrics('account',
                    account.account, null, {
                        action,
                        inflight,
                    }, (err, accountMetrics) => {
                        // Same cleanup semantics as the bucket branch above.
                        if (err || inflight < 0) {
                            return parallelDone(err);
                        }
                        if (!isMetricStale(accountMetrics, 'account', account.account, action, inflight, log) &&
                            accountMetrics.bytesTotal + inflightForCheck > accountQuota) {
                            log.debug('Account quota exceeded', {
                                accountId: account.account,
                                action,
                                inflight,
                                quota: accountQuota,
                                bytesTotal: accountMetrics.bytesTotal,
                            });
                            accountQuotaExceeded = true;
                        }
                        return parallelDone();
                    });
            }
            return parallelDone();
        },
    }, err => {
        if (err) {
            log.warn('Error evaluating quotas', {
                error: err.name,
                description: err.message,
                isInflightDeletion: inflight < 0,
            });
        }
        // Both flags are reported even on error so the caller decides policy.
        return callback(err, bucketQuotaExceeded, accountQuotaExceeded);
    });
}
|
||||||
|
|
||||||
|
/**
 * Records how long a quota evaluation took for a given API method.
 *
 * @param {string} apiMethod - The name of the API method being monitored.
 * @param {string} type - The type of quota being evaluated.
 * @param {string} code - The code associated with the quota being evaluated.
 * @param {number} duration - The duration of the quota evaluation in nanoseconds.
 * @returns {undefined} - Returns nothing.
 */
function monitorQuotaEvaluationDuration(apiMethod, type, code, duration) {
    // The histogram stores seconds; the caller measures in nanoseconds.
    const seconds = duration / 1e9;
    const labelSet = {
        action: apiMethod,
        type,
        code,
    };
    monitoring.quotaEvaluationDuration.labels(labelSet).observe(seconds);
}
|
||||||
|
|
||||||
|
/**
 * Validates bucket/account quotas for the requested operation(s), updating
 * inflight counts and registering a finalizer hook that undoes the inflight
 * bytes if the API call itself fails.
 * @param {Request} request - request object
 * @param {BucketInfo} bucket - bucket object
 * @param {Account} account - account object
 * @param {array} apiNames - action names: operations to authorize
 * @param {string} apiMethod - the main API call
 * @param {number} inflight - inflight bytes
 * @param {boolean} isStorageReserved - Flag to check if the current quota, minus
 * the incoming bytes, are under the limit.
 * @param {Logger} log - logger
 * @param {function} callback - callback function; receives errors.QuotaExceeded
 * when a quota would be exceeded, a falsy error otherwise
 * @returns {undefined} result is delivered through the callback
 */
function validateQuotas(request, bucket, account, apiNames, apiMethod, inflight, isStorageReserved, log, callback) {
    // Skip entirely when quotas are disabled, or when storage was already
    // reserved and there are no new inflight bytes to account for.
    if (!config.isQuotaEnabled() || (!inflight && isStorageReserved)) {
        return callback(null);
    }
    let type;
    let bucketQuotaExceeded = false;
    let accountQuotaExceeded = false;
    let quotaEvaluationDuration;
    const requestStartTime = process.hrtime.bigint();
    const bucketQuota = bucket.getQuota();
    const accountQuota = account?.quota || 0;
    const shouldSendInflights = config.isQuotaInflightEnabled();

    // `type` is only used as a metric label below.
    if (bucketQuota && accountQuota) {
        type = 'bucket+account';
    } else if (bucketQuota) {
        type = 'bucket';
    } else {
        type = 'account';
    }

    if (actionWithDataDeletion[apiMethod]) {
        type = 'delete';
    }

    // No quota configured, or the quota service is unavailable: fail open.
    if ((bucketQuota <= 0 && accountQuota <= 0) || !QuotaService?.enabled) {
        if (bucketQuota > 0 || accountQuota > 0) {
            log.warn('quota is set for a bucket, but the quota service is disabled', {
                bucketName: bucket.getName(),
            });
            monitoring.requestWithQuotaMetricsUnavailable.inc();
        }
        return callback(null);
    }

    if (isStorageReserved) {
        // eslint-disable-next-line no-param-reassign
        inflight = 0;
    }

    return async.forEach(apiNames, (apiName, done) => {
        // Object copy operations first check the target object,
        // meaning the source object, containing the current bytes,
        // is checked second. This logic handles these APIs calls by
        // ensuring the bytes are positives (i.e., not an object
        // replacement).
        if (actionNeedQuotaCheckCopy(apiName, apiMethod)) {
            // eslint-disable-next-line no-param-reassign
            inflight = Math.abs(inflight);
        } else if (!actionNeedQuotaCheck[apiName] && !actionWithDataDeletion[apiName]) {
            // This action is not quota-relevant: skip it.
            return done();
        }
        // When inflights are disabled, the sum of the current utilization metrics
        // and the current bytes are compared with the quota. The current bytes
        // are not sent to the utilization service. When inflights are enabled,
        // the sum of the current utilization metrics only are compared with the
        // quota. They include the current inflight bytes sent in the request.
        let _inflights = shouldSendInflights ? inflight : undefined;
        const inflightForCheck = shouldSendInflights ? 0 : inflight;
        return _evaluateQuotas(bucketQuota, accountQuota, bucket, account, _inflights,
            inflightForCheck, apiName, log,
            (err, _bucketQuotaExceeded, _accountQuotaExceeded) => {
                if (err) {
                    return done(err);
                }

                bucketQuotaExceeded = _bucketQuotaExceeded;
                accountQuotaExceeded = _accountQuotaExceeded;

                // Inflights are inverted: in case of cleanup, we just re-issue
                // the same API call.
                if (_inflights) {
                    _inflights = -_inflights;
                }

                // This hook runs once the API call has finished; on API error
                // it re-evaluates with the inverted inflight to undo the
                // reservation, then records the total evaluation duration.
                request.finalizerHooks.push((errorFromAPI, _done) => {
                    const code = (bucketQuotaExceeded || accountQuotaExceeded) ? 429 : 200;
                    const quotaCleanUpStartTime = process.hrtime.bigint();
                    // Quotas are cleaned only in case of error in the API
                    async.waterfall([
                        cb => {
                            if (errorFromAPI) {
                                return _evaluateQuotas(bucketQuota, accountQuota, bucket, account, _inflights,
                                    null, apiName, log, cb);
                            }
                            return cb();
                        },
                    ], () => {
                        monitorQuotaEvaluationDuration(apiMethod, type, code, quotaEvaluationDuration +
                            Number(process.hrtime.bigint() - quotaCleanUpStartTime));
                        return _done();
                    });
                });

                return done();
            });
    }, err => {
        quotaEvaluationDuration = Number(process.hrtime.bigint() - requestStartTime);
        if (err) {
            // Fail open: metrics errors never reject the request.
            log.warn('Error getting metrics from the quota service, allowing the request', {
                error: err.name,
                description: err.message,
            });
        }
        // Deletions are never rejected on quota (they free space).
        if (!actionWithDataDeletion[apiMethod] &&
            (bucketQuotaExceeded || accountQuotaExceeded)) {
            return callback(errors.QuotaExceeded);
        }
        return callback();
    });
}
|
||||||
|
|
||||||
|
// Exported quota helpers; _evaluateQuotas stays module-private.
module.exports = {
    processBytesToWrite,
    isMetricStale,
    validateQuotas,
};
|
|
@ -0,0 +1,58 @@
|
||||||
|
const { waterfall } = require('async');
|
||||||
|
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||||
|
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
|
||||||
|
const metadata = require('../metadata/wrapper');
|
||||||
|
const { pushMetric } = require('../utapi/utilities');
|
||||||
|
const monitoring = require('../utilities/monitoringHandler');
|
||||||
|
|
||||||
|
const requestType = 'bucketDeleteQuota';
|
||||||
|
|
||||||
|
/**
 * bucketDeleteQuota - Remove the quota configured on a bucket
 * @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
 * @param {object} request - http request object
 * @param {object} log - Werelogs logger
 * @param {function} callback - callback to server
 * @return {undefined}
 */
function bucketDeleteQuota(authInfo, request, log, callback) {
    log.debug('processing request', { method: 'bucketDeleteQuota' });

    const { bucketName } = request;
    const metadataValParams = {
        authInfo,
        bucketName,
        requestType: request.apiMethods || requestType,
        request,
    };
    return waterfall([
        step => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log,
            (err, bucket) => step(err, bucket)),
        (bucket, step) => {
            // A quota of 0 means "no quota configured".
            bucket.setQuota(0);
            metadata.updateBucket(bucket.getName(), bucket, log, err => step(err, bucket));
        },
    ], (err, bucket) => {
        const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, bucket);
        if (err) {
            log.debug('error processing request', {
                error: err,
                method: 'bucketDeleteQuota'
            });
            monitoring.promMetrics('DELETE', bucketName, err.code, 'bucketDeleteQuota');
            return callback(err, err.code, corsHeaders);
        }
        monitoring.promMetrics('DELETE', bucketName, '204', 'bucketDeleteQuota');
        pushMetric('bucketDeleteQuota', log, {
            authInfo,
            bucket: bucketName,
        });
        return callback(null, 204, corsHeaders);
    });
}
|
||||||
|
|
||||||
|
// Expose the bucketDeleteQuota API handler.
module.exports = bucketDeleteQuota;
|
|
@ -0,0 +1,58 @@
|
||||||
|
const { errors } = require('arsenal');
|
||||||
|
const { pushMetric } = require('../utapi/utilities');
|
||||||
|
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
|
||||||
|
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||||
|
|
||||||
|
/**
 * bucketGetQuota - Get the bucket quota
 * @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
 * @param {object} request - http request object
 * @param {object} log - Werelogs logger
 * @param {function} callback - callback to server
 * @return {undefined}
 */
function bucketGetQuota(authInfo, request, log, callback) {
    log.debug('processing request', { method: 'bucketGetQuota' });
    const { bucketName, headers, method } = request;
    const metadataValParams = {
        authInfo,
        bucketName,
        requestType: request.apiMethods || 'bucketGetQuota',
        request,
    };

    return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
        const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
        if (err) {
            log.debug('error processing request', {
                error: err,
                method: 'bucketGetQuota',
            });
            return callback(err, null, corsHeaders);
        }
        const parts = [
            '<?xml version="1.0" encoding="UTF-8"?>',
            '<GetBucketQuota>',
            `<Name>${bucket.getName()}</Name>`,
        ];
        const quotaValue = bucket.getQuota();
        if (!quotaValue) {
            // No quota (or quota 0) is reported as NoSuchQuota.
            log.debug('bucket has no quota', {
                method: 'bucketGetQuota',
            });
            return callback(errors.NoSuchQuota, null,
                corsHeaders);
        }
        parts.push(`<Quota>${quotaValue}</Quota>`, '</GetBucketQuota>');

        pushMetric('getBucketQuota', log, {
            authInfo,
            bucket: bucketName,
        });
        return callback(null, parts.join(''), corsHeaders);
    });
}
|
||||||
|
|
||||||
|
// Expose the bucketGetQuota API handler.
module.exports = bucketGetQuota;
|
|
@ -45,9 +45,8 @@ function checkLocationConstraint(request, locationConstraint, log) {
|
||||||
} else if (parsedHost && restEndpoints[parsedHost]) {
|
} else if (parsedHost && restEndpoints[parsedHost]) {
|
||||||
locationConstraintChecked = restEndpoints[parsedHost];
|
locationConstraintChecked = restEndpoints[parsedHost];
|
||||||
} else {
|
} else {
|
||||||
log.trace('no location constraint provided on bucket put;' +
|
locationConstraintChecked = Object.keys(locationConstrains)[0];
|
||||||
'setting us-east-1');
|
log.trace('no location constraint provided on bucket put; setting '+locationConstraintChecked);
|
||||||
locationConstraintChecked = 'us-east-1';
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!locationConstraints[locationConstraintChecked]) {
|
if (!locationConstraints[locationConstraintChecked]) {
|
||||||
|
|
|
@ -0,0 +1,85 @@
|
||||||
|
const { waterfall } = require('async');
|
||||||
|
const { errors } = require('arsenal');
|
||||||
|
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||||
|
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
|
||||||
|
const metadata = require('../metadata/wrapper');
|
||||||
|
const { pushMetric } = require('../utapi/utilities');
|
||||||
|
const monitoring = require('../utilities/monitoringHandler');
|
||||||
|
const { parseString } = require('xml2js');
|
||||||
|
|
||||||
|
/**
 * Validate the `quota` property of a parsed request body.
 *
 * The quota must parse (base 10) to a strictly positive number; anything
 * else is reported to the callback as an InvalidArgument error.
 *
 * @param {object} requestBody - parsed request body carrying a `quota` field
 * @param {function} next - callback invoked with (error) on failure or
 *   (null, quotaValue) on success
 * @return {undefined}
 */
function validateBucketQuotaProperty(requestBody, next) {
    const parsedQuota = parseInt(requestBody.quota, 10);
    if (Number.isNaN(parsedQuota)) {
        return next(errors.InvalidArgument.customizeDescription('Quota Value should be a number'));
    }
    if (parsedQuota <= 0) {
        return next(errors.InvalidArgument.customizeDescription('Quota value must be a positive number'));
    }
    return next(null, parsedQuota);
}
|
||||||
|
|
||||||
|
/**
 * Parse a request body that may be either JSON or XML.
 *
 * JSON is tried first; the parsed value must be a real object (or array).
 * If JSON parsing or the object check fails, the body is retried as XML,
 * and an InvalidArgument error is reported if that also fails.
 *
 * @param {string} requestBody - raw request body
 * @param {function} next - callback invoked with (error) on failure or
 *   (null, parsedData) on success
 * @return {undefined}
 */
function parseRequestBody(requestBody, next) {
    try {
        const jsonData = JSON.parse(requestBody);
        // typeof null === 'object', so reject null explicitly: letting it
        // through would crash downstream consumers that read properties
        // off the parsed body (e.g. requestBody.quota).
        if (typeof jsonData !== 'object' || jsonData === null) {
            throw new Error('Invalid JSON');
        }
        return next(null, jsonData);
    } catch (jsonError) {
        return parseString(requestBody, (xmlError, xmlData) => {
            if (xmlError) {
                return next(errors.InvalidArgument.customizeDescription('Request body must be a JSON object'));
            }
            return next(null, xmlData);
        });
    }
}
|
||||||
|
|
||||||
|
/**
 * Bucket Update Quota - update the quota stored on a bucket's metadata.
 *
 * @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
 * @param {object} request - http request object
 * @param {object} log - Werelogs request logger
 * @param {function} callback - callback to server, called with
 *   (error, [errorCode], corsHeaders)
 * @return {undefined}
 */
function bucketUpdateQuota(authInfo, request, log, callback) {
    log.debug('processing request', { method: 'bucketUpdateQuota' });

    const { bucketName } = request;
    const metadataValParams = {
        authInfo,
        bucketName,
        requestType: request.apiMethods || 'bucketUpdateQuota',
        request,
    };
    // Capture the validated bucket here: metadata.updateBucket does not
    // pass the bucket back to its callback, so the final waterfall
    // callback cannot rely on its own arguments for it.
    let bucket = null;
    return waterfall([
        next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log,
            (err, b) => {
                bucket = b;
                return next(err, bucket);
            }),
        (bucket, next) => parseRequestBody(request.post, (err, requestBody) => next(err, bucket, requestBody)),
        (bucket, requestBody, next) => validateBucketQuotaProperty(requestBody, (err, quotaValue) =>
            next(err, bucket, quotaValue)),
        (bucket, quotaValue, next) => {
            bucket.setQuota(quotaValue);
            return metadata.updateBucket(bucket.getName(), bucket, log, next);
        },
    ], err => {
        // Use the captured bucket rather than a callback parameter: the
        // last waterfall task does not forward the bucket, so a `bucket`
        // parameter here would shadow the captured one and be undefined
        // on success, breaking CORS header collection.
        const corsHeaders = collectCorsHeaders(request.headers.origin,
            request.method, bucket);
        if (err) {
            log.debug('error processing request', {
                error: err,
                method: 'bucketUpdateQuota',
            });
            monitoring.promMetrics('PUT', bucketName, err.code,
                'updateBucketQuota');
            return callback(err, err.code, corsHeaders);
        }
        monitoring.promMetrics(
            'PUT', bucketName, '200', 'updateBucketQuota');
        pushMetric('updateBucketQuota', log, {
            authInfo,
            bucket: bucketName,
        });
        return callback(null, corsHeaders);
    });
}
|
||||||
|
|
||||||
|
module.exports = bucketUpdateQuota;
|
|
@ -6,6 +6,7 @@ const convertToXml = s3middleware.convertToXml;
|
||||||
const { pushMetric } = require('../utapi/utilities');
|
const { pushMetric } = require('../utapi/utilities');
|
||||||
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||||
const { hasNonPrintables } = require('../utilities/stringChecks');
|
const { hasNonPrintables } = require('../utilities/stringChecks');
|
||||||
|
const { config } = require('../Config');
|
||||||
const { cleanUpBucket } = require('./apiUtils/bucket/bucketCreation');
|
const { cleanUpBucket } = require('./apiUtils/bucket/bucketCreation');
|
||||||
const constants = require('../../constants');
|
const constants = require('../../constants');
|
||||||
const services = require('../services');
|
const services = require('../services');
|
||||||
|
@ -65,7 +66,7 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
|
||||||
const websiteRedirectHeader =
|
const websiteRedirectHeader =
|
||||||
request.headers['x-amz-website-redirect-location'];
|
request.headers['x-amz-website-redirect-location'];
|
||||||
if (request.headers['x-amz-storage-class'] &&
|
if (request.headers['x-amz-storage-class'] &&
|
||||||
!constants.validStorageClasses.includes(request.headers['x-amz-storage-class'])) {
|
!config.locationConstraints[request.headers['x-amz-storage-class']]) {
|
||||||
log.trace('invalid storage-class header');
|
log.trace('invalid storage-class header');
|
||||||
monitoring.promMetrics('PUT', bucketName,
|
monitoring.promMetrics('PUT', bucketName,
|
||||||
errors.InvalidStorageClass.code, 'initiateMultipartUpload');
|
errors.InvalidStorageClass.code, 'initiateMultipartUpload');
|
||||||
|
|
|
@ -23,13 +23,15 @@ const { isRequesterNonAccountUser } = require('./apiUtils/authorization/permissi
|
||||||
const { hasGovernanceBypassHeader, checkUserGovernanceBypass, ObjectLockInfo }
|
const { hasGovernanceBypassHeader, checkUserGovernanceBypass, ObjectLockInfo }
|
||||||
= require('./apiUtils/object/objectLockHelpers');
|
= require('./apiUtils/object/objectLockHelpers');
|
||||||
const requestUtils = policies.requestUtils;
|
const requestUtils = policies.requestUtils;
|
||||||
const { data } = require('../data/wrapper');
|
const { validObjectKeys } = require('../routes/routeVeeam');
|
||||||
const logger = require('../utilities/logger');
|
const { deleteVeeamCapabilities } = require('../routes/veeam/delete');
|
||||||
const { _bucketRequiresOplogUpdate } = require('./apiUtils/object/deleteObject');
|
const { _bucketRequiresOplogUpdate } = require('./apiUtils/object/deleteObject');
|
||||||
const { overheadField } = require('../../constants');
|
const { overheadField } = require('../../constants');
|
||||||
|
|
||||||
const versionIdUtils = versioning.VersionID;
|
const versionIdUtils = versioning.VersionID;
|
||||||
|
const { data } = require('../data/wrapper');
|
||||||
|
const logger = require('../utilities/logger');
|
||||||
|
const { validateQuotas } = require('./apiUtils/quotas/quotaUtils');
|
||||||
|
|
||||||
/*
|
/*
|
||||||
Format of xml request:
|
Format of xml request:
|
||||||
|
@ -331,6 +333,9 @@ function getObjMetadataAndDelete(authInfo, canonicalID, request,
|
||||||
|
|
||||||
return callback(null, objMD, versionId);
|
return callback(null, objMD, versionId);
|
||||||
},
|
},
|
||||||
|
(objMD, versionId, callback) => validateQuotas(
|
||||||
|
request, bucket, request.accountQuotas, ['objectDelete'], 'objectDelete',
|
||||||
|
-objMD?.['content-length'] || 0, false, log, err => callback(err, objMD, versionId)),
|
||||||
(objMD, versionId, callback) => {
|
(objMD, versionId, callback) => {
|
||||||
const options = preprocessingVersioningDelete(
|
const options = preprocessingVersioningDelete(
|
||||||
bucketName, bucket, objMD, versionId, config.nullVersionCompatMode);
|
bucketName, bucket, objMD, versionId, config.nullVersionCompatMode);
|
||||||
|
@ -346,7 +351,8 @@ function getObjMetadataAndDelete(authInfo, canonicalID, request,
|
||||||
options.replayId = objMD.uploadId;
|
options.replayId = objMD.uploadId;
|
||||||
}
|
}
|
||||||
return services.deleteObject(bucketName, objMD,
|
return services.deleteObject(bucketName, objMD,
|
||||||
entry.key, options, config.multiObjectDeleteEnableOptimizations, log, (err, toDelete) => {
|
entry.key, options, config.multiObjectDeleteEnableOptimizations, log,
|
||||||
|
's3:ObjectRemoved:Delete', (err, toDelete) => {
|
||||||
if (err) {
|
if (err) {
|
||||||
return callback(err);
|
return callback(err);
|
||||||
}
|
}
|
||||||
|
@ -360,8 +366,9 @@ function getObjMetadataAndDelete(authInfo, canonicalID, request,
|
||||||
// This call will create a delete-marker
|
// This call will create a delete-marker
|
||||||
return createAndStoreObject(bucketName, bucket, entry.key,
|
return createAndStoreObject(bucketName, bucket, entry.key,
|
||||||
objMD, authInfo, canonicalID, null, request,
|
objMD, authInfo, canonicalID, null, request,
|
||||||
deleteInfo.newDeleteMarker, null, overheadField, log, (err, result) =>
|
deleteInfo.newDeleteMarker, null, overheadField, log,
|
||||||
callback(err, objMD, deleteInfo, result.versionId));
|
's3:ObjectRemoved:DeleteMarkerCreated', (err, result) =>
|
||||||
|
callback(err, objMD, deleteInfo, result.versionId));
|
||||||
},
|
},
|
||||||
], (err, objMD, deleteInfo, versionId) => {
|
], (err, objMD, deleteInfo, versionId) => {
|
||||||
if (err === skipError) {
|
if (err === skipError) {
|
||||||
|
@ -475,6 +482,7 @@ function multiObjectDelete(authInfo, request, log, callback) {
|
||||||
return callback(errors.BadDigest);
|
return callback(errors.BadDigest);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const inPlayInternal = [];
|
||||||
const bucketName = request.bucketName;
|
const bucketName = request.bucketName;
|
||||||
const canonicalID = authInfo.getCanonicalID();
|
const canonicalID = authInfo.getCanonicalID();
|
||||||
|
|
||||||
|
@ -500,8 +508,9 @@ function multiObjectDelete(authInfo, request, log, callback) {
|
||||||
if (bucketShield(bucketMD, 'objectDelete')) {
|
if (bucketShield(bucketMD, 'objectDelete')) {
|
||||||
return next(errors.NoSuchBucket);
|
return next(errors.NoSuchBucket);
|
||||||
}
|
}
|
||||||
if (!isBucketAuthorized(bucketMD, 'objectDelete', canonicalID, authInfo, log, request,
|
// The implicit deny flag is ignored in the DeleteObjects API, as authorization only
|
||||||
request.actionImplicitDenies)) {
|
// affects the objects.
|
||||||
|
if (!isBucketAuthorized(bucketMD, 'objectDelete', canonicalID, authInfo, log, request)) {
|
||||||
log.trace("access denied due to bucket acl's");
|
log.trace("access denied due to bucket acl's");
|
||||||
// if access denied at the bucket level, no access for
|
// if access denied at the bucket level, no access for
|
||||||
// any of the objects so all results will be error results
|
// any of the objects so all results will be error results
|
||||||
|
@ -631,7 +640,11 @@ function multiObjectDelete(authInfo, request, log, callback) {
|
||||||
request);
|
request);
|
||||||
|
|
||||||
if (areAllActionsAllowed) {
|
if (areAllActionsAllowed) {
|
||||||
inPlay.push(entry);
|
if (validObjectKeys.includes(entry.key)) {
|
||||||
|
inPlayInternal.push(entry.key);
|
||||||
|
} else {
|
||||||
|
inPlay.push(entry);
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
errorResults.push({
|
errorResults.push({
|
||||||
entry,
|
entry,
|
||||||
|
@ -642,6 +655,11 @@ function multiObjectDelete(authInfo, request, log, callback) {
|
||||||
return next(null, quietSetting, errorResults, inPlay, bucketMD);
|
return next(null, quietSetting, errorResults, inPlay, bucketMD);
|
||||||
});
|
});
|
||||||
},
|
},
|
||||||
|
function handleInternalFiles(quietSetting, errorResults, inPlay, bucketMD, next) {
|
||||||
|
return async.each(inPlayInternal,
|
||||||
|
(localInPlay, next) => deleteVeeamCapabilities(bucketName, localInPlay, bucketMD, log, next),
|
||||||
|
err => next(err, quietSetting, errorResults, inPlay, bucketMD));
|
||||||
|
},
|
||||||
function getObjMetadataAndDeleteStep(quietSetting, errorResults, inPlay,
|
function getObjMetadataAndDeleteStep(quietSetting, errorResults, inPlay,
|
||||||
bucket, next) {
|
bucket, next) {
|
||||||
return getObjMetadataAndDelete(authInfo, canonicalID, request,
|
return getObjMetadataAndDelete(authInfo, canonicalID, request,
|
||||||
|
|
|
@ -23,6 +23,7 @@ const monitoring = require('../utilities/monitoringHandler');
|
||||||
const applyZenkoUserMD = require('./apiUtils/object/applyZenkoUserMD');
|
const applyZenkoUserMD = require('./apiUtils/object/applyZenkoUserMD');
|
||||||
const { getObjectSSEConfiguration } = require('./apiUtils/bucket/bucketEncryption');
|
const { getObjectSSEConfiguration } = require('./apiUtils/bucket/bucketEncryption');
|
||||||
const { setExpirationHeaders } = require('./apiUtils/object/expirationHeaders');
|
const { setExpirationHeaders } = require('./apiUtils/object/expirationHeaders');
|
||||||
|
const { verifyColdObjectAvailable } = require('./apiUtils/object/coldStorage');
|
||||||
|
|
||||||
const versionIdUtils = versioning.VersionID;
|
const versionIdUtils = versioning.VersionID;
|
||||||
const locationHeader = constants.objectLocationConstraintHeader;
|
const locationHeader = constants.objectLocationConstraintHeader;
|
||||||
|
@ -219,6 +220,14 @@ function objectCopy(authInfo, request, sourceBucket,
|
||||||
versionId: sourceVersionId,
|
versionId: sourceVersionId,
|
||||||
getDeleteMarker: true,
|
getDeleteMarker: true,
|
||||||
requestType: 'objectGet',
|
requestType: 'objectGet',
|
||||||
|
/**
|
||||||
|
* Authorization will first check the target object, with an objectPut
|
||||||
|
* action. But in this context, the source object metadata is still
|
||||||
|
* unknown. In the context of quotas, to know the number of bytes that
|
||||||
|
* are being written, we explicitly enable the quota evaluation logic
|
||||||
|
* during the objectGet action instead.
|
||||||
|
*/
|
||||||
|
checkQuota: true,
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
const valPutParams = {
|
const valPutParams = {
|
||||||
|
@ -226,6 +235,7 @@ function objectCopy(authInfo, request, sourceBucket,
|
||||||
bucketName: destBucketName,
|
bucketName: destBucketName,
|
||||||
objectKey: destObjectKey,
|
objectKey: destObjectKey,
|
||||||
requestType: 'objectPut',
|
requestType: 'objectPut',
|
||||||
|
checkQuota: false,
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
const dataStoreContext = {
|
const dataStoreContext = {
|
||||||
|
@ -239,7 +249,7 @@ function objectCopy(authInfo, request, sourceBucket,
|
||||||
const responseHeaders = {};
|
const responseHeaders = {};
|
||||||
|
|
||||||
if (request.headers['x-amz-storage-class'] &&
|
if (request.headers['x-amz-storage-class'] &&
|
||||||
!constants.validStorageClasses.includes(request.headers['x-amz-storage-class'])) {
|
!config.locationConstraints[request.headers['x-amz-storage-class']]) {
|
||||||
log.trace('invalid storage-class header');
|
log.trace('invalid storage-class header');
|
||||||
monitoring.promMetrics('PUT', destBucketName,
|
monitoring.promMetrics('PUT', destBucketName,
|
||||||
errors.InvalidStorageClass.code, 'copyObject');
|
errors.InvalidStorageClass.code, 'copyObject');
|
||||||
|
@ -277,7 +287,10 @@ function objectCopy(authInfo, request, sourceBucket,
|
||||||
});
|
});
|
||||||
},
|
},
|
||||||
function checkSourceAuthorization(destBucketMD, destObjMD, next) {
|
function checkSourceAuthorization(destBucketMD, destObjMD, next) {
|
||||||
return standardMetadataValidateBucketAndObj(valGetParams, request.actionImplicitDenies, log,
|
return standardMetadataValidateBucketAndObj({
|
||||||
|
...valGetParams,
|
||||||
|
destObjMD,
|
||||||
|
}, request.actionImplicitDenies, log,
|
||||||
(err, sourceBucketMD, sourceObjMD) => {
|
(err, sourceBucketMD, sourceObjMD) => {
|
||||||
if (err) {
|
if (err) {
|
||||||
log.debug('error validating get part of request',
|
log.debug('error validating get part of request',
|
||||||
|
@ -290,6 +303,11 @@ function objectCopy(authInfo, request, sourceBucket,
|
||||||
log.debug('no source object', { sourceObject });
|
log.debug('no source object', { sourceObject });
|
||||||
return next(err, null, destBucketMD);
|
return next(err, null, destBucketMD);
|
||||||
}
|
}
|
||||||
|
// check if object data is in a cold storage
|
||||||
|
const coldErr = verifyColdObjectAvailable(sourceObjMD);
|
||||||
|
if (coldErr) {
|
||||||
|
return next(coldErr, null);
|
||||||
|
}
|
||||||
if (sourceObjMD.isDeleteMarker) {
|
if (sourceObjMD.isDeleteMarker) {
|
||||||
log.debug('delete marker on source object',
|
log.debug('delete marker on source object',
|
||||||
{ sourceObject });
|
{ sourceObject });
|
||||||
|
|
|
@ -21,16 +21,17 @@ const objectLockedError = new Error('object locked');
|
||||||
const { overheadField } = require('../../constants');
|
const { overheadField } = require('../../constants');
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* objectDelete - DELETE an object from a bucket
|
* objectDeleteInternal - DELETE an object from a bucket
|
||||||
* @param {AuthInfo} authInfo - requester's infos
|
* @param {AuthInfo} authInfo - requester's infos
|
||||||
* @param {object} request - request object given by router,
|
* @param {object} request - request object given by router,
|
||||||
* includes normalized headers
|
* includes normalized headers
|
||||||
* @param {Logger} log - werelogs request instance
|
* @param {Logger} log - werelogs request instance
|
||||||
|
* @param {boolean} isExpiration - true if the call comes from LifecycleExpiration
|
||||||
* @param {function} cb - final cb to call with the result and response headers
|
* @param {function} cb - final cb to call with the result and response headers
|
||||||
* @return {undefined}
|
* @return {undefined}
|
||||||
*/
|
*/
|
||||||
function objectDelete(authInfo, request, log, cb) {
|
function objectDeleteInternal(authInfo, request, log, isExpiration, cb) {
|
||||||
log.debug('processing request', { method: 'objectDelete' });
|
log.debug('processing request', { method: 'objectDeleteInternal' });
|
||||||
if (authInfo.isRequesterPublicUser()) {
|
if (authInfo.isRequesterPublicUser()) {
|
||||||
log.debug('operation not available for public user');
|
log.debug('operation not available for public user');
|
||||||
monitoring.promMetrics(
|
monitoring.promMetrics(
|
||||||
|
@ -166,7 +167,10 @@ function objectDelete(authInfo, request, log, cb) {
|
||||||
// source does not have versioning.
|
// source does not have versioning.
|
||||||
return createAndStoreObject(bucketName, bucketMD, objectKey,
|
return createAndStoreObject(bucketName, bucketMD, objectKey,
|
||||||
objectMD, authInfo, canonicalID, null, request, true, null,
|
objectMD, authInfo, canonicalID, null, request, true, null,
|
||||||
log, err => {
|
log, isExpiration ?
|
||||||
|
's3:LifecycleExpiration:DeleteMarkerCreated' :
|
||||||
|
's3:ObjectRemoved:DeleteMarkerCreated',
|
||||||
|
err => {
|
||||||
if (err) {
|
if (err) {
|
||||||
return next(err);
|
return next(err);
|
||||||
}
|
}
|
||||||
|
@ -176,9 +180,11 @@ function objectDelete(authInfo, request, log, cb) {
|
||||||
deleteInfo.removeDeleteMarker = true;
|
deleteInfo.removeDeleteMarker = true;
|
||||||
}
|
}
|
||||||
return services.deleteObject(bucketName, objectMD,
|
return services.deleteObject(bucketName, objectMD,
|
||||||
objectKey, delOptions, log, (err, delResult) =>
|
objectKey, delOptions, false, log, isExpiration ?
|
||||||
next(err, bucketMD, objectMD, delResult,
|
's3:LifecycleExpiration:Delete' :
|
||||||
deleteInfo));
|
's3:ObjectRemoved:Delete',
|
||||||
|
(err, delResult) =>
|
||||||
|
next(err, bucketMD, objectMD, delResult, deleteInfo));
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
if (delOptions && delOptions.deleteData) {
|
if (delOptions && delOptions.deleteData) {
|
||||||
|
@ -199,14 +205,20 @@ function objectDelete(authInfo, request, log, cb) {
|
||||||
}
|
}
|
||||||
|
|
||||||
return services.deleteObject(bucketName, objectMD, objectKey,
|
return services.deleteObject(bucketName, objectMD, objectKey,
|
||||||
delOptions, false, log, (err, delResult) => next(err, bucketMD,
|
delOptions, false, log, isExpiration ?
|
||||||
objectMD, delResult, deleteInfo));
|
's3:LifecycleExpiration:Delete' :
|
||||||
|
's3:ObjectRemoved:Delete',
|
||||||
|
(err, delResult) => next(err, bucketMD,
|
||||||
|
objectMD, delResult, deleteInfo));
|
||||||
}
|
}
|
||||||
// putting a new delete marker
|
// putting a new delete marker
|
||||||
deleteInfo.newDeleteMarker = true;
|
deleteInfo.newDeleteMarker = true;
|
||||||
return createAndStoreObject(bucketName, bucketMD,
|
return createAndStoreObject(bucketName, bucketMD,
|
||||||
objectKey, objectMD, authInfo, canonicalID, null, request,
|
objectKey, objectMD, authInfo, canonicalID, null, request,
|
||||||
deleteInfo.newDeleteMarker, null, overheadField, log, (err, newDelMarkerRes) => {
|
deleteInfo.newDeleteMarker, null, overheadField, log, isExpiration ?
|
||||||
|
's3:LifecycleExpiration:DeleteMarkerCreated' :
|
||||||
|
's3:ObjectRemoved:DeleteMarkerCreated',
|
||||||
|
(err, newDelMarkerRes) => {
|
||||||
next(err, bucketMD, objectMD, newDelMarkerRes, deleteInfo);
|
next(err, bucketMD, objectMD, newDelMarkerRes, deleteInfo);
|
||||||
});
|
});
|
||||||
},
|
},
|
||||||
|
@ -295,4 +307,21 @@ function objectDelete(authInfo, request, log, cb) {
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
module.exports = objectDelete;
|
/**
 * objectDelete - DELETE an object from a bucket (S3 DeleteObject API).
 * Thin public wrapper around objectDeleteInternal with the
 * isExpiration flag forced to false, so the emitted bucket notification
 * events use the `s3:ObjectRemoved:*` names rather than the
 * `s3:LifecycleExpiration:*` ones reserved for lifecycle-driven deletes.
 * The bucket must already exist and the requester must have permission
 * to delete the object.
 * @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
 * @param {object} request - http request object
 * @param {werelogs.Logger} log - Logger object
 * @param {function} cb - callback to server
 * @return {undefined}
 */
function objectDelete(authInfo, request, log, cb) {
    log.debug('processing request', { method: 'objectDelete' });
    return objectDeleteInternal(authInfo, request, log, false, cb);
}
|
||||||
|
|
||||||
|
module.exports = {
|
||||||
|
objectDelete,
|
||||||
|
objectDeleteInternal,
|
||||||
|
};
|
||||||
|
|
|
@ -91,7 +91,7 @@ function objectDeleteTagging(authInfo, request, log, callback) {
|
||||||
},
|
},
|
||||||
(bucket, objectMD, next) =>
|
(bucket, objectMD, next) =>
|
||||||
// if external backends handles tagging
|
// if external backends handles tagging
|
||||||
data.objectTagging('Delete', objectKey, bucket, objectMD,
|
data.objectTagging('Delete', objectKey, bucket.getName(), objectMD,
|
||||||
log, err => next(err, bucket, objectMD)),
|
log, err => next(err, bucket, objectMD)),
|
||||||
], (err, bucket, objectMD) => {
|
], (err, bucket, objectMD) => {
|
||||||
const additionalResHeaders = collectCorsHeaders(request.headers.origin,
|
const additionalResHeaders = collectCorsHeaders(request.headers.origin,
|
||||||
|
|
|
@ -21,6 +21,7 @@ const { locationConstraints } = config;
|
||||||
const monitoring = require('../utilities/monitoringHandler');
|
const monitoring = require('../utilities/monitoringHandler');
|
||||||
const { getPartCountFromMd5 } = require('./apiUtils/object/partInfo');
|
const { getPartCountFromMd5 } = require('./apiUtils/object/partInfo');
|
||||||
const { setExpirationHeaders } = require('./apiUtils/object/expirationHeaders');
|
const { setExpirationHeaders } = require('./apiUtils/object/expirationHeaders');
|
||||||
|
const { verifyColdObjectAvailable } = require('./apiUtils/object/coldStorage');
|
||||||
|
|
||||||
const validateHeaders = s3middleware.validateConditionalHeaders;
|
const validateHeaders = s3middleware.validateConditionalHeaders;
|
||||||
|
|
||||||
|
@ -89,16 +90,12 @@ function objectGet(authInfo, request, returnTagCount, log, callback) {
|
||||||
return callback(err, null, corsHeaders);
|
return callback(err, null, corsHeaders);
|
||||||
}
|
}
|
||||||
const verCfg = bucket.getVersioningConfiguration();
|
const verCfg = bucket.getVersioningConfiguration();
|
||||||
if (objMD.archive &&
|
// check if object data is in a cold storage
|
||||||
// Object is in cold backend
|
const coldErr = verifyColdObjectAvailable(objMD);
|
||||||
(!objMD.archive.restoreRequestedAt ||
|
if (coldErr) {
|
||||||
// Object is being restored
|
|
||||||
(objMD.archive.restoreRequestedAt &&
|
|
||||||
!objMD.archive.restoreCompletedAt))) {
|
|
||||||
const error = errors.InvalidObjectState;
|
|
||||||
monitoring.promMetrics(
|
monitoring.promMetrics(
|
||||||
'GET', bucketName, error.code, 'getObject');
|
'GET', bucketName, coldErr.code, 'getObject');
|
||||||
return callback(error, null, corsHeaders);
|
return callback(coldErr, null, corsHeaders);
|
||||||
}
|
}
|
||||||
if (objMD.isDeleteMarker) {
|
if (objMD.isDeleteMarker) {
|
||||||
const responseMetaHeaders = Object.assign({},
|
const responseMetaHeaders = Object.assign({},
|
||||||
|
|
|
@ -3,6 +3,7 @@ const { errors, versioning } = require('arsenal');
|
||||||
|
|
||||||
const constants = require('../../constants');
|
const constants = require('../../constants');
|
||||||
const aclUtils = require('../utilities/aclUtils');
|
const aclUtils = require('../utilities/aclUtils');
|
||||||
|
const { config } = require('../Config');
|
||||||
const { cleanUpBucket } = require('./apiUtils/bucket/bucketCreation');
|
const { cleanUpBucket } = require('./apiUtils/bucket/bucketCreation');
|
||||||
const { getObjectSSEConfiguration } = require('./apiUtils/bucket/bucketEncryption');
|
const { getObjectSSEConfiguration } = require('./apiUtils/bucket/bucketEncryption');
|
||||||
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||||
|
@ -71,7 +72,7 @@ function objectPut(authInfo, request, streamingV4Params, log, callback) {
|
||||||
query,
|
query,
|
||||||
} = request;
|
} = request;
|
||||||
if (headers['x-amz-storage-class'] &&
|
if (headers['x-amz-storage-class'] &&
|
||||||
!constants.validStorageClasses.includes(headers['x-amz-storage-class'])) {
|
!config.locationConstraints[headers['x-amz-storage-class']]) {
|
||||||
log.trace('invalid storage-class header');
|
log.trace('invalid storage-class header');
|
||||||
monitoring.promMetrics('PUT', request.bucketName,
|
monitoring.promMetrics('PUT', request.bucketName,
|
||||||
errors.InvalidStorageClass.code, 'putObject');
|
errors.InvalidStorageClass.code, 'putObject');
|
||||||
|
@ -98,7 +99,7 @@ function objectPut(authInfo, request, streamingV4Params, log, callback) {
|
||||||
'The encryption method specified is not supported');
|
'The encryption method specified is not supported');
|
||||||
const requestType = request.apiMethods || 'objectPut';
|
const requestType = request.apiMethods || 'objectPut';
|
||||||
const valParams = { authInfo, bucketName, objectKey, versionId,
|
const valParams = { authInfo, bucketName, objectKey, versionId,
|
||||||
requestType, request };
|
requestType, request, withVersionId: isPutVersion };
|
||||||
const canonicalID = authInfo.getCanonicalID();
|
const canonicalID = authInfo.getCanonicalID();
|
||||||
|
|
||||||
if (hasNonPrintables(objectKey)) {
|
if (hasNonPrintables(objectKey)) {
|
||||||
|
@ -174,7 +175,7 @@ function objectPut(authInfo, request, streamingV4Params, log, callback) {
|
||||||
writeContinue(request, request._response);
|
writeContinue(request, request._response);
|
||||||
return createAndStoreObject(bucketName,
|
return createAndStoreObject(bucketName,
|
||||||
bucket, objectKey, objMD, authInfo, canonicalID, cipherBundle,
|
bucket, objectKey, objMD, authInfo, canonicalID, cipherBundle,
|
||||||
request, false, streamingV4Params, overheadField, log, next);
|
request, false, streamingV4Params, overheadField, log, 's3:ObjectCreated:Put', next);
|
||||||
},
|
},
|
||||||
], (err, storingResult) => {
|
], (err, storingResult) => {
|
||||||
if (err) {
|
if (err) {
|
||||||
|
@ -242,6 +243,14 @@ function objectPut(authInfo, request, streamingV4Params, log, callback) {
|
||||||
monitoring.promMetrics('PUT', bucketName, '200',
|
monitoring.promMetrics('PUT', bucketName, '200',
|
||||||
'putObject', newByteLength, oldByteLength, isVersionedObj,
|
'putObject', newByteLength, oldByteLength, isVersionedObj,
|
||||||
null, ingestSize);
|
null, ingestSize);
|
||||||
|
|
||||||
|
if (isPutVersion) {
|
||||||
|
const durationMs = Date.now() - new Date(objMD.archive.restoreRequestedAt);
|
||||||
|
monitoring.lifecycleDuration.observe(
|
||||||
|
{ type: 'restore', location: objMD.dataStoreName },
|
||||||
|
durationMs / 1000);
|
||||||
|
}
|
||||||
|
|
||||||
return callback(null, responseHeaders);
|
return callback(null, responseHeaders);
|
||||||
});
|
});
|
||||||
});
|
});
|
||||||
|
|
|
@ -13,6 +13,8 @@ const services = require('../services');
|
||||||
const setUpCopyLocator = require('./apiUtils/object/setUpCopyLocator');
|
const setUpCopyLocator = require('./apiUtils/object/setUpCopyLocator');
|
||||||
const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils');
|
const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils');
|
||||||
const monitoring = require('../utilities/monitoringHandler');
|
const monitoring = require('../utilities/monitoringHandler');
|
||||||
|
const { verifyColdObjectAvailable } = require('./apiUtils/object/coldStorage');
|
||||||
|
const { validateQuotas } = require('./apiUtils/quotas/quotaUtils');
|
||||||
|
|
||||||
const versionIdUtils = versioning.VersionID;
|
const versionIdUtils = versioning.VersionID;
|
||||||
|
|
||||||
|
@ -44,6 +46,14 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
|
||||||
versionId: reqVersionId,
|
versionId: reqVersionId,
|
||||||
getDeleteMarker: true,
|
getDeleteMarker: true,
|
||||||
requestType: 'objectGet',
|
requestType: 'objectGet',
|
||||||
|
/**
|
||||||
|
* Authorization will first check the target object, with an objectPut
|
||||||
|
* action. But in this context, the source object metadata is still
|
||||||
|
* unknown. In the context of quotas, to know the number of bytes that
|
||||||
|
* are being written, we explicitly enable the quota evaluation logic
|
||||||
|
* during the objectGet action instead.
|
||||||
|
*/
|
||||||
|
checkQuota: true,
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -67,6 +77,7 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
|
||||||
bucketName: destBucketName,
|
bucketName: destBucketName,
|
||||||
objectKey: destObjectKey,
|
objectKey: destObjectKey,
|
||||||
requestType: 'objectPutPart',
|
requestType: 'objectPutPart',
|
||||||
|
checkQuota: false,
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -87,6 +98,7 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
|
||||||
objectKey: destObjectKey,
|
objectKey: destObjectKey,
|
||||||
partNumber: paddedPartNumber,
|
partNumber: paddedPartNumber,
|
||||||
uploadId,
|
uploadId,
|
||||||
|
enableQuota: true,
|
||||||
};
|
};
|
||||||
|
|
||||||
return async.waterfall([
|
return async.waterfall([
|
||||||
|
@ -133,6 +145,11 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
|
||||||
sourceLocationConstraintName =
|
sourceLocationConstraintName =
|
||||||
sourceObjMD.location[0].dataStoreName;
|
sourceObjMD.location[0].dataStoreName;
|
||||||
}
|
}
|
||||||
|
// check if object data is in a cold storage
|
||||||
|
const coldErr = verifyColdObjectAvailable(sourceObjMD);
|
||||||
|
if (coldErr) {
|
||||||
|
return next(coldErr, null);
|
||||||
|
}
|
||||||
if (sourceObjMD.isDeleteMarker) {
|
if (sourceObjMD.isDeleteMarker) {
|
||||||
log.debug('delete marker on source object',
|
log.debug('delete marker on source object',
|
||||||
{ sourceObject });
|
{ sourceObject });
|
||||||
|
@ -175,9 +192,16 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
|
||||||
}
|
}
|
||||||
return next(null, copyLocator.dataLocator, destBucketMD,
|
return next(null, copyLocator.dataLocator, destBucketMD,
|
||||||
copyLocator.copyObjectSize, sourceVerId,
|
copyLocator.copyObjectSize, sourceVerId,
|
||||||
sourceLocationConstraintName);
|
sourceLocationConstraintName, sourceObjMD);
|
||||||
});
|
});
|
||||||
},
|
},
|
||||||
|
function _validateQuotas(dataLocator, destBucketMD,
|
||||||
|
copyObjectSize, sourceVerId,
|
||||||
|
sourceLocationConstraintName, sourceObjMD, next) {
|
||||||
|
return validateQuotas(request, destBucketMD, request.accountQuotas, valPutParams.requestType,
|
||||||
|
request.apiMethod, sourceObjMD?.['content-length'] || 0, false, log, err =>
|
||||||
|
next(err, dataLocator, destBucketMD, copyObjectSize, sourceVerId, sourceLocationConstraintName));
|
||||||
|
},
|
||||||
// get MPU shadow bucket to get splitter based on MD version
|
// get MPU shadow bucket to get splitter based on MD version
|
||||||
function getMpuShadowBucket(dataLocator, destBucketMD,
|
function getMpuShadowBucket(dataLocator, destBucketMD,
|
||||||
copyObjectSize, sourceVerId,
|
copyObjectSize, sourceVerId,
|
||||||
|
|
|
@ -21,6 +21,7 @@ const { BackendInfo } = models;
|
||||||
const writeContinue = require('../utilities/writeContinue');
|
const writeContinue = require('../utilities/writeContinue');
|
||||||
const { getObjectSSEConfiguration } = require('./apiUtils/bucket/bucketEncryption');
|
const { getObjectSSEConfiguration } = require('./apiUtils/bucket/bucketEncryption');
|
||||||
const validateChecksumHeaders = require('./apiUtils/object/validateChecksumHeaders');
|
const validateChecksumHeaders = require('./apiUtils/object/validateChecksumHeaders');
|
||||||
|
const { validateQuotas } = require('./apiUtils/quotas/quotaUtils');
|
||||||
|
|
||||||
const skipError = new Error('skip');
|
const skipError = new Error('skip');
|
||||||
|
|
||||||
|
@ -60,6 +61,9 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
|
||||||
log.debug('processing request', { method: 'objectPutPart' });
|
log.debug('processing request', { method: 'objectPutPart' });
|
||||||
const size = request.parsedContentLength;
|
const size = request.parsedContentLength;
|
||||||
|
|
||||||
|
const putVersionId = request.headers['x-scal-s3-version-id'];
|
||||||
|
const isPutVersion = putVersionId || putVersionId === '';
|
||||||
|
|
||||||
if (Number.parseInt(size, 10) > constants.maximumAllowedPartSize) {
|
if (Number.parseInt(size, 10) > constants.maximumAllowedPartSize) {
|
||||||
log.debug('put part size too large', { size });
|
log.debug('put part size too large', { size });
|
||||||
monitoring.promMetrics('PUT', request.bucketName, 400,
|
monitoring.promMetrics('PUT', request.bucketName, 400,
|
||||||
|
@ -103,6 +107,9 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
|
||||||
const mpuBucketName = `${constants.mpuBucketPrefix}${bucketName}`;
|
const mpuBucketName = `${constants.mpuBucketPrefix}${bucketName}`;
|
||||||
const { objectKey } = request;
|
const { objectKey } = request;
|
||||||
const originalIdentityAuthzResults = request.actionImplicitDenies;
|
const originalIdentityAuthzResults = request.actionImplicitDenies;
|
||||||
|
// For validating the request at the destinationBucket level the
|
||||||
|
// `requestType` is the general 'objectPut'.
|
||||||
|
const requestType = request.apiMethods || 'objectPutPart';
|
||||||
|
|
||||||
return async.waterfall([
|
return async.waterfall([
|
||||||
// Get the destination bucket.
|
// Get the destination bucket.
|
||||||
|
@ -122,9 +129,6 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
|
||||||
}),
|
}),
|
||||||
// Check the bucket authorization.
|
// Check the bucket authorization.
|
||||||
(destinationBucket, next) => {
|
(destinationBucket, next) => {
|
||||||
// For validating the request at the destinationBucket level the
|
|
||||||
// `requestType` is the general 'objectPut'.
|
|
||||||
const requestType = request.apiMethods || 'objectPutPart';
|
|
||||||
if (!isBucketAuthorized(destinationBucket, requestType, canonicalID, authInfo,
|
if (!isBucketAuthorized(destinationBucket, requestType, canonicalID, authInfo,
|
||||||
log, request, request.actionImplicitDenies)) {
|
log, request, request.actionImplicitDenies)) {
|
||||||
log.debug('access denied for user on bucket', { requestType });
|
log.debug('access denied for user on bucket', { requestType });
|
||||||
|
@ -132,6 +136,8 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
|
||||||
}
|
}
|
||||||
return next(null, destinationBucket);
|
return next(null, destinationBucket);
|
||||||
},
|
},
|
||||||
|
(destinationBucket, next) => validateQuotas(request, destinationBucket, request.accountQuotas,
|
||||||
|
requestType, request.apiMethod, size, isPutVersion, log, err => next(err, destinationBucket)),
|
||||||
// Get bucket server-side encryption, if it exists.
|
// Get bucket server-side encryption, if it exists.
|
||||||
(destinationBucket, next) => getObjectSSEConfiguration(
|
(destinationBucket, next) => getObjectSSEConfiguration(
|
||||||
request.headers, destinationBucket, log,
|
request.headers, destinationBucket, log,
|
||||||
|
|
|
@ -96,7 +96,7 @@ function objectPutTagging(authInfo, request, log, callback) {
|
||||||
},
|
},
|
||||||
(bucket, objectMD, next) =>
|
(bucket, objectMD, next) =>
|
||||||
// if external backend handles tagging
|
// if external backend handles tagging
|
||||||
data.objectTagging('Put', objectKey, bucket, objectMD,
|
data.objectTagging('Put', objectKey, bucket.getName(), objectMD,
|
||||||
log, err => next(err, bucket, objectMD)),
|
log, err => next(err, bucket, objectMD)),
|
||||||
], (err, bucket, objectMD) => {
|
], (err, bucket, objectMD) => {
|
||||||
const additionalResHeaders = collectCorsHeaders(request.headers.origin,
|
const additionalResHeaders = collectCorsHeaders(request.headers.origin,
|
||||||
|
|
|
@ -1,4 +1,3 @@
|
||||||
const vaultclient = require('vaultclient');
|
|
||||||
const { auth } = require('arsenal');
|
const { auth } = require('arsenal');
|
||||||
|
|
||||||
const { config } = require('../Config');
|
const { config } = require('../Config');
|
||||||
|
@ -21,6 +20,7 @@ function getVaultClient(config) {
|
||||||
port,
|
port,
|
||||||
https: true,
|
https: true,
|
||||||
});
|
});
|
||||||
|
const vaultclient = require('vaultclient');
|
||||||
vaultClient = new vaultclient.Client(host, port, true, key, cert, ca);
|
vaultClient = new vaultclient.Client(host, port, true, key, cert, ca);
|
||||||
} else {
|
} else {
|
||||||
logger.info('vaultclient configuration', {
|
logger.info('vaultclient configuration', {
|
||||||
|
@ -28,6 +28,7 @@ function getVaultClient(config) {
|
||||||
port,
|
port,
|
||||||
https: false,
|
https: false,
|
||||||
});
|
});
|
||||||
|
const vaultclient = require('vaultclient');
|
||||||
vaultClient = new vaultclient.Client(host, port);
|
vaultClient = new vaultclient.Client(host, port);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -49,10 +50,6 @@ function getMemBackend(config) {
|
||||||
}
|
}
|
||||||
|
|
||||||
switch (config.backends.auth) {
|
switch (config.backends.auth) {
|
||||||
case 'mem':
|
|
||||||
implName = 'vaultMem';
|
|
||||||
client = getMemBackend(config);
|
|
||||||
break;
|
|
||||||
case 'multiple':
|
case 'multiple':
|
||||||
implName = 'vaultChain';
|
implName = 'vaultChain';
|
||||||
client = new ChainBackend('s3', [
|
client = new ChainBackend('s3', [
|
||||||
|
@ -60,9 +57,14 @@ case 'multiple':
|
||||||
getVaultClient(config),
|
getVaultClient(config),
|
||||||
]);
|
]);
|
||||||
break;
|
break;
|
||||||
default: // vault
|
case 'vault':
|
||||||
implName = 'vault';
|
implName = 'vault';
|
||||||
client = getVaultClient(config);
|
client = getVaultClient(config);
|
||||||
|
break;
|
||||||
|
default: // mem
|
||||||
|
implName = 'vaultMem';
|
||||||
|
client = getMemBackend(config);
|
||||||
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
module.exports = new Vault(client, implName);
|
module.exports = new Vault(client, implName);
|
||||||
|
|
|
@ -8,20 +8,6 @@ const inMemory = require('./in_memory/backend').backend;
|
||||||
const file = require('./file/backend');
|
const file = require('./file/backend');
|
||||||
const KMIPClient = require('arsenal').network.kmipClient;
|
const KMIPClient = require('arsenal').network.kmipClient;
|
||||||
const Common = require('./common');
|
const Common = require('./common');
|
||||||
let scalityKMS;
|
|
||||||
let scalityKMSImpl;
|
|
||||||
try {
|
|
||||||
// eslint-disable-next-line import/no-unresolved
|
|
||||||
const ScalityKMS = require('scality-kms');
|
|
||||||
scalityKMS = new ScalityKMS(config.kms);
|
|
||||||
scalityKMSImpl = 'scalityKms';
|
|
||||||
} catch (error) {
|
|
||||||
logger.warn('scality kms unavailable. ' +
|
|
||||||
'Using file kms backend unless mem specified.',
|
|
||||||
{ error });
|
|
||||||
scalityKMS = file;
|
|
||||||
scalityKMSImpl = 'fileKms';
|
|
||||||
}
|
|
||||||
|
|
||||||
let client;
|
let client;
|
||||||
let implName;
|
let implName;
|
||||||
|
@ -33,8 +19,9 @@ if (config.backends.kms === 'mem') {
|
||||||
client = file;
|
client = file;
|
||||||
implName = 'fileKms';
|
implName = 'fileKms';
|
||||||
} else if (config.backends.kms === 'scality') {
|
} else if (config.backends.kms === 'scality') {
|
||||||
client = scalityKMS;
|
const ScalityKMS = require('scality-kms');
|
||||||
implName = scalityKMSImpl;
|
client = new ScalityKMS(config.kms);
|
||||||
|
implName = 'scalityKms';
|
||||||
} else if (config.backends.kms === 'kmip') {
|
} else if (config.backends.kms === 'kmip') {
|
||||||
const kmipConfig = { kmip: config.kmip };
|
const kmipConfig = { kmip: config.kmip };
|
||||||
if (!kmipConfig.kmip) {
|
if (!kmipConfig.kmip) {
|
||||||
|
|
|
@ -1,131 +0,0 @@
|
||||||
/**
|
|
||||||
* Target service that should handle a message
|
|
||||||
* @readonly
|
|
||||||
* @enum {number}
|
|
||||||
*/
|
|
||||||
const MessageType = {
|
|
||||||
/** Message that contains a configuration overlay */
|
|
||||||
CONFIG_OVERLAY_MESSAGE: 1,
|
|
||||||
/** Message that requests a metrics report */
|
|
||||||
METRICS_REQUEST_MESSAGE: 2,
|
|
||||||
/** Message that contains a metrics report */
|
|
||||||
METRICS_REPORT_MESSAGE: 3,
|
|
||||||
/** Close the virtual TCP socket associated to the channel */
|
|
||||||
CHANNEL_CLOSE_MESSAGE: 4,
|
|
||||||
/** Write data to the virtual TCP socket associated to the channel */
|
|
||||||
CHANNEL_PAYLOAD_MESSAGE: 5,
|
|
||||||
};
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Target service that should handle a message
|
|
||||||
* @readonly
|
|
||||||
* @enum {number}
|
|
||||||
*/
|
|
||||||
const TargetType = {
|
|
||||||
/** Let the dispatcher choose the most appropriate message */
|
|
||||||
TARGET_ANY: 0,
|
|
||||||
};
|
|
||||||
|
|
||||||
const headerSize = 3;
|
|
||||||
|
|
||||||
class ChannelMessageV0 {
|
|
||||||
/**
|
|
||||||
* @param {Buffer} buffer Message bytes
|
|
||||||
*/
|
|
||||||
constructor(buffer) {
|
|
||||||
this.messageType = buffer.readUInt8(0);
|
|
||||||
this.channelNumber = buffer.readUInt8(1);
|
|
||||||
this.target = buffer.readUInt8(2);
|
|
||||||
this.payload = buffer.slice(headerSize);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @returns {number} Message type
|
|
||||||
*/
|
|
||||||
getType() {
|
|
||||||
return this.messageType;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @returns {number} Channel number if applicable
|
|
||||||
*/
|
|
||||||
getChannelNumber() {
|
|
||||||
return this.channelNumber;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @returns {number} Target service, or 0 to choose automatically
|
|
||||||
*/
|
|
||||||
getTarget() {
|
|
||||||
return this.target;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @returns {Buffer} Message payload if applicable
|
|
||||||
*/
|
|
||||||
getPayload() {
|
|
||||||
return this.payload;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Creates a wire representation of a channel close message
|
|
||||||
*
|
|
||||||
* @param {number} channelId Channel number
|
|
||||||
*
|
|
||||||
* @returns {Buffer} wire representation
|
|
||||||
*/
|
|
||||||
static encodeChannelCloseMessage(channelId) {
|
|
||||||
const buf = Buffer.alloc(headerSize);
|
|
||||||
buf.writeUInt8(MessageType.CHANNEL_CLOSE_MESSAGE, 0);
|
|
||||||
buf.writeUInt8(channelId, 1);
|
|
||||||
buf.writeUInt8(TargetType.TARGET_ANY, 2);
|
|
||||||
return buf;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Creates a wire representation of a channel data message
|
|
||||||
*
|
|
||||||
* @param {number} channelId Channel number
|
|
||||||
* @param {Buffer} data Payload
|
|
||||||
*
|
|
||||||
* @returns {Buffer} wire representation
|
|
||||||
*/
|
|
||||||
static encodeChannelDataMessage(channelId, data) {
|
|
||||||
const buf = Buffer.alloc(data.length + headerSize);
|
|
||||||
buf.writeUInt8(MessageType.CHANNEL_PAYLOAD_MESSAGE, 0);
|
|
||||||
buf.writeUInt8(channelId, 1);
|
|
||||||
buf.writeUInt8(TargetType.TARGET_ANY, 2);
|
|
||||||
data.copy(buf, headerSize);
|
|
||||||
return buf;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Creates a wire representation of a metrics message
|
|
||||||
*
|
|
||||||
* @param {object} body Metrics report
|
|
||||||
*
|
|
||||||
* @returns {Buffer} wire representation
|
|
||||||
*/
|
|
||||||
static encodeMetricsReportMessage(body) {
|
|
||||||
const report = JSON.stringify(body);
|
|
||||||
const buf = Buffer.alloc(report.length + headerSize);
|
|
||||||
buf.writeUInt8(MessageType.METRICS_REPORT_MESSAGE, 0);
|
|
||||||
buf.writeUInt8(0, 1);
|
|
||||||
buf.writeUInt8(TargetType.TARGET_ANY, 2);
|
|
||||||
buf.write(report, headerSize);
|
|
||||||
return buf;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Protocol name used for subprotocol negociation
|
|
||||||
*/
|
|
||||||
static get protocolName() {
|
|
||||||
return 'zenko-secure-channel-v0';
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
module.exports = {
|
|
||||||
ChannelMessageV0,
|
|
||||||
MessageType,
|
|
||||||
TargetType,
|
|
||||||
};
|
|
|
@ -1,94 +0,0 @@
|
||||||
const WebSocket = require('ws');
|
|
||||||
const arsenal = require('arsenal');
|
|
||||||
|
|
||||||
const logger = require('../utilities/logger');
|
|
||||||
const _config = require('../Config').config;
|
|
||||||
const { patchConfiguration } = require('./configuration');
|
|
||||||
const { reshapeExceptionError } = arsenal.errorUtils;
|
|
||||||
|
|
||||||
|
|
||||||
const managementAgentMessageType = {
|
|
||||||
/** Message that contains the loaded overlay */
|
|
||||||
NEW_OVERLAY: 1,
|
|
||||||
};
|
|
||||||
|
|
||||||
const CONNECTION_RETRY_TIMEOUT_MS = 5000;
|
|
||||||
|
|
||||||
|
|
||||||
function initManagementClient() {
|
|
||||||
const { host, port } = _config.managementAgent;
|
|
||||||
|
|
||||||
const ws = new WebSocket(`ws://${host}:${port}/watch`);
|
|
||||||
|
|
||||||
ws.on('open', () => {
|
|
||||||
logger.info('connected with management agent');
|
|
||||||
});
|
|
||||||
|
|
||||||
ws.on('close', (code, reason) => {
|
|
||||||
logger.info('disconnected from management agent', { reason });
|
|
||||||
setTimeout(initManagementClient, CONNECTION_RETRY_TIMEOUT_MS);
|
|
||||||
});
|
|
||||||
|
|
||||||
ws.on('error', error => {
|
|
||||||
logger.error('error on connection with management agent', { error });
|
|
||||||
});
|
|
||||||
|
|
||||||
ws.on('message', data => {
|
|
||||||
const method = 'initManagementclient::onMessage';
|
|
||||||
const log = logger.newRequestLogger();
|
|
||||||
let msg;
|
|
||||||
|
|
||||||
if (!data) {
|
|
||||||
log.error('message without data', { method });
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
try {
|
|
||||||
msg = JSON.parse(data);
|
|
||||||
} catch (err) {
|
|
||||||
log.error('data is an invalid json', { method, err, data });
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (msg.payload === undefined) {
|
|
||||||
log.error('message without payload', { method });
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
if (typeof msg.messageType !== 'number') {
|
|
||||||
log.error('messageType is not an integer', {
|
|
||||||
type: typeof msg.messageType,
|
|
||||||
method,
|
|
||||||
});
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
switch (msg.messageType) {
|
|
||||||
case managementAgentMessageType.NEW_OVERLAY:
|
|
||||||
patchConfiguration(msg.payload, log, err => {
|
|
||||||
if (err) {
|
|
||||||
log.error('failed to patch overlay', {
|
|
||||||
error: reshapeExceptionError(err),
|
|
||||||
method,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
});
|
|
||||||
return;
|
|
||||||
default:
|
|
||||||
log.error('new overlay message with unmanaged message type', {
|
|
||||||
method,
|
|
||||||
type: msg.messageType,
|
|
||||||
});
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
function isManagementAgentUsed() {
|
|
||||||
return process.env.MANAGEMENT_USE_AGENT === '1';
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
module.exports = {
|
|
||||||
managementAgentMessageType,
|
|
||||||
initManagementClient,
|
|
||||||
isManagementAgentUsed,
|
|
||||||
};
|
|
|
@ -1,240 +0,0 @@
|
||||||
const arsenal = require('arsenal');
|
|
||||||
|
|
||||||
const { buildAuthDataAccount } = require('../auth/in_memory/builder');
|
|
||||||
const _config = require('../Config').config;
|
|
||||||
const metadata = require('../metadata/wrapper');
|
|
||||||
|
|
||||||
const { getStoredCredentials } = require('./credentials');
|
|
||||||
|
|
||||||
const latestOverlayVersionKey = 'configuration/overlay-version';
|
|
||||||
const managementDatabaseName = 'PENSIEVE';
|
|
||||||
const replicatorEndpoint = 'zenko-cloudserver-replicator';
|
|
||||||
const { decryptSecret } = arsenal.pensieve.credentialUtils;
|
|
||||||
const { patchLocations } = arsenal.patches.locationConstraints;
|
|
||||||
const { reshapeExceptionError } = arsenal.errorUtils;
|
|
||||||
const { replicationBackends } = require('arsenal').constants;
|
|
||||||
|
|
||||||
function overlayHasVersion(overlay) {
|
|
||||||
return overlay && overlay.version !== undefined;
|
|
||||||
}
|
|
||||||
|
|
||||||
function remoteOverlayIsNewer(cachedOverlay, remoteOverlay) {
|
|
||||||
return (overlayHasVersion(remoteOverlay) &&
|
|
||||||
(!overlayHasVersion(cachedOverlay) ||
|
|
||||||
remoteOverlay.version > cachedOverlay.version));
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Updates the live {Config} object with the new overlay configuration.
|
|
||||||
*
|
|
||||||
* No-op if this version was already applied to the live {Config}.
|
|
||||||
*
|
|
||||||
* @param {object} newConf Overlay configuration to apply
|
|
||||||
* @param {werelogs~Logger} log Request-scoped logger
|
|
||||||
* @param {function} cb Function to call with (error, newConf)
|
|
||||||
*
|
|
||||||
* @returns {undefined}
|
|
||||||
*/
|
|
||||||
function patchConfiguration(newConf, log, cb) {
|
|
||||||
if (newConf.version === undefined) {
|
|
||||||
log.debug('no remote configuration created yet');
|
|
||||||
return process.nextTick(cb, null, newConf);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (_config.overlayVersion !== undefined &&
|
|
||||||
newConf.version <= _config.overlayVersion) {
|
|
||||||
log.debug('configuration version already applied',
|
|
||||||
{ configurationVersion: newConf.version });
|
|
||||||
return process.nextTick(cb, null, newConf);
|
|
||||||
}
|
|
||||||
return getStoredCredentials(log, (err, creds) => {
|
|
||||||
if (err) {
|
|
||||||
return cb(err);
|
|
||||||
}
|
|
||||||
const accounts = [];
|
|
||||||
if (newConf.users) {
|
|
||||||
newConf.users.forEach(u => {
|
|
||||||
if (u.secretKey && u.secretKey.length > 0) {
|
|
||||||
const secretKey = decryptSecret(creds, u.secretKey);
|
|
||||||
// accountType will be service-replication or service-clueso
|
|
||||||
let serviceName;
|
|
||||||
if (u.accountType && u.accountType.startsWith('service-')) {
|
|
||||||
serviceName = u.accountType.split('-')[1];
|
|
||||||
}
|
|
||||||
const newAccount = buildAuthDataAccount(
|
|
||||||
u.accessKey, secretKey, u.canonicalId, serviceName,
|
|
||||||
u.userName);
|
|
||||||
accounts.push(newAccount.accounts[0]);
|
|
||||||
}
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
const restEndpoints = Object.assign({}, _config.restEndpoints);
|
|
||||||
if (newConf.endpoints) {
|
|
||||||
newConf.endpoints.forEach(e => {
|
|
||||||
restEndpoints[e.hostname] = e.locationName;
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!restEndpoints[replicatorEndpoint]) {
|
|
||||||
restEndpoints[replicatorEndpoint] = 'us-east-1';
|
|
||||||
}
|
|
||||||
|
|
||||||
const locations = patchLocations(newConf.locations, creds, log);
|
|
||||||
if (Object.keys(locations).length !== 0) {
|
|
||||||
try {
|
|
||||||
_config.setLocationConstraints(locations);
|
|
||||||
} catch (error) {
|
|
||||||
const exceptionError = reshapeExceptionError(error);
|
|
||||||
log.error('could not apply configuration version location ' +
|
|
||||||
'constraints', { error: exceptionError,
|
|
||||||
method: 'getStoredCredentials' });
|
|
||||||
return cb(exceptionError);
|
|
||||||
}
|
|
||||||
try {
|
|
||||||
const locationsWithReplicationBackend = Object.keys(locations)
|
|
||||||
// NOTE: In Orbit, we don't need to have Scality location in our
|
|
||||||
// replication endpoind config, since we do not replicate to
|
|
||||||
// any Scality Instance yet.
|
|
||||||
.filter(key => replicationBackends
|
|
||||||
[locations[key].type])
|
|
||||||
.reduce((obj, key) => {
|
|
||||||
/* eslint no-param-reassign:0 */
|
|
||||||
obj[key] = locations[key];
|
|
||||||
return obj;
|
|
||||||
}, {});
|
|
||||||
_config.setReplicationEndpoints(
|
|
||||||
locationsWithReplicationBackend);
|
|
||||||
} catch (error) {
|
|
||||||
const exceptionError = reshapeExceptionError(error);
|
|
||||||
log.error('could not apply replication endpoints',
|
|
||||||
{ error: exceptionError, method: 'getStoredCredentials' });
|
|
||||||
return cb(exceptionError);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
_config.setAuthDataAccounts(accounts);
|
|
||||||
_config.setRestEndpoints(restEndpoints);
|
|
||||||
_config.setPublicInstanceId(newConf.instanceId);
|
|
||||||
|
|
||||||
if (newConf.browserAccess) {
|
|
||||||
if (Boolean(_config.browserAccessEnabled) !==
|
|
||||||
Boolean(newConf.browserAccess.enabled)) {
|
|
||||||
_config.browserAccessEnabled =
|
|
||||||
Boolean(newConf.browserAccess.enabled);
|
|
||||||
_config.emit('browser-access-enabled-change');
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
_config.overlayVersion = newConf.version;
|
|
||||||
|
|
||||||
log.info('applied configuration version',
|
|
||||||
{ configurationVersion: _config.overlayVersion });
|
|
||||||
|
|
||||||
return cb(null, newConf);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Writes configuration version to the management database
|
|
||||||
*
|
|
||||||
* @param {object} cachedOverlay Latest stored configuration version
|
|
||||||
* for freshness comparison purposes
|
|
||||||
* @param {object} remoteOverlay New configuration version
|
|
||||||
* @param {werelogs~Logger} log Request-scoped logger
|
|
||||||
* @param {function} cb Function to call with (error, remoteOverlay)
|
|
||||||
*
|
|
||||||
* @returns {undefined}
|
|
||||||
*/
|
|
||||||
function saveConfigurationVersion(cachedOverlay, remoteOverlay, log, cb) {
|
|
||||||
if (remoteOverlayIsNewer(cachedOverlay, remoteOverlay)) {
|
|
||||||
const objName = `configuration/overlay/${remoteOverlay.version}`;
|
|
||||||
metadata.putObjectMD(managementDatabaseName, objName, remoteOverlay,
|
|
||||||
{}, log, error => {
|
|
||||||
if (error) {
|
|
||||||
const exceptionError = reshapeExceptionError(error);
|
|
||||||
log.error('could not save configuration',
|
|
||||||
{ error: exceptionError,
|
|
||||||
method: 'saveConfigurationVersion',
|
|
||||||
configurationVersion: remoteOverlay.version });
|
|
||||||
cb(exceptionError);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
metadata.putObjectMD(managementDatabaseName,
|
|
||||||
latestOverlayVersionKey, remoteOverlay.version, {}, log,
|
|
||||||
error => {
|
|
||||||
if (error) {
|
|
||||||
log.error('could not save configuration version', {
|
|
||||||
configurationVersion: remoteOverlay.version,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
cb(error, remoteOverlay);
|
|
||||||
});
|
|
||||||
});
|
|
||||||
} else {
|
|
||||||
log.debug('no remote configuration to cache yet');
|
|
||||||
process.nextTick(cb, null, remoteOverlay);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Loads the latest cached configuration overlay from the management
|
|
||||||
* database, without contacting the Orbit API.
|
|
||||||
*
|
|
||||||
* @param {werelogs~Logger} log Request-scoped logger
|
|
||||||
* @param {function} callback Function called with (error, cachedOverlay)
|
|
||||||
*
|
|
||||||
* @returns {undefined}
|
|
||||||
*/
|
|
||||||
function loadCachedOverlay(log, callback) {
|
|
||||||
return metadata.getObjectMD(managementDatabaseName,
|
|
||||||
latestOverlayVersionKey, {}, log, (err, version) => {
|
|
||||||
if (err) {
|
|
||||||
if (err.is.NoSuchKey) {
|
|
||||||
return process.nextTick(callback, null, {});
|
|
||||||
}
|
|
||||||
return callback(err);
|
|
||||||
}
|
|
||||||
return metadata.getObjectMD(managementDatabaseName,
|
|
||||||
`configuration/overlay/${version}`, {}, log, (err, conf) => {
|
|
||||||
if (err) {
|
|
||||||
if (err.is.NoSuchKey) {
|
|
||||||
return process.nextTick(callback, null, {});
|
|
||||||
}
|
|
||||||
return callback(err);
|
|
||||||
}
|
|
||||||
return callback(null, conf);
|
|
||||||
});
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
function applyAndSaveOverlay(overlay, log) {
|
|
||||||
patchConfiguration(overlay, log, err => {
|
|
||||||
if (err) {
|
|
||||||
log.error('could not apply pushed overlay', {
|
|
||||||
error: reshapeExceptionError(err),
|
|
||||||
method: 'applyAndSaveOverlay',
|
|
||||||
});
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
saveConfigurationVersion(null, overlay, log, err => {
|
|
||||||
if (err) {
|
|
||||||
log.error('could not cache overlay version', {
|
|
||||||
error: reshapeExceptionError(err),
|
|
||||||
method: 'applyAndSaveOverlay',
|
|
||||||
});
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
log.info('overlay push processed');
|
|
||||||
});
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
module.exports = {
|
|
||||||
loadCachedOverlay,
|
|
||||||
managementDatabaseName,
|
|
||||||
patchConfiguration,
|
|
||||||
saveConfigurationVersion,
|
|
||||||
remoteOverlayIsNewer,
|
|
||||||
applyAndSaveOverlay,
|
|
||||||
};
|
|
|
@ -1,145 +0,0 @@
|
||||||
const arsenal = require('arsenal');
|
|
||||||
const forge = require('node-forge');
|
|
||||||
const request = require('../utilities/request');
|
|
||||||
|
|
||||||
const metadata = require('../metadata/wrapper');
|
|
||||||
|
|
||||||
const managementDatabaseName = 'PENSIEVE';
|
|
||||||
const tokenConfigurationKey = 'auth/zenko/remote-management-token';
|
|
||||||
const tokenRotationDelay = 3600 * 24 * 7 * 1000; // 7 days
|
|
||||||
const { reshapeExceptionError } = arsenal.errorUtils;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Retrieves Orbit API token from the management database.
|
|
||||||
*
|
|
||||||
* The token is used to authenticate stat posting and
|
|
||||||
*
|
|
||||||
* @param {werelogs~Logger} log Request-scoped logger to be able to trace
|
|
||||||
* initialization process
|
|
||||||
* @param {function} callback Function called with (error, result)
|
|
||||||
*
|
|
||||||
* @returns {undefined}
|
|
||||||
*/
|
|
||||||
function getStoredCredentials(log, callback) {
|
|
||||||
metadata.getObjectMD(managementDatabaseName, tokenConfigurationKey, {},
|
|
||||||
log, callback);
|
|
||||||
}
|
|
||||||
|
|
||||||
function issueCredentials(managementEndpoint, instanceId, log, callback) {
|
|
||||||
log.info('registering with API to get token');
|
|
||||||
|
|
||||||
const keyPair = forge.pki.rsa.generateKeyPair({ bits: 2048, e: 0x10001 });
|
|
||||||
const privateKey = forge.pki.privateKeyToPem(keyPair.privateKey);
|
|
||||||
const publicKey = forge.pki.publicKeyToPem(keyPair.publicKey);
|
|
||||||
|
|
||||||
const postData = {
|
|
||||||
publicKey,
|
|
||||||
};
|
|
||||||
|
|
||||||
request.post(`${managementEndpoint}/${instanceId}/register`,
|
|
||||||
{ body: postData, json: true }, (error, response, body) => {
|
|
||||||
if (error) {
|
|
||||||
return callback(error);
|
|
||||||
}
|
|
||||||
if (response.statusCode !== 201) {
|
|
||||||
log.error('could not register instance', {
|
|
||||||
statusCode: response.statusCode,
|
|
||||||
});
|
|
||||||
return callback(arsenal.errors.InternalError);
|
|
||||||
}
|
|
||||||
/* eslint-disable no-param-reassign */
|
|
||||||
body.privateKey = privateKey;
|
|
||||||
/* eslint-enable no-param-reassign */
|
|
||||||
return callback(null, body);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
function confirmInstanceCredentials(
|
|
||||||
managementEndpoint, instanceId, creds, log, callback) {
|
|
||||||
const postData = {
|
|
||||||
serial: creds.serial || 0,
|
|
||||||
publicKey: creds.publicKey,
|
|
||||||
};
|
|
||||||
|
|
||||||
const opts = {
|
|
||||||
headers: {
|
|
||||||
'x-instance-authentication-token': creds.token,
|
|
||||||
},
|
|
||||||
body: postData,
|
|
||||||
};
|
|
||||||
|
|
||||||
request.post(`${managementEndpoint}/${instanceId}/confirm`,
|
|
||||||
opts, (error, response) => {
|
|
||||||
if (error) {
|
|
||||||
return callback(error);
|
|
||||||
}
|
|
||||||
if (response.statusCode === 200) {
|
|
||||||
return callback(null, instanceId, creds.token);
|
|
||||||
}
|
|
||||||
return callback(arsenal.errors.InternalError);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Initializes credentials and PKI in the management database.
|
|
||||||
*
|
|
||||||
* In case the management database is new and empty, the instance
|
|
||||||
* is registered as new against the Orbit API with newly-generated
|
|
||||||
* RSA key pair.
|
|
||||||
*
|
|
||||||
* @param {string} managementEndpoint API endpoint
|
|
||||||
* @param {string} instanceId UUID of this deployment
|
|
||||||
* @param {werelogs~Logger} log Request-scoped logger to be able to trace
|
|
||||||
* initialization process
|
|
||||||
* @param {function} callback Function called with (error, result)
|
|
||||||
*
|
|
||||||
* @returns {undefined}
|
|
||||||
*/
|
|
||||||
function initManagementCredentials(
|
|
||||||
managementEndpoint, instanceId, log, callback) {
|
|
||||||
getStoredCredentials(log, (error, value) => {
|
|
||||||
if (error) {
|
|
||||||
if (error.is.NoSuchKey) {
|
|
||||||
return issueCredentials(managementEndpoint, instanceId, log,
|
|
||||||
(error, value) => {
|
|
||||||
if (error) {
|
|
||||||
log.error('could not issue token',
|
|
||||||
{ error: reshapeExceptionError(error),
|
|
||||||
method: 'initManagementCredentials' });
|
|
||||||
return callback(error);
|
|
||||||
}
|
|
||||||
log.debug('saving token');
|
|
||||||
return metadata.putObjectMD(managementDatabaseName,
|
|
||||||
tokenConfigurationKey, value, {}, log, error => {
|
|
||||||
if (error) {
|
|
||||||
log.error('could not save token',
|
|
||||||
{ error: reshapeExceptionError(error),
|
|
||||||
method: 'initManagementCredentials',
|
|
||||||
});
|
|
||||||
return callback(error);
|
|
||||||
}
|
|
||||||
log.info('saved token locally, ' +
|
|
||||||
'confirming instance');
|
|
||||||
return confirmInstanceCredentials(
|
|
||||||
managementEndpoint, instanceId, value, log,
|
|
||||||
callback);
|
|
||||||
});
|
|
||||||
});
|
|
||||||
}
|
|
||||||
log.debug('could not get token', { error });
|
|
||||||
return callback(error);
|
|
||||||
}
|
|
||||||
|
|
||||||
log.info('returning existing token');
|
|
||||||
if (Date.now() - value.issueDate > tokenRotationDelay) {
|
|
||||||
log.warn('management API token is too old, should re-issue');
|
|
||||||
}
|
|
||||||
|
|
||||||
return callback(null, instanceId, value.token);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
module.exports = {
|
|
||||||
getStoredCredentials,
|
|
||||||
initManagementCredentials,
|
|
||||||
};
|
|
|
@ -1,138 +0,0 @@
|
||||||
const arsenal = require('arsenal');
|
|
||||||
const async = require('async');
|
|
||||||
|
|
||||||
const metadata = require('../metadata/wrapper');
|
|
||||||
const logger = require('../utilities/logger');
|
|
||||||
|
|
||||||
const {
|
|
||||||
loadCachedOverlay,
|
|
||||||
managementDatabaseName,
|
|
||||||
patchConfiguration,
|
|
||||||
} = require('./configuration');
|
|
||||||
const { initManagementCredentials } = require('./credentials');
|
|
||||||
const { startWSManagementClient } = require('./push');
|
|
||||||
const { startPollingManagementClient } = require('./poll');
|
|
||||||
const { reshapeExceptionError } = arsenal.errorUtils;
|
|
||||||
const { isManagementAgentUsed } = require('./agentClient');
|
|
||||||
|
|
||||||
const initRemoteManagementRetryDelay = 10000;
|
|
||||||
|
|
||||||
const managementEndpointRoot =
|
|
||||||
process.env.MANAGEMENT_ENDPOINT ||
|
|
||||||
'https://api.zenko.io';
|
|
||||||
const managementEndpoint = `${managementEndpointRoot}/api/v1/instance`;
|
|
||||||
|
|
||||||
const pushEndpointRoot =
|
|
||||||
process.env.PUSH_ENDPOINT ||
|
|
||||||
'https://push.api.zenko.io';
|
|
||||||
const pushEndpoint = `${pushEndpointRoot}/api/v1/instance`;
|
|
||||||
|
|
||||||
/**
 * Creates the internal metadata bucket used as the management database.
 * Treats a pre-existing bucket as success.
 *
 * @param {werelogs~Logger} log request-scoped logger
 * @param {function} callback called with (error) once done
 * @returns {undefined}
 */
function initManagementDatabase(log, callback) {
    // XXX choose proper owner names
    const md = new arsenal.models.BucketInfo(managementDatabaseName, 'owner',
        'owner display name', new Date().toJSON());

    metadata.createBucket(managementDatabaseName, md, log, error => {
        if (error) {
            if (error.is.BucketAlreadyExists) {
                // Fixed misleading log message: nothing was created in this
                // branch, the database was already there.
                log.info('management database already exists');
                return callback();
            }
            log.error('could not initialize management database',
                { error: reshapeExceptionError(error),
                    method: 'initManagementDatabase' });
            return callback(error);
        }
        log.info('initialized management database');
        return callback();
    });
}
|
|
||||||
|
|
||||||
// Starts the background management listener in either push (WebSocket) or
// poll mode, selected by the MANAGEMENT_MODE environment variable
// (defaults to push).
function startManagementListeners(instanceId, token) {
    const mode = process.env.MANAGEMENT_MODE || 'push';
    if (mode !== 'push') {
        startPollingManagementClient(managementEndpoint, instanceId, token);
        return;
    }
    startWSManagementClient(`${pushEndpoint}/${instanceId}/ws`, token);
}
|
|
||||||
|
|
||||||
/**
 * Initializes Orbit-based management by:
 * - creating the management database in metadata
 * - generating a key pair for credentials encryption
 * - generating an instance-unique ID
 * - getting an authentication token for the API
 * - loading and applying the latest cached overlay configuration
 * - starting a configuration update and metrics push background task
 *
 * @param {werelogs~Logger} log Request-scoped logger to be able to trace
 *  initialization process
 * @param {function} callback Function to call once the overlay is loaded
 *  (overlay)
 *
 * @returns {undefined}
 */
function initManagement(log, callback) {
    if ((process.env.REMOTE_MANAGEMENT_DISABLE &&
        process.env.REMOTE_MANAGEMENT_DISABLE !== '0')
        || process.env.S3BACKEND === 'mem') {
        log.info('remote management disabled');
        return;
    }

    /* Temporary check before to fully move to the process management agent:
     * the management agent must be used if and only if no callback is given.
     * Use strict boolean inequality as logical XOR instead of the bitwise
     * `^` operator — equivalent for boolean operands and clearer. */
    if (isManagementAgentUsed() !== (typeof callback === 'function')) {
        let msg = 'misuse of initManagement function: ';
        msg += `MANAGEMENT_USE_AGENT: ${process.env.MANAGEMENT_USE_AGENT}`;
        msg += `, callback type: ${typeof callback}`;
        throw new Error(msg);
    }

    async.waterfall([
        // metadata.setup is only needed when running under the agent
        // eslint-disable-next-line arrow-body-style
        cb => { return isManagementAgentUsed() ? metadata.setup(cb) : cb(); },
        cb => initManagementDatabase(log, cb),
        cb => metadata.getUUID(log, cb),
        (instanceId, cb) => initManagementCredentials(
            managementEndpoint, instanceId, log, cb),
        (instanceId, token, cb) => {
            // Overlay loading/patching is delegated to the agent when unused
            if (!isManagementAgentUsed()) {
                cb(null, instanceId, token, {});
                return;
            }
            loadCachedOverlay(log, (err, overlay) => cb(err, instanceId,
                token, overlay));
        },
        (instanceId, token, overlay, cb) => {
            if (!isManagementAgentUsed()) {
                cb(null, instanceId, token, overlay);
                return;
            }
            patchConfiguration(overlay, log,
                err => cb(err, instanceId, token, overlay));
        },
    ], (error, instanceId, token, overlay) => {
        if (error) {
            log.error('could not initialize remote management, retrying later',
                { error: reshapeExceptionError(error),
                    method: 'initManagement' });
            // NOTE(review): the retry call passes no callback; this relies on
            // the agent-usage check above accepting that combination — confirm
            setTimeout(initManagement,
                initRemoteManagementRetryDelay,
                logger.newRequestLogger());
        } else {
            log.info(`this deployment's Instance ID is ${instanceId}`);
            log.end('management init done');
            startManagementListeners(instanceId, token);
            if (callback) {
                callback(overlay);
            }
        }
    });
}
|
|
||||||
|
|
||||||
// Exposed API: entry points for the Orbit-based management initialization.
module.exports = {
    initManagement,
    initManagementDatabase,
};
|
|
|
@ -1,157 +0,0 @@
|
||||||
const arsenal = require('arsenal');
|
|
||||||
const async = require('async');
|
|
||||||
const request = require('../utilities/request');
|
|
||||||
|
|
||||||
const _config = require('../Config').config;
|
|
||||||
const logger = require('../utilities/logger');
|
|
||||||
const metadata = require('../metadata/wrapper');
|
|
||||||
const {
|
|
||||||
loadCachedOverlay,
|
|
||||||
patchConfiguration,
|
|
||||||
saveConfigurationVersion,
|
|
||||||
} = require('./configuration');
|
|
||||||
const { reshapeExceptionError } = arsenal.errorUtils;
|
|
||||||
|
|
||||||
const pushReportDelay = 30000;
|
|
||||||
const pullConfigurationOverlayDelay = 60000;
|
|
||||||
|
|
||||||
// Fetches the latest configuration overlay from the management API.
// Calls back with (err, cachedOverlay, remoteOverlay); a 404 yields an
// empty remote overlay, any other non-200 status is treated as forbidden.
function loadRemoteOverlay(
    managementEndpoint, instanceId, remoteToken, cachedOverlay, log, cb) {
    log.debug('loading remote overlay');
    const opts = {
        headers: {
            'x-instance-authentication-token': remoteToken,
            'x-scal-request-id': log.getSerializedUids(),
        },
        json: true,
    };
    const overlayUrl = `${managementEndpoint}/${instanceId}/config/overlay`;
    request.get(overlayUrl, opts, (error, response, body) => {
        if (error) {
            return cb(error);
        }
        switch (response.statusCode) {
        case 200:
            return cb(null, cachedOverlay, body);
        case 404:
            return cb(null, cachedOverlay, {});
        default:
            return cb(arsenal.errors.AccessForbidden, cachedOverlay, {});
        }
    });
}
|
|
||||||
|
|
||||||
// TODO save only after successful patch
// Applies the cached overlay, fetches the remote one, persists and applies
// it, then unconditionally reschedules itself after
// pullConfigurationOverlayDelay.
function applyConfigurationOverlay(
    managementEndpoint, instanceId, remoteToken, log) {
    const steps = [
        next => loadCachedOverlay(log, next),
        (cachedOverlay, next) => patchConfiguration(cachedOverlay, log, next),
        (cachedOverlay, next) => loadRemoteOverlay(
            managementEndpoint, instanceId, remoteToken, cachedOverlay, log,
            next),
        (cachedOverlay, remoteOverlay, next) => saveConfigurationVersion(
            cachedOverlay, remoteOverlay, log, next),
        (remoteOverlay, next) => patchConfiguration(remoteOverlay, log, next),
    ];
    async.waterfall(steps, error => {
        if (error) {
            log.error('could not apply managed configuration',
                { error: reshapeExceptionError(error),
                    method: 'applyConfigurationOverlay' });
        }
        setTimeout(applyConfigurationOverlay, pullConfigurationOverlayDelay,
            managementEndpoint, instanceId, remoteToken,
            logger.newRequestLogger());
    });
}
|
|
||||||
|
|
||||||
// Posts a metrics report to the management API. Failures are logged but
// never propagated; the optional `next` continuation is always invoked
// with (null, instanceId, remoteToken) so the caller's flow continues.
function postStats(managementEndpoint, instanceId, remoteToken, report, next) {
    const options = {
        json: true,
        headers: {
            'content-type': 'application/json',
            'x-instance-authentication-token': remoteToken,
        },
        body: report,
    };
    const onResponse = (err, response, body) => {
        if (err) {
            logger.info('could not post stats', { error: err });
        }
        if (response && response.statusCode !== 201) {
            logger.info('could not post stats', {
                body,
                statusCode: response.statusCode,
            });
        }
        if (next) {
            next(null, instanceId, remoteToken);
        }
    };
    return request.post(`${managementEndpoint}/${instanceId}/stats`, options,
        onResponse);
}
|
|
||||||
|
|
||||||
// Fetches the local metrics report from this CloudServer instance,
// authenticated with the REPORT_TOKEN environment variable.
function getStats(next) {
    const reportURL = `http://localhost:${_config.port}/_/report`;
    const reportOptions = {
        headers: { 'x-scal-report-token': process.env.REPORT_TOKEN },
    };
    return request.get(reportURL, reportOptions, next);
}
|
|
||||||
|
|
||||||
// Retrieves the local metrics report and forwards it to the management
// API, then reschedules itself after pushReportDelay. Disabled entirely
// (including rescheduling) when PUSH_STATS is the string 'false'.
function pushStats(managementEndpoint, instanceId, remoteToken, next) {
    if (process.env.PUSH_STATS === 'false') {
        return;
    }

    getStats((err, res, report) => {
        if (err) {
            logger.info('could not retrieve stats', { error: err });
            return;
        }
        logger.debug('report', { report });
        postStats(managementEndpoint, instanceId, remoteToken, report, next);
    });

    setTimeout(pushStats, pushReportDelay,
        managementEndpoint, instanceId, remoteToken);
}
|
|
||||||
|
|
||||||
/**
 * Starts background task that updates configuration and pushes stats.
 *
 * Periodically polls for configuration updates, and pushes stats at
 * a fixed interval. Also pushes stats whenever a bucket change is
 * notified by the metadata layer.
 *
 * @param {string} managementEndpoint API endpoint
 * @param {string} instanceId UUID of this deployment
 * @param {string} remoteToken API authentication token
 *
 * @returns {undefined}
 */
function startPollingManagementClient(
    managementEndpoint, instanceId, remoteToken) {
    const onBucketChange = () => pushStats(
        managementEndpoint, instanceId, remoteToken);
    metadata.notifyBucketChange(onBucketChange);

    pushStats(managementEndpoint, instanceId, remoteToken);
    applyConfigurationOverlay(managementEndpoint, instanceId, remoteToken,
        logger.newRequestLogger());
}
|
|
||||||
|
|
||||||
// Exposed API: polling-mode management client entry point.
module.exports = {
    startPollingManagementClient,
};
|
|
|
@ -1,301 +0,0 @@
|
||||||
const arsenal = require('arsenal');
|
|
||||||
const HttpsProxyAgent = require('https-proxy-agent');
|
|
||||||
const net = require('net');
|
|
||||||
const request = require('../utilities/request');
|
|
||||||
const { URL } = require('url');
|
|
||||||
const WebSocket = require('ws');
|
|
||||||
const assert = require('assert');
|
|
||||||
const http = require('http');
|
|
||||||
|
|
||||||
const _config = require('../Config').config;
|
|
||||||
const logger = require('../utilities/logger');
|
|
||||||
const metadata = require('../metadata/wrapper');
|
|
||||||
|
|
||||||
const { reshapeExceptionError } = arsenal.errorUtils;
|
|
||||||
const { isManagementAgentUsed } = require('./agentClient');
|
|
||||||
const { applyAndSaveOverlay } = require('./configuration');
|
|
||||||
const {
|
|
||||||
ChannelMessageV0,
|
|
||||||
MessageType,
|
|
||||||
} = require('./ChannelMessageV0');
|
|
||||||
|
|
||||||
const {
|
|
||||||
CONFIG_OVERLAY_MESSAGE,
|
|
||||||
METRICS_REQUEST_MESSAGE,
|
|
||||||
CHANNEL_CLOSE_MESSAGE,
|
|
||||||
CHANNEL_PAYLOAD_MESSAGE,
|
|
||||||
} = MessageType;
|
|
||||||
|
|
||||||
const PING_INTERVAL_MS = 10000;
|
|
||||||
const subprotocols = [ChannelMessageV0.protocolName];
|
|
||||||
|
|
||||||
const cloudServerHost = process.env.SECURE_CHANNEL_DEFAULT_FORWARD_TO_HOST
|
|
||||||
|| 'localhost';
|
|
||||||
const cloudServerPort = process.env.SECURE_CHANNEL_DEFAULT_FORWARD_TO_PORT
|
|
||||||
|| _config.port;
|
|
||||||
|
|
||||||
let overlayMessageListener = null;
|
|
||||||
let connected = false;
|
|
||||||
|
|
||||||
// No wildcard nor cidr/mask match for now
/**
 * Builds the proxy agent for the push-server WebSocket connection from the
 * standard proxy environment variables, or returns null when no proxy
 * applies (NO_PROXY exclusion, or no proxy configured).
 *
 * @param {string} pushEndpoint push server URL
 * @param {object} env environment variables (typically process.env)
 * @param {werelogs~Logger} log logger
 * @returns {HttpsProxyAgent|null} agent to pass to the WebSocket client
 */
function createWSAgent(pushEndpoint, env, log) {
    const url = new URL(pushEndpoint);
    // Trim each entry so lists written as "a.com, b.com" match correctly
    // (the previous split-only parsing kept leading spaces and never
    // matched the hostname).
    const noProxy = (env.NO_PROXY || env.no_proxy || '')
        .split(',')
        .map(entry => entry.trim());

    if (noProxy.includes(url.hostname)) {
        log.info('push server ws has proxy exclusion', { noProxy });
        return null;
    }

    if (url.protocol === 'https:' || url.protocol === 'wss:') {
        const httpsProxy = (env.HTTPS_PROXY || env.https_proxy);
        if (httpsProxy) {
            log.info('push server ws using https proxy', { httpsProxy });
            return new HttpsProxyAgent(httpsProxy);
        }
    } else if (url.protocol === 'http:' || url.protocol === 'ws:') {
        const httpProxy = (env.HTTP_PROXY || env.http_proxy);
        if (httpProxy) {
            log.info('push server ws using http proxy', { httpProxy });
            return new HttpsProxyAgent(httpProxy);
        }
    }

    // Fall back to a wildcard proxy if configured
    const allProxy = (env.ALL_PROXY || env.all_proxy);
    if (allProxy) {
        log.info('push server ws using wildcard proxy', { allProxy });
        return new HttpsProxyAgent(allProxy);
    }

    log.info('push server ws not using proxy');
    return null;
}
|
|
||||||
|
|
||||||
/**
 * Starts background task that updates configuration and pushes stats.
 *
 * Receives pushed Websocket messages on configuration updates, and
 * sends stat messages in response to API sollicitations.
 *
 * @param {string} url API endpoint
 * @param {string} token API authentication token
 * @param {function} cb end-of-connection callback
 *
 * @returns {undefined}
 */
function startWSManagementClient(url, token, cb) {
    logger.info('connecting to push server', { url });
    // Logs an error with a contextual message; silently ignores null errors
    // so it can be used directly as a send/ping completion callback.
    function _logError(error, errorMessage, method) {
        if (error) {
            logger.error(`management client error: ${errorMessage}`,
                { error: reshapeExceptionError(error), method });
        }
    }

    // Sparse array of forwarded TCP sockets, indexed by channel id.
    const socketsByChannelId = [];
    const headers = {
        'x-instance-authentication-token': token,
    };
    const agent = createWSAgent(url, process.env, logger);

    const ws = new WebSocket(url, subprotocols, { headers, agent });
    let pingTimeout = null;

    // Sends a ws-level ping and arms a kill timer: if no pong resets it
    // within PING_INTERVAL_MS (see initiatePing), the connection is
    // forcibly terminated.
    function sendPing() {
        if (ws.readyState === ws.OPEN) {
            ws.ping(err => _logError(err, 'failed to send a ping', 'sendPing'));
        }
        pingTimeout = setTimeout(() => ws.terminate(), PING_INTERVAL_MS);
    }

    // Cancels the pending kill timer and schedules the next ping.
    function initiatePing() {
        clearTimeout(pingTimeout);
        setTimeout(sendPing, PING_INTERVAL_MS);
    }

    // Fetches the local metrics report and forwards it on the WebSocket.
    // Disabled when PUSH_STATS is the string 'false'.
    function pushStats(options) {
        if (process.env.PUSH_STATS === 'false') {
            return;
        }
        const fromURL = `http://${cloudServerHost}:${cloudServerPort}/_/report`;
        const fromOptions = {
            json: true,
            headers: {
                'x-scal-report-token': process.env.REPORT_TOKEN,
                'x-scal-report-skip-cache': Boolean(options && options.noCache),
            },
        };
        request.get(fromURL, fromOptions, (err, response, body) => {
            if (err) {
                _logError(err, 'failed to get metrics report', 'pushStats');
                return;
            }
            ws.send(ChannelMessageV0.encodeMetricsReportMessage(body),
                err => _logError(err, 'failed to send metrics report message',
                    'pushStats'));
        });
    }

    // Destroys and forgets the forwarded socket for a channel, if any.
    function closeChannel(channelId) {
        const socket = socketsByChannelId[channelId];
        if (socket) {
            socket.destroy();
            delete socketsByChannelId[channelId];
        }
    }

    // Forwards payload bytes from the push server to the local CloudServer,
    // lazily opening one TCP connection per channel id.
    function receiveChannelData(channelId, payload) {
        let socket = socketsByChannelId[channelId];
        if (!socket) {
            socket = net.createConnection(cloudServerPort, cloudServerHost);

            // Relay CloudServer responses back over the WebSocket channel.
            socket.on('data', data => {
                ws.send(ChannelMessageV0.
                    encodeChannelDataMessage(channelId, data), err =>
                    _logError(err, 'failed to send channel data message',
                        'receiveChannelData'));
            });

            socket.on('connect', () => {
            });

            socket.on('drain', () => {
            });

            socket.on('error', error => {
                logger.error('failed to connect to S3', {
                    code: error.code,
                    host: error.address,
                    port: error.port,
                });
            });

            // When CloudServer closes its side, tear down the socket and
            // notify the push server that the channel is closed.
            socket.on('end', () => {
                socket.destroy();
                socketsByChannelId[channelId] = null;
                ws.send(ChannelMessageV0.encodeChannelCloseMessage(channelId),
                    err => _logError(err,
                        'failed to send channel close message',
                        'receiveChannelData'));
            });

            socketsByChannelId[channelId] = socket;
        }
        socket.write(payload);
    }

    // NOTE(review): net.Socket has no close() method and entries may be
    // null after 'end'; this handler looks like it would throw when
    // browser access is disabled with live channels — confirm.
    function browserAccessChangeHandler() {
        if (!_config.browserAccessEnabled) {
            socketsByChannelId.forEach(s => s.close());
        }
    }

    ws.on('open', () => {
        connected = true;
        logger.info('connected to push server');

        // Push fresh (uncached) stats whenever a bucket changes.
        metadata.notifyBucketChange(() => {
            pushStats({ noCache: true });
        });
        _config.on('browser-access-enabled-change', browserAccessChangeHandler);

        initiatePing();
    });

    // Guard so the end-of-connection callback fires at most once even if
    // both 'error' and 'close' are emitted.
    const cbOnce = cb ? arsenal.jsutil.once(cb) : null;

    ws.on('close', () => {
        logger.info('disconnected from push server, reconnecting in 10s');
        metadata.notifyBucketChange(null);
        _config.removeListener('browser-access-enabled-change',
            browserAccessChangeHandler);
        // Self-reschedule; note the reconnect call passes no cb.
        setTimeout(startWSManagementClient, 10000, url, token);
        connected = false;

        if (cbOnce) {
            process.nextTick(cbOnce);
        }
    });

    ws.on('error', err => {
        connected = false;
        logger.error('error from push server connection', {
            error: err,
            errorMessage: err.message,
        });
        if (cbOnce) {
            process.nextTick(cbOnce, err);
        }
    });

    ws.on('ping', () => {
        // NOTE(review): _logError is called without its `method` argument
        // here, unlike every other call site — intentional? confirm.
        ws.pong(err => _logError(err, 'failed to send a pong'));
    });

    // A pong resets the kill timer and schedules the next ping.
    ws.on('pong', () => {
        initiatePing();
    });

    // Dispatch incoming push-server messages by type.
    ws.on('message', data => {
        const log = logger.newRequestLogger();
        const message = new ChannelMessageV0(data);
        switch (message.getType()) {
        case CONFIG_OVERLAY_MESSAGE:
            if (!isManagementAgentUsed()) {
                applyAndSaveOverlay(JSON.parse(message.getPayload()), log);
            } else {
                if (overlayMessageListener) {
                    overlayMessageListener(message.getPayload().toString());
                }
            }
            break;
        case METRICS_REQUEST_MESSAGE:
            pushStats();
            break;
        case CHANNEL_CLOSE_MESSAGE:
            closeChannel(message.getChannelNumber());
            break;
        case CHANNEL_PAYLOAD_MESSAGE:
            // browserAccessEnabled defaults to true unless explicitly false
            if (_config.browserAccessEnabled !== false) {
                receiveChannelData(
                    message.getChannelNumber(), message.getPayload());
            }
            break;
        default:
            logger.error('unknown message type from push server',
                { messageType: message.getType() });
        }
    });
}
|
|
||||||
|
|
||||||
// Registers the callback invoked with the raw overlay payload when a
// configuration overlay message arrives (management-agent mode only).
function addOverlayMessageListener(callback) {
    const isFunction = typeof callback === 'function';
    assert(isFunction);
    overlayMessageListener = callback;
}
|
|
||||||
|
|
||||||
// Starts a tiny HTTP server exposing /_/healthcheck, which reports 200
// while the push-server WebSocket is connected and 503 otherwise.
function startPushConnectionHealthCheckServer(cb) {
    const handler = (req, res) => {
        let statusCode = 503;
        let body = 'Not Connected';
        if (req.url !== '/_/healthcheck') {
            statusCode = 404;
            body = 'Not Found';
        } else if (connected) {
            statusCode = 200;
            body = 'Connected';
        }
        res.writeHead(statusCode);
        res.write(body);
        res.end();
    };
    http.createServer(handler).listen(_config.port, cb);
}
|
|
||||||
|
|
||||||
// Exposed API: push-mode (WebSocket) management client and helpers.
module.exports = {
    createWSAgent,
    startWSManagementClient,
    startPushConnectionHealthCheckServer,
    addOverlayMessageListener,
};
|
|
|
@ -6,6 +6,9 @@ const BucketInfo = require('arsenal').models.BucketInfo;
|
||||||
const { isBucketAuthorized, isObjAuthorized } =
|
const { isBucketAuthorized, isObjAuthorized } =
|
||||||
require('../api/apiUtils/authorization/permissionChecks');
|
require('../api/apiUtils/authorization/permissionChecks');
|
||||||
const bucketShield = require('../api/apiUtils/bucket/bucketShield');
|
const bucketShield = require('../api/apiUtils/bucket/bucketShield');
|
||||||
|
const { onlyOwnerAllowed } = require('../../constants');
|
||||||
|
const { actionNeedQuotaCheck, actionWithDataDeletion } = require('arsenal/build/lib/policyEvaluator/RequestContext');
|
||||||
|
const { processBytesToWrite, validateQuotas } = require('../api/apiUtils/quotas/quotaUtils');
|
||||||
|
|
||||||
/** getNullVersionFromMaster - retrieves the null version
|
/** getNullVersionFromMaster - retrieves the null version
|
||||||
* metadata via retrieving the master key
|
* metadata via retrieving the master key
|
||||||
|
@ -152,9 +155,6 @@ function validateBucket(bucket, params, log, actionImplicitDenies = {}) {
|
||||||
});
|
});
|
||||||
return errors.NoSuchBucket;
|
return errors.NoSuchBucket;
|
||||||
}
|
}
|
||||||
// if requester is not bucket owner, bucket policy actions should be denied with
|
|
||||||
// MethodNotAllowed error
|
|
||||||
const onlyOwnerAllowed = ['bucketDeletePolicy', 'bucketGetPolicy', 'bucketPutPolicy'];
|
|
||||||
const canonicalID = authInfo.getCanonicalID();
|
const canonicalID = authInfo.getCanonicalID();
|
||||||
if (!Array.isArray(requestType)) {
|
if (!Array.isArray(requestType)) {
|
||||||
requestType = [requestType];
|
requestType = [requestType];
|
||||||
|
@ -184,7 +184,7 @@ function validateBucket(bucket, params, log, actionImplicitDenies = {}) {
|
||||||
* @return {undefined} - and call callback with params err, bucket md
|
* @return {undefined} - and call callback with params err, bucket md
|
||||||
*/
|
*/
|
||||||
function standardMetadataValidateBucketAndObj(params, actionImplicitDenies, log, callback) {
|
function standardMetadataValidateBucketAndObj(params, actionImplicitDenies, log, callback) {
|
||||||
const { authInfo, bucketName, objectKey, versionId, getDeleteMarker, request } = params;
|
const { authInfo, bucketName, objectKey, versionId, getDeleteMarker, request, withVersionId } = params;
|
||||||
let requestType = params.requestType;
|
let requestType = params.requestType;
|
||||||
if (!Array.isArray(requestType)) {
|
if (!Array.isArray(requestType)) {
|
||||||
requestType = [requestType];
|
requestType = [requestType];
|
||||||
|
@ -238,6 +238,21 @@ function standardMetadataValidateBucketAndObj(params, actionImplicitDenies, log,
|
||||||
}
|
}
|
||||||
return next(null, bucket, objMD);
|
return next(null, bucket, objMD);
|
||||||
},
|
},
|
||||||
|
(bucket, objMD, next) => {
|
||||||
|
const needQuotaCheck = requestType => requestType.some(type => actionNeedQuotaCheck[type] ||
|
||||||
|
actionWithDataDeletion[type]);
|
||||||
|
const checkQuota = params.checkQuota === undefined ? needQuotaCheck(requestType) : params.checkQuota;
|
||||||
|
// withVersionId cover cases when an object is being restored with a specific version ID.
|
||||||
|
// In this case, the storage space was already accounted for when the RestoreObject API call
|
||||||
|
// was made, so we don't need to add any inflight, but quota must be evaluated.
|
||||||
|
if (!checkQuota) {
|
||||||
|
return next(null, bucket, objMD);
|
||||||
|
}
|
||||||
|
const contentLength = processBytesToWrite(request.apiMethod, bucket, versionId,
|
||||||
|
request?.parsedContentLength || 0, objMD, params.destObjMD);
|
||||||
|
return validateQuotas(request, bucket, request.accountQuotas, requestType, request.apiMethod,
|
||||||
|
contentLength, withVersionId, log, err => next(err, bucket, objMD));
|
||||||
|
},
|
||||||
], (err, bucket, objMD) => {
|
], (err, bucket, objMD) => {
|
||||||
if (err) {
|
if (err) {
|
||||||
// still return bucket for cors headers
|
// still return bucket for cors headers
|
||||||
|
@ -279,6 +294,7 @@ module.exports = {
|
||||||
validateBucket,
|
validateBucket,
|
||||||
metadataGetObject,
|
metadataGetObject,
|
||||||
metadataGetObjects,
|
metadataGetObjects,
|
||||||
|
processBytesToWrite,
|
||||||
standardMetadataValidateBucketAndObj,
|
standardMetadataValidateBucketAndObj,
|
||||||
standardMetadataValidateBucket,
|
standardMetadataValidateBucket,
|
||||||
};
|
};
|
||||||
|
|
|
@ -2,9 +2,9 @@ const MetadataWrapper = require('arsenal').storage.metadata.MetadataWrapper;
|
||||||
const { config } = require('../Config');
|
const { config } = require('../Config');
|
||||||
const logger = require('../utilities/logger');
|
const logger = require('../utilities/logger');
|
||||||
const constants = require('../../constants');
|
const constants = require('../../constants');
|
||||||
const bucketclient = require('bucketclient');
|
|
||||||
|
|
||||||
const clientName = config.backends.metadata;
|
const clientName = config.backends.metadata;
|
||||||
|
let bucketclient;
|
||||||
let params;
|
let params;
|
||||||
if (clientName === 'mem') {
|
if (clientName === 'mem') {
|
||||||
params = {};
|
params = {};
|
||||||
|
@ -21,6 +21,7 @@ if (clientName === 'mem') {
|
||||||
noDbOpen: null,
|
noDbOpen: null,
|
||||||
};
|
};
|
||||||
} else if (clientName === 'scality') {
|
} else if (clientName === 'scality') {
|
||||||
|
bucketclient = require('bucketclient');
|
||||||
params = {
|
params = {
|
||||||
bucketdBootstrap: config.bucketd.bootstrap,
|
bucketdBootstrap: config.bucketd.bootstrap,
|
||||||
bucketdLog: config.bucketd.log,
|
bucketdLog: config.bucketd.log,
|
||||||
|
|
|
@ -0,0 +1,17 @@
|
||||||
|
const { config } = require('../Config');
const { ScubaClientImpl } = require('./scuba/wrapper');

// Quota backend selector: expose the Scuba-backed client when configured,
// otherwise a stub whose `enabled` flag disables quota enforcement.
let instance = null;

if (config.backends.quota === 'scuba') {
    instance = new ScubaClientImpl(config);
} else {
    instance = {
        enabled: false,
    };
}

module.exports = instance;
|
|
@ -0,0 +1,80 @@
|
||||||
|
const util = require('util');
|
||||||
|
const { default: ScubaClient } = require('scubaclient');
|
||||||
|
const { externalBackendHealthCheckInterval } = require('../../../constants');
|
||||||
|
const monitoring = require('../../utilities/monitoringHandler');
|
||||||
|
|
||||||
|
// Scuba-backed quota client with a periodic health check that enables or
// disables quota enforcement based on service availability and data
// staleness.
class ScubaClientImpl extends ScubaClient {
    constructor(config) {
        super(config.scuba);
        // NOTE(review): this first assignment is dead — `enabled` is set
        // again unconditionally by the if/else below.
        this.enabled = false;
        // Maximum tolerated age (ms) of the Scuba metrics data.
        this.maxStaleness = config.quota.maxStaleness;
        this._healthCheckTimer = null;
        this._log = null;
        // Callback-style wrapper around the async getLatestMetrics method.
        this._getLatestMetricsCallback = util.callbackify(this.getLatestMetrics);

        // Quotas start enabled only when a Scuba endpoint is configured.
        if (config.scuba) {
            this.enabled = true;
        } else {
            this.enabled = false;
        }
    }

    // Stores the logger and starts the periodic health check when enabled.
    setup(log) {
        this._log = log;
        if (this.enabled) {
            this.periodicHealthCheck();
        }
    }

    // Runs a single health check; enables quotas on success, disables them
    // on failure or when the reported data is older than maxStaleness.
    // The returned promise never rejects (failures are absorbed in .catch).
    _healthCheck() {
        return this.healthCheck().then(data => {
            if (data?.date) {
                const date = new Date(data.date);
                if (Date.now() - date.getTime() > this.maxStaleness) {
                    throw new Error('Data is stale, disabling quotas');
                }
            }
            // Only log on state transition from disabled to enabled.
            if (!this.enabled) {
                this._log.info('Scuba health check passed, enabling quotas');
            }
            monitoring.utilizationServiceAvailable.set(1);
            this.enabled = true;
        }).catch(err => {
            // Only log on state transition from enabled to disabled.
            if (this.enabled) {
                this._log.warn('Scuba health check failed, disabling quotas', {
                    err: err.name,
                    description: err.message,
                });
            }
            monitoring.utilizationServiceAvailable.set(0);
            this.enabled = false;
        });
    }

    // Runs an immediate health check, then repeats it at a frequency taken
    // from SCUBA_HEALTHCHECK_FREQUENCY (ms) or the default interval.
    periodicHealthCheck() {
        if (this._healthCheckTimer) {
            clearInterval(this._healthCheckTimer);
        }
        this._healthCheck();
        this._healthCheckTimer = setInterval(async () => {
            this._healthCheck();
        }, Number(process.env.SCUBA_HEALTHCHECK_FREQUENCY)
            || externalBackendHealthCheckInterval);
    }

    // Fetches utilization metrics through the callbackified client method,
    // recording the request duration and outcome in monitoring histograms.
    getUtilizationMetrics(metricsClass, resourceName, options, body, callback) {
        const requestStartTime = process.hrtime.bigint();
        return this._getLatestMetricsCallback(metricsClass, resourceName, options, body, (err, data) => {
            // Convert ns to seconds for the Prometheus observation.
            const responseTimeInNs = Number(process.hrtime.bigint() - requestStartTime);
            monitoring.utilizationMetricsRetrievalDuration.labels({
                code: err ? (err.statusCode || 500) : 200,
                class: metricsClass,
            }).observe(responseTimeInNs / 1e9);
            return callback(err, data);
        });
    }
}
|
||||||
|
|
||||||
|
// Exposed API: Scuba-backed quota client implementation.
module.exports = {
    ScubaClientImpl,
};
|
|
@ -37,6 +37,7 @@ const kms = require('../kms/wrapper');
|
||||||
const { listLifecycleCurrents } = require('../api/backbeat/listLifecycleCurrents');
|
const { listLifecycleCurrents } = require('../api/backbeat/listLifecycleCurrents');
|
||||||
const { listLifecycleNonCurrents } = require('../api/backbeat/listLifecycleNonCurrents');
|
const { listLifecycleNonCurrents } = require('../api/backbeat/listLifecycleNonCurrents');
|
||||||
const { listLifecycleOrphanDeleteMarkers } = require('../api/backbeat/listLifecycleOrphanDeleteMarkers');
|
const { listLifecycleOrphanDeleteMarkers } = require('../api/backbeat/listLifecycleOrphanDeleteMarkers');
|
||||||
|
const { objectDeleteInternal } = require('../api/objectDelete');
|
||||||
const { CURRENT_TYPE, NON_CURRENT_TYPE, ORPHAN_DM_TYPE } = constants.lifecycleListing;
|
const { CURRENT_TYPE, NON_CURRENT_TYPE, ORPHAN_DM_TYPE } = constants.lifecycleListing;
|
||||||
|
|
||||||
const lifecycleTypeCalls = {
|
const lifecycleTypeCalls = {
|
||||||
|
@ -709,6 +710,19 @@ function putObject(request, response, log, callback) {
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
function deleteObjectFromExpiration(request, response, userInfo, log, callback) {
|
||||||
|
return objectDeleteInternal(userInfo, request, log, true, err => {
|
||||||
|
if (err) {
|
||||||
|
log.error('error deleting object from expiration', {
|
||||||
|
error: err,
|
||||||
|
method: 'deleteObjectFromExpiration',
|
||||||
|
});
|
||||||
|
return callback(err);
|
||||||
|
}
|
||||||
|
return _respond(response, {}, log, callback);
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
function deleteObject(request, response, log, callback) {
|
function deleteObject(request, response, log, callback) {
|
||||||
const err = _checkMultipleBackendRequest(request, log);
|
const err = _checkMultipleBackendRequest(request, log);
|
||||||
if (err) {
|
if (err) {
|
||||||
|
@ -1274,6 +1288,7 @@ const backbeatRoutes = {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
DELETE: {
|
DELETE: {
|
||||||
|
expiration: deleteObjectFromExpiration,
|
||||||
multiplebackenddata: {
|
multiplebackenddata: {
|
||||||
deleteobject: deleteObject,
|
deleteobject: deleteObject,
|
||||||
deleteobjecttagging: deleteObjectTagging,
|
deleteobjecttagging: deleteObjectTagging,
|
||||||
|
|
|
@ -0,0 +1,225 @@
|
||||||
|
const url = require('url');
|
||||||
|
const async = require('async');
|
||||||
|
const vault = require('../auth/vault');
|
||||||
|
const putVeeamFile = require('./veeam/put');
|
||||||
|
const getVeeamFile = require('./veeam/get');
|
||||||
|
const headVeeamFile = require('./veeam/head');
|
||||||
|
const listVeeamFiles = require('./veeam/list');
|
||||||
|
const { deleteVeeamFile } = require('./veeam/delete');
|
||||||
|
const { auth, s3routes, errors } = require('arsenal');
|
||||||
|
const { _decodeURI, validPath } = require('./veeam/utils');
|
||||||
|
const { routesUtils } = require('arsenal/build/lib/s3routes');
|
||||||
|
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
|
||||||
|
const prepareRequestContexts = require('../api/apiUtils/authorization/prepareRequestContexts');
|
||||||
|
|
||||||
|
const { responseXMLBody } = s3routes.routesUtils;
|
||||||
|
|
||||||
|
auth.setHandler(vault);
|
||||||
|
|
||||||
|
const validObjectKeys = [
|
||||||
|
`${validPath}system.xml`,
|
||||||
|
`${validPath}capacity.xml`,
|
||||||
|
];
|
||||||
|
|
||||||
|
const apiToAction = {
|
||||||
|
PUT: 'PutObject',
|
||||||
|
GET: 'GetObject',
|
||||||
|
HEAD: 'HeadObject',
|
||||||
|
DELETE: 'DeleteObject',
|
||||||
|
LIST: 'ListObjects',
|
||||||
|
};
|
||||||
|
|
||||||
|
const routeMap = {
|
||||||
|
GET: getVeeamFile,
|
||||||
|
PUT: putVeeamFile,
|
||||||
|
HEAD: headVeeamFile,
|
||||||
|
DELETE: deleteVeeamFile,
|
||||||
|
LIST: listVeeamFiles,
|
||||||
|
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Validator for the Veeam12 custom routes. Ensures that bucket name and
|
||||||
|
* object name are correct, and that the bucket exists in the DB.
|
||||||
|
* @param {string} bucketName - name of the bucket
|
||||||
|
* @param {string} objectKey - key of the object
|
||||||
|
* @param {array | null} requestQueryParams - request query parameters
|
||||||
|
* @param {string} method - HTTP verb
|
||||||
|
* @param {object} log - request logger
|
||||||
|
* @returns {Error | undefined} error or undefined
|
||||||
|
*/
|
||||||
|
function checkBucketAndKey(bucketName, objectKey, requestQueryParams, method, log) {
|
||||||
|
// In case bucket name is not specified and the request contains an
|
||||||
|
// object key or is not a GET, then the bucket name is mandatory.
|
||||||
|
// Reject the request in this case.
|
||||||
|
if (!bucketName && !(method === 'GET' && !objectKey)) {
|
||||||
|
log.debug('empty bucket name', { method: 'checkBucketAndKey' });
|
||||||
|
return errors.MethodNotAllowed;
|
||||||
|
}
|
||||||
|
if (typeof bucketName !== 'string' || routesUtils.isValidBucketName(bucketName, []) === false) {
|
||||||
|
log.debug('invalid bucket name', { bucketName });
|
||||||
|
if (method === 'DELETE') {
|
||||||
|
return errors.NoSuchBucket;
|
||||||
|
}
|
||||||
|
return errors.InvalidBucketName;
|
||||||
|
}
|
||||||
|
if (method !== 'LIST') {
|
||||||
|
// Reject any unsupported request, but allow downloads and deletes from UI
|
||||||
|
// Download relies on GETs calls with auth in query parameters, that can be
|
||||||
|
// checked if 'X-Amz-Credential' is included.
|
||||||
|
// Deletion requires that the tags of the object are returned.
|
||||||
|
if (requestQueryParams && Object.keys(requestQueryParams).length > 0
|
||||||
|
&& !(method === 'GET' && (requestQueryParams['X-Amz-Credential'] || ('tagging' in requestQueryParams)))) {
|
||||||
|
return errors.InvalidRequest
|
||||||
|
.customizeDescription('The Veeam SOSAPI folder does not support this action.');
|
||||||
|
}
|
||||||
|
if (typeof objectKey !== 'string' || !validObjectKeys.includes(objectKey)) {
|
||||||
|
log.debug('invalid object name', { objectKey });
|
||||||
|
return errors.InvalidArgument;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return undefined;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Query the authorization service for the request, and extract the bucket
|
||||||
|
* and, if applicable, object metadata according to the request method.
|
||||||
|
*
|
||||||
|
* @param {object} request - incoming request
|
||||||
|
* @param {object} response - response object
|
||||||
|
* @param {string} api - HTTP verb
|
||||||
|
* @param {object} log - logger instance
|
||||||
|
* @param {function} callback -
|
||||||
|
* @returns {undefined}
|
||||||
|
*/
|
||||||
|
function authorizationMiddleware(request, response, api, log, callback) {
|
||||||
|
if (!api) {
|
||||||
|
return responseXMLBody(errors.AccessDenied, null, response, log);
|
||||||
|
}
|
||||||
|
const requestContexts = prepareRequestContexts(api, request);
|
||||||
|
return async.waterfall([
|
||||||
|
next => auth.server.doAuth(request, log, (err, userInfo, authorizationResults, streamingV4Params) => {
|
||||||
|
if (err) {
|
||||||
|
log.debug('authentication error', {
|
||||||
|
error: err,
|
||||||
|
method: request.method,
|
||||||
|
bucketName: request.bucketName,
|
||||||
|
objectKey: request.objectKey,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
/* eslint-disable no-param-reassign */
|
||||||
|
request.authorizationResults = authorizationResults;
|
||||||
|
request.streamingV4Params = streamingV4Params;
|
||||||
|
/* eslint-enable no-param-reassign */
|
||||||
|
return next(err, userInfo);
|
||||||
|
}, 's3', requestContexts),
|
||||||
|
(userInfo, next) => {
|
||||||
|
// Ensure only supported HTTP verbs and actions are called,
|
||||||
|
// otherwise deny access
|
||||||
|
const requestType = apiToAction[api];
|
||||||
|
if (!requestType) {
|
||||||
|
return next(errors.AccessDenied);
|
||||||
|
}
|
||||||
|
const mdValParams = {
|
||||||
|
bucketName: request.bucketName,
|
||||||
|
authInfo: userInfo,
|
||||||
|
requestType,
|
||||||
|
request,
|
||||||
|
};
|
||||||
|
return next(null, mdValParams);
|
||||||
|
},
|
||||||
|
(mdValParams, next) => standardMetadataValidateBucket(mdValParams, request.actionImplicitDenies, log, next),
|
||||||
|
], (err, bucketMd) => {
|
||||||
|
if (err || !bucketMd) {
|
||||||
|
return responseXMLBody(err, null, response, log);
|
||||||
|
}
|
||||||
|
return callback(request, response, bucketMd, log);
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
function _normalizeVeeamRequest(req) {
|
||||||
|
/* eslint-disable no-param-reassign */
|
||||||
|
// Rewriting the URL is needed for the V4 signature check: the initial
|
||||||
|
// request targets https://s3.subdomain/bucketName/objectKey, but the
|
||||||
|
// custom ingresses and/or nginx configuration for the UI will redirect this
|
||||||
|
// call to .../_/veeam/bucketName/objectKey. We need to revert the custom
|
||||||
|
// path only used for routing before computing the V4 signature.
|
||||||
|
req.url = req.url.replace('/_/veeam', '');
|
||||||
|
// Assign multiple common (extracted) parameters to the request object
|
||||||
|
const parsedUrl = url.parse(req.url, true);
|
||||||
|
req.path = _decodeURI(parsedUrl.pathname);
|
||||||
|
const pathArr = req.path.split('/');
|
||||||
|
req.query = parsedUrl.query;
|
||||||
|
req.bucketName = pathArr[1];
|
||||||
|
req.objectKey = pathArr.slice(2).join('/');
|
||||||
|
const contentLength = req.headers['x-amz-decoded-content-length'] ?
|
||||||
|
req.headers['x-amz-decoded-content-length'] :
|
||||||
|
req.headers['content-length'];
|
||||||
|
req.parsedContentLength =
|
||||||
|
Number.parseInt(contentLength?.toString() ?? '', 10);
|
||||||
|
/* eslint-enable no-param-reassign */
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Ensure only supported methods are supported, otherwise, return an error
|
||||||
|
* @param {string} reqMethod - the HTTP verb of the request
|
||||||
|
* @param {string} reqQuery - request query
|
||||||
|
* @param {object} reqHeaders - request headers
|
||||||
|
* @returns {object} - method or error
|
||||||
|
*/
|
||||||
|
function checkUnsupportedRoutes(reqMethod, reqQuery, reqHeaders) {
|
||||||
|
const method = routeMap[reqMethod];
|
||||||
|
if (!method || (!reqQuery && !reqHeaders)) {
|
||||||
|
return { error: errors.MethodNotAllowed };
|
||||||
|
}
|
||||||
|
return { method };
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Router for the Veeam custom files
|
||||||
|
* @param {string} clientIP - client IP address
|
||||||
|
* @param {object} request - request object
|
||||||
|
* @param {object} response - response object
|
||||||
|
* @param {object} log - requets logger
|
||||||
|
* @returns {undefined}
|
||||||
|
*/
|
||||||
|
function routeVeeam(clientIP, request, response, log) {
|
||||||
|
// Attach the apiMethod method to the request, so it can used by monitoring in the server
|
||||||
|
// eslint-disable-next-line no-param-reassign
|
||||||
|
request.apiMethod = 'routeVeeam';
|
||||||
|
_normalizeVeeamRequest(request);
|
||||||
|
|
||||||
|
log.info('routing request', {
|
||||||
|
method: 'routeVeeam',
|
||||||
|
url: request.url,
|
||||||
|
clientIP,
|
||||||
|
resourceType: request.resourceType,
|
||||||
|
subResource: request.subResource,
|
||||||
|
});
|
||||||
|
|
||||||
|
// Rewrite action to LIST for list-objects
|
||||||
|
const requestMethod = request.method === 'GET' && !request.objectKey ? 'LIST' : request.method;
|
||||||
|
const { error, method } = checkUnsupportedRoutes(requestMethod, request.query, request.headers);
|
||||||
|
|
||||||
|
if (error) {
|
||||||
|
log.error('error validating route or uri params', { error });
|
||||||
|
return responseXMLBody(error, '', response, log);
|
||||||
|
}
|
||||||
|
const bucketOrKeyError = checkBucketAndKey(
|
||||||
|
request.bucketName, request.objectKey, request.query, requestMethod, log);
|
||||||
|
|
||||||
|
if (bucketOrKeyError) {
|
||||||
|
log.error('error with bucket or key value',
|
||||||
|
{ error: bucketOrKeyError });
|
||||||
|
return routesUtils.responseXMLBody(bucketOrKeyError, null, response, log);
|
||||||
|
}
|
||||||
|
return authorizationMiddleware(request, response, requestMethod, log, method);
|
||||||
|
}
|
||||||
|
|
||||||
|
module.exports = {
|
||||||
|
routeVeeam,
|
||||||
|
checkUnsupportedRoutes,
|
||||||
|
_normalizeVeeamRequest,
|
||||||
|
authorizationMiddleware,
|
||||||
|
checkBucketAndKey,
|
||||||
|
validObjectKeys,
|
||||||
|
};
|
|
@ -0,0 +1,72 @@
|
||||||
|
|
||||||
|
const { s3routes, errors } = require('arsenal');
|
||||||
|
const metadata = require('../../metadata/wrapper');
|
||||||
|
const { isSystemXML } = require('./utils');
|
||||||
|
const { responseXMLBody, responseNoBody } = s3routes.routesUtils;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Deletes system.xml or capacity.xml files for a given bucket.
|
||||||
|
*
|
||||||
|
* @param {string} bucketName - bucket name
|
||||||
|
* @param {string} objectKey - object key to delete
|
||||||
|
* @param {object} bucketMd - bucket metadata from the db
|
||||||
|
* @param {object} log - logger object
|
||||||
|
* @param {function} callback - callback
|
||||||
|
* @returns {undefined} -
|
||||||
|
*/
|
||||||
|
function deleteVeeamCapabilities(bucketName, objectKey, bucketMd, log, callback) {
|
||||||
|
const capabilityFieldName = isSystemXML(objectKey) ? 'SystemInfo' : 'CapacityInfo';
|
||||||
|
|
||||||
|
// Ensure file exists in metadata before deletion
|
||||||
|
if (!bucketMd._capabilities?.VeeamSOSApi
|
||||||
|
|| !bucketMd._capabilities?.VeeamSOSApi[capabilityFieldName]) {
|
||||||
|
return callback(errors.NoSuchKey);
|
||||||
|
}
|
||||||
|
// eslint-disable-next-line no-param-reassign
|
||||||
|
delete bucketMd._capabilities.VeeamSOSApi[capabilityFieldName];
|
||||||
|
|
||||||
|
// Delete the whole veeam capacity if nothing is left
|
||||||
|
if (Object.keys(bucketMd._capabilities.VeeamSOSApi).length === 0) {
|
||||||
|
// eslint-disable-next-line no-param-reassign
|
||||||
|
delete bucketMd._capabilities.VeeamSOSApi;
|
||||||
|
// Delete all capacities if no capacity is left
|
||||||
|
if (Object.keys(bucketMd._capabilities).length === 0) {
|
||||||
|
// eslint-disable-next-line no-param-reassign
|
||||||
|
delete bucketMd._capabilities;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Update the bucket metadata
|
||||||
|
return metadata.deleteBucketCapabilities(bucketName, bucketMd, 'VeeamSOSApi', capabilityFieldName, log, err => {
|
||||||
|
if (err) {
|
||||||
|
return callback(err);
|
||||||
|
}
|
||||||
|
return callback();
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Deletes system.xml or capacity.xml files for a given bucket. handle
|
||||||
|
* request context for custom routes.
|
||||||
|
*
|
||||||
|
* @param {object} request - request object
|
||||||
|
* @param {object} response - response object
|
||||||
|
* @param {object} bucketMd - bucket metadata from the db
|
||||||
|
* @param {object} log - logger object
|
||||||
|
* @returns {undefined} -
|
||||||
|
*/
|
||||||
|
function deleteVeeamFile(request, response, bucketMd, log) {
|
||||||
|
if (!bucketMd) {
|
||||||
|
return responseXMLBody(errors.NoSuchBucket, null, response, log);
|
||||||
|
}
|
||||||
|
return deleteVeeamCapabilities(request.bucketName, request.objectKey, bucketMd, log, err => {
|
||||||
|
if (err) {
|
||||||
|
return responseXMLBody(err, null, response, log);
|
||||||
|
}
|
||||||
|
return responseNoBody(null, null, response, 204, log);
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
module.exports = {
|
||||||
|
deleteVeeamFile,
|
||||||
|
deleteVeeamCapabilities,
|
||||||
|
};
|
|
@ -0,0 +1,46 @@
|
||||||
|
const xml2js = require('xml2js');
|
||||||
|
const { errors } = require('arsenal');
|
||||||
|
const metadata = require('../../metadata/wrapper');
|
||||||
|
const { respondWithData, buildHeadXML, getFileToBuild } = require('./utils');
|
||||||
|
const { responseXMLBody } = require('arsenal/build/lib/s3routes/routesUtils');
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns system.xml or capacity.xml files for a given bucket.
|
||||||
|
*
|
||||||
|
* @param {object} request - request object
|
||||||
|
* @param {object} response - response object
|
||||||
|
* @param {object} bucketMd - bucket metadata from the db
|
||||||
|
* @param {object} log - logger object
|
||||||
|
* @returns {undefined} -
|
||||||
|
*/
|
||||||
|
function getVeeamFile(request, response, bucketMd, log) {
|
||||||
|
if (!bucketMd) {
|
||||||
|
return responseXMLBody(errors.NoSuchBucket, null, response, log);
|
||||||
|
}
|
||||||
|
if ('tagging' in request.query) {
|
||||||
|
return respondWithData(request, response, log, bucketMd,
|
||||||
|
buildHeadXML('<Tagging><TagSet></TagSet></Tagging>'));
|
||||||
|
}
|
||||||
|
return metadata.getBucket(request.bucketName, log, (err, data) => {
|
||||||
|
if (err) {
|
||||||
|
return responseXMLBody(errors.InternalError, null, response, log);
|
||||||
|
}
|
||||||
|
const fileToBuild = getFileToBuild(request, data._capabilities?.VeeamSOSApi);
|
||||||
|
if (fileToBuild.error) {
|
||||||
|
return responseXMLBody(fileToBuild.error, null, response, log);
|
||||||
|
}
|
||||||
|
let modified = new Date().toISOString();
|
||||||
|
// Extract the last modified date, but do not include it when computing
|
||||||
|
// the file's ETag (md5)
|
||||||
|
modified = fileToBuild.value.LastModified;
|
||||||
|
delete fileToBuild.value.LastModified;
|
||||||
|
|
||||||
|
const builder = new xml2js.Builder({
|
||||||
|
headless: true,
|
||||||
|
});
|
||||||
|
return respondWithData(request, response, log, data,
|
||||||
|
buildHeadXML(builder.buildObject(fileToBuild.value)), modified);
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
module.exports = getVeeamFile;
|
|
@ -0,0 +1,43 @@
|
||||||
|
const xml2js = require('xml2js');
|
||||||
|
const { errors } = require('arsenal');
|
||||||
|
const metadata = require('../../metadata/wrapper');
|
||||||
|
const { getResponseHeader, buildHeadXML, getFileToBuild } = require('./utils');
|
||||||
|
const { responseXMLBody, responseContentHeaders } = require('arsenal/build/lib/s3routes/routesUtils');
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns system.xml or capacity.xml files metadata for a given bucket.
|
||||||
|
*
|
||||||
|
* @param {object} request - request object
|
||||||
|
* @param {object} response - response object
|
||||||
|
* @param {object} bucketMd - bucket metadata from the db
|
||||||
|
* @param {object} log - logger object
|
||||||
|
* @returns {undefined} -
|
||||||
|
*/
|
||||||
|
function headVeeamFile(request, response, bucketMd, log) {
|
||||||
|
if (!bucketMd) {
|
||||||
|
return responseXMLBody(errors.NoSuchBucket, null, response, log);
|
||||||
|
}
|
||||||
|
return metadata.getBucket(request.bucketName, log, (err, data) => {
|
||||||
|
if (err) {
|
||||||
|
return responseXMLBody(errors.InternalError, null, response, log);
|
||||||
|
}
|
||||||
|
const fileToBuild = getFileToBuild(request, data._capabilities?.VeeamSOSApi);
|
||||||
|
if (fileToBuild.error) {
|
||||||
|
return responseXMLBody(fileToBuild.error, null, response, log);
|
||||||
|
}
|
||||||
|
let modified = new Date().toISOString();
|
||||||
|
// Extract the last modified date, but do not include it when computing
|
||||||
|
// the file's ETag (md5)
|
||||||
|
modified = fileToBuild.value.LastModified;
|
||||||
|
delete fileToBuild.value.LastModified;
|
||||||
|
// Recompute file content to generate appropriate content-md5 header
|
||||||
|
const builder = new xml2js.Builder({
|
||||||
|
headless: true,
|
||||||
|
});
|
||||||
|
const dataBuffer = Buffer.from(buildHeadXML(builder.buildObject(fileToBuild)));
|
||||||
|
return responseContentHeaders(null, {}, getResponseHeader(request, data,
|
||||||
|
dataBuffer, modified, log), response, log);
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
module.exports = headVeeamFile;
|
|
@ -0,0 +1,132 @@
|
||||||
|
const url = require('url');
|
||||||
|
const xml2js = require('xml2js');
|
||||||
|
const { errors } = require('arsenal');
|
||||||
|
const querystring = require('querystring');
|
||||||
|
const metadata = require('../../metadata/wrapper');
|
||||||
|
const { responseXMLBody } = require('arsenal/build/lib/s3routes/routesUtils');
|
||||||
|
const { respondWithData, getResponseHeader, buildHeadXML, validPath } = require('./utils');
|
||||||
|
const { processVersions, processMasterVersions } = require('../../api/bucketGet');
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Utility function to build a standard response for the LIST route.
|
||||||
|
* It adds the supported path by default as a static and default file.
|
||||||
|
*
|
||||||
|
* @param {object} request - request object
|
||||||
|
* @param {object} arrayOfFiles - array of files headers
|
||||||
|
* @param {boolean} [versioned] - set to true if versioned listing is enabled
|
||||||
|
* @returns {string} - the formatted XML content to send
|
||||||
|
*/
|
||||||
|
function buildXMLResponse(request, arrayOfFiles, versioned = false) {
|
||||||
|
const parsedUrl = url.parse(request.url);
|
||||||
|
const parsedQs = querystring.parse(parsedUrl.query);
|
||||||
|
|
||||||
|
const listParams = {
|
||||||
|
prefix: validPath,
|
||||||
|
maxKeys: parsedQs['max-keys'] || 1000,
|
||||||
|
delimiter: '/',
|
||||||
|
};
|
||||||
|
const list = {
|
||||||
|
IsTruncated: false,
|
||||||
|
Versions: [],
|
||||||
|
Contents: [],
|
||||||
|
CommonPrefixes: [],
|
||||||
|
};
|
||||||
|
const entries = arrayOfFiles.map(file => ({
|
||||||
|
key: file.name,
|
||||||
|
value: {
|
||||||
|
IsDeleteMarker: false,
|
||||||
|
IsNull: true,
|
||||||
|
LastModified: file['Last-Modified'],
|
||||||
|
// Generated ETag alrady contains quotes, removing them here
|
||||||
|
ETag: file.ETag.substring(1, file.ETag.length - 1),
|
||||||
|
Size: file['Content-Length'],
|
||||||
|
Owner: {
|
||||||
|
ID: 0,
|
||||||
|
DisplayName: 'Veeam SOSAPI',
|
||||||
|
},
|
||||||
|
StorageClass: 'VIRTUAL',
|
||||||
|
}
|
||||||
|
}));
|
||||||
|
entries.push({
|
||||||
|
key: validPath,
|
||||||
|
value: {
|
||||||
|
IsDeleteMarker: false,
|
||||||
|
IsNull: true,
|
||||||
|
LastModified: new Date().toISOString(),
|
||||||
|
ETag: 'd41d8cd98f00b204e9800998ecf8427e',
|
||||||
|
Size: 0,
|
||||||
|
Owner: {
|
||||||
|
ID: 0,
|
||||||
|
DisplayName: 'Veeam SOSAPI',
|
||||||
|
},
|
||||||
|
StorageClass: 'VIRTUAL',
|
||||||
|
}
|
||||||
|
});
|
||||||
|
// Add the folder as the base file
|
||||||
|
if (versioned) {
|
||||||
|
list.Versions = entries;
|
||||||
|
} else {
|
||||||
|
list.Contents = entries;
|
||||||
|
}
|
||||||
|
const processingXMLFunction = versioned ? processVersions : processMasterVersions;
|
||||||
|
return processingXMLFunction(request.bucketName, listParams, list);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* List system.xml and/or capacity.xml files for a given bucket.
|
||||||
|
*
|
||||||
|
* @param {object} request - request object
|
||||||
|
* @param {object} response - response object
|
||||||
|
* @param {object} bucketMd - bucket metadata from the db
|
||||||
|
* @param {object} log - logger object
|
||||||
|
* @returns {undefined} -
|
||||||
|
*/
|
||||||
|
function listVeeamFiles(request, response, bucketMd, log) {
|
||||||
|
if (!bucketMd) {
|
||||||
|
return responseXMLBody(errors.NoSuchBucket, null, response, log);
|
||||||
|
}
|
||||||
|
// Only accept list-type query parameter
|
||||||
|
if (!('list-type' in request.query) && !('versions' in request.query)) {
|
||||||
|
return responseXMLBody(errors.InvalidRequest
|
||||||
|
.customizeDescription('The Veeam folder does not support this action.'), null, response, log);
|
||||||
|
}
|
||||||
|
return metadata.getBucket(request.bucketName, log, (err, data) => {
|
||||||
|
if (err) {
|
||||||
|
return responseXMLBody(errors.InternalError, null, response, log);
|
||||||
|
}
|
||||||
|
const filesToBuild = [];
|
||||||
|
const fieldsToGenerate = [];
|
||||||
|
if (data._capabilities?.VeeamSOSApi?.SystemInfo) {
|
||||||
|
fieldsToGenerate.push({
|
||||||
|
...data._capabilities?.VeeamSOSApi?.SystemInfo,
|
||||||
|
name: `${validPath}system.xml`,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
if (data._capabilities?.VeeamSOSApi?.CapacityInfo) {
|
||||||
|
fieldsToGenerate.push({
|
||||||
|
...data._capabilities?.VeeamSOSApi?.CapacityInfo,
|
||||||
|
name: `${validPath}capacity.xml`,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
fieldsToGenerate.forEach(file => {
|
||||||
|
const lastModified = file.LastModified;
|
||||||
|
// eslint-disable-next-line no-param-reassign
|
||||||
|
delete file.LastModified;
|
||||||
|
const builder = new xml2js.Builder({
|
||||||
|
headless: true,
|
||||||
|
});
|
||||||
|
const dataBuffer = Buffer.from(buildHeadXML(builder.buildObject(file)));
|
||||||
|
filesToBuild.push({
|
||||||
|
...getResponseHeader(request, data,
|
||||||
|
dataBuffer, lastModified, log),
|
||||||
|
name: file.name,
|
||||||
|
});
|
||||||
|
});
|
||||||
|
// When `versions` is present, listing should return a versioned list
|
||||||
|
return respondWithData(request, response, log, data,
|
||||||
|
buildXMLResponse(request, filesToBuild, 'versions' in request.query));
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
module.exports = listVeeamFiles;
|
|
@ -0,0 +1,80 @@
|
||||||
|
const async = require('async');
|
||||||
|
const { parseString } = require('xml2js');
|
||||||
|
const { receiveData, isSystemXML, getFileToBuild } = require('./utils');
|
||||||
|
const { s3routes, errors } = require('arsenal');
|
||||||
|
const metadata = require('../../metadata/wrapper');
|
||||||
|
const parseSystemSchema = require('./schemas/system');
|
||||||
|
const parseCapacitySchema = require('./schemas/capacity');
|
||||||
|
const writeContinue = require('../../utilities/writeContinue');
|
||||||
|
|
||||||
|
const { responseNoBody, responseXMLBody } = s3routes.routesUtils;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Puts a veeam capacity or system file in the bucket metadata.
|
||||||
|
* Logic ensures consistency of the data and metadata.
|
||||||
|
*
|
||||||
|
* @param {object} request - request object
|
||||||
|
* @param {object} response - response object
|
||||||
|
* @param {object} bucketMd - bucket metadata from the db
|
||||||
|
* @param {object} log - logger object
|
||||||
|
* @returns {undefined} -
|
||||||
|
*/
|
||||||
|
function putVeeamFile(request, response, bucketMd, log) {
|
||||||
|
if (!bucketMd) {
|
||||||
|
return errors.NoSuchBucket;
|
||||||
|
}
|
||||||
|
|
||||||
|
return async.waterfall([
|
||||||
|
next => {
|
||||||
|
// Extract the data from the request, keep it in memory
|
||||||
|
writeContinue(request, response);
|
||||||
|
return receiveData(request, log, next);
|
||||||
|
},
|
||||||
|
(value, next) => parseString(value, { explicitArray: false }, (err, parsed) => {
|
||||||
|
// Convert the received XML to a JS object
|
||||||
|
if (err) {
|
||||||
|
return next(errors.MalformedXML);
|
||||||
|
}
|
||||||
|
return next(null, parsed);
|
||||||
|
}),
|
||||||
|
(parsedXML, next) => {
|
||||||
|
const capabilities = bucketMd._capabilities || {
|
||||||
|
VeeamSOSApi: {},
|
||||||
|
};
|
||||||
|
// Validate the JS object schema with joi and prepare the object for
|
||||||
|
// further logic
|
||||||
|
const validateFn = isSystemXML(request.objectKey) ? parseSystemSchema : parseCapacitySchema;
|
||||||
|
let validatedData = null;
|
||||||
|
try {
|
||||||
|
validatedData = validateFn(parsedXML);
|
||||||
|
} catch (err) {
|
||||||
|
log.error('xml file did not pass validation', { err });
|
||||||
|
return next(errors.MalformedXML);
|
||||||
|
}
|
||||||
|
const file = getFileToBuild(request, validatedData, true);
|
||||||
|
if (file.error) {
|
||||||
|
return next(file.error);
|
||||||
|
}
|
||||||
|
capabilities.VeeamSOSApi = {
|
||||||
|
...(capabilities.VeeamSOSApi || {}),
|
||||||
|
...file.value,
|
||||||
|
};
|
||||||
|
// Write data to bucketMD with the same (validated) format
|
||||||
|
// eslint-disable-next-line no-param-reassign
|
||||||
|
bucketMd = {
|
||||||
|
...bucketMd,
|
||||||
|
_capabilities: capabilities,
|
||||||
|
};
|
||||||
|
// Update bucket metadata
|
||||||
|
return metadata.updateBucketCapabilities(
|
||||||
|
request.bucketName, bucketMd, 'VeeamSOSApi', file.fieldName, file.value[file.fieldName], log, next);
|
||||||
|
}
|
||||||
|
], err => {
|
||||||
|
if (err) {
|
||||||
|
return responseXMLBody(err, null, response, log);
|
||||||
|
}
|
||||||
|
return responseNoBody(null, null, response, 200, log);
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
module.exports = putVeeamFile;
|
|
@ -0,0 +1,38 @@
|
||||||
|
const joi = require('joi');
|
||||||
|
const { errors } = require('arsenal');
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Validates and parse the provided JSON object from the
|
||||||
|
* provided XML file. XML scheme example:
|
||||||
|
*
|
||||||
|
* <?xml version="1.0" encoding="utf-8" ?>
|
||||||
|
* <CapacityInfo>
|
||||||
|
* <Capacity>1099511627776</Capacity>
|
||||||
|
* <Available>0</Available>
|
||||||
|
* <Used>0</Used>
|
||||||
|
* </CapacityInfo>
|
||||||
|
*
|
||||||
|
* @param {string} parsedXML - the parsed XML from xml2js
|
||||||
|
* @returns {object | Error} the valid system.xml JS object or an error if
|
||||||
|
* validation fails
|
||||||
|
*/
|
||||||
|
function validateCapacitySchema(parsedXML) {
|
||||||
|
const schema = joi.object({
|
||||||
|
CapacityInfo: joi.object({
|
||||||
|
Capacity: joi.number().min(-1).integer().required(),
|
||||||
|
Available: joi.number().min(-1).integer().required(),
|
||||||
|
Used: joi.number().min(-1).integer().required(),
|
||||||
|
}).required(),
|
||||||
|
});
|
||||||
|
const validatedData = schema.validate(parsedXML, {
|
||||||
|
// Allow any unknown keys for future compatibility
|
||||||
|
allowUnknown: true,
|
||||||
|
convert: true,
|
||||||
|
});
|
||||||
|
if (validatedData.error) {
|
||||||
|
throw new Error(errors.MalformedXML);
|
||||||
|
}
|
||||||
|
return validatedData.value;
|
||||||
|
}
|
||||||
|
|
||||||
|
module.exports = validateCapacitySchema;
|
|
@ -0,0 +1,95 @@
|
||||||
|
const joi = require('joi');
|
||||||
|
const { errors } = require('arsenal');
|
||||||
|
|
||||||
|
// Allow supporting any version of the protocol
|
||||||
|
const systemSchemasPerVersion = {
|
||||||
|
'unsupported': joi.object({}),
|
||||||
|
'"1.0"': joi.object({
|
||||||
|
SystemInfo: joi.object({
|
||||||
|
ProtocolVersion: joi.string().required(),
|
||||||
|
ModelName: joi.string().required(),
|
||||||
|
ProtocolCapabilities: joi.object({
|
||||||
|
CapacityInfo: joi.boolean().required(),
|
||||||
|
UploadSessions: joi.boolean().required(),
|
||||||
|
IAMSTS: joi.boolean().default(false),
|
||||||
|
}).required(),
|
||||||
|
APIEndpoints: joi.object({
|
||||||
|
IAMEndpoint: joi.string().required(),
|
||||||
|
STSEndpoint: joi.string().required()
|
||||||
|
}),
|
||||||
|
SystemRecommendations: joi.object({
|
||||||
|
S3ConcurrentTaskLimit: joi.number().min(0).default(64),
|
||||||
|
S3MultiObjectDeleteLimit: joi.number().min(1).default(1000),
|
||||||
|
StorageCurrentTasksLimit: joi.number().min(0).default(0),
|
||||||
|
KbBlockSize: joi.number()
|
||||||
|
.valid(256, 512, 1024, 2048, 4096, 8192)
|
||||||
|
.default(1024),
|
||||||
|
}),
|
||||||
|
}).required()
|
||||||
|
}),
|
||||||
|
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Validates and parse the provided JSON object from the
|
||||||
|
* provided XML file. XML scheme example:
|
||||||
|
*
|
||||||
|
* <?xml version="1.0" encoding="utf-8" ?>
|
||||||
|
* <SystemInfo>
|
||||||
|
* <ProtocolVersion>"1.0"</ProtocolVersion>
|
||||||
|
* <ModelName>"ACME corp - Custom S3 server - v1.2"</ModelName>
|
||||||
|
* <ProtocolCapabilities>
|
||||||
|
* <CapacityInfo>true</CapacityInfo>
|
||||||
|
* <UploadSessions>true</UploadSessions>
|
||||||
|
* <IAMSTS>true</IAMSTS>
|
||||||
|
* </ProtocolCapabilities>
|
||||||
|
* <APIEndpoints>
|
||||||
|
* <IAMEndpoint>https://storage.acme.local/iam/endpoint</IAMEndpoint>
|
||||||
|
* <STSEndpoint>https://storage.acme.local/sts/endpoint</STSEndpoint>
|
||||||
|
* </APIEndpoints>
|
||||||
|
* <SystemRecommendations>
|
||||||
|
* <S3ConcurrentTaskLimit>64</S3ConcurrentTaskLimit>
|
||||||
|
* <S3MultiObjectDeleteLimit>1000</S3MultiObjectDeleteLimit>
|
||||||
|
* <StorageCurrentTaksLimit>0</StorageCurrentTaskLimit>
|
||||||
|
* <KbBlockSize>1024</KbBlockSize>
|
||||||
|
* </SystemRecommendations>
|
||||||
|
* </SystemInfo>
|
||||||
|
*
|
||||||
|
* @param {string} parsedXML - the parsed XML from xml2js
|
||||||
|
* @returns {object | Error} the valid system.xml JS object or an error if
|
||||||
|
* validation fails
|
||||||
|
*/
|
||||||
|
function validateSystemSchema(parsedXML) {
|
||||||
|
const protocolVersion = parsedXML?.SystemInfo?.ProtocolVersion;
|
||||||
|
let schema = systemSchemasPerVersion.unsupported;
|
||||||
|
if (!protocolVersion) {
|
||||||
|
throw new Error(errors.MalformedXML
|
||||||
|
.customizeDescription('ProtocolVersion must be set for the system.xml file'));
|
||||||
|
}
|
||||||
|
if (protocolVersion && protocolVersion in systemSchemasPerVersion) {
|
||||||
|
schema = systemSchemasPerVersion[parsedXML?.SystemInfo?.ProtocolVersion];
|
||||||
|
}
|
||||||
|
const validatedData = schema.validate(parsedXML, {
|
||||||
|
// Allow any unknown keys for future compatibility
|
||||||
|
allowUnknown: true,
|
||||||
|
convert: true,
|
||||||
|
});
|
||||||
|
if (validatedData.error) {
|
||||||
|
throw validatedData.error;
|
||||||
|
} else {
|
||||||
|
switch (protocolVersion) {
|
||||||
|
case '"1.0"':
|
||||||
|
// Ensure conditional fields are set
|
||||||
|
// IAMSTS === true implies that SystemInfo.APIEndpoints is defined
|
||||||
|
if (validatedData.value.SystemInfo.ProtocolCapabilities.IAMSTS
|
||||||
|
&& !validatedData.value.SystemInfo.APIEndpoints) {
|
||||||
|
throw new Error(errors.MalformedXML);
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return validatedData.value;
|
||||||
|
}
|
||||||
|
|
||||||
|
module.exports = validateSystemSchema;
|
|
@ -0,0 +1,211 @@
|
||||||
|
const { errors, jsutil } = require('arsenal');
|
||||||
|
const { Readable } = require('stream');
|
||||||
|
const collectResponseHeaders = require('../../utilities/collectResponseHeaders');
|
||||||
|
const collectCorsHeaders = require('../../utilities/collectCorsHeaders');
|
||||||
|
const crypto = require('crypto');
|
||||||
|
const { prepareStream } = require('arsenal/build/lib/s3middleware/prepareStream');
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Decodes an URI and return the result.
|
||||||
|
* Do the same decoding than in S3 server
|
||||||
|
* @param {string} uri - uri to decode
|
||||||
|
* @returns {string} -
|
||||||
|
*/
|
||||||
|
function _decodeURI(uri) {
|
||||||
|
return decodeURIComponent(uri.replace(/\+/g, ' '));
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Generic function to get data from a client request.
|
||||||
|
*
|
||||||
|
* @param {object} request - incoming request
|
||||||
|
* @param {object} log - logger object
|
||||||
|
* @param {function} callback -
|
||||||
|
* @returns {undefined}
|
||||||
|
*/
|
||||||
|
function receiveData(request, log, callback) {
|
||||||
|
// Get keycontent
|
||||||
|
const { parsedContentLength } = request;
|
||||||
|
const ContentLengthThreshold = 1024 * 1024; // 1MB
|
||||||
|
// Prevent memory overloads by limiting the size of the
|
||||||
|
// received data.
|
||||||
|
if (parsedContentLength > ContentLengthThreshold) {
|
||||||
|
return callback(errors.InvalidInput
|
||||||
|
.customizeDescription(`maximum allowed content-length is ${ContentLengthThreshold} bytes`));
|
||||||
|
}
|
||||||
|
const value = Buffer.alloc(parsedContentLength);
|
||||||
|
const cbOnce = jsutil.once(callback);
|
||||||
|
const dataStream = prepareStream(request, request.streamingV4Params, log, cbOnce);
|
||||||
|
let cursor = 0;
|
||||||
|
let exceeded = false;
|
||||||
|
dataStream.on('data', data => {
|
||||||
|
if (cursor + data.length > parsedContentLength) {
|
||||||
|
exceeded = true;
|
||||||
|
}
|
||||||
|
if (!exceeded) {
|
||||||
|
data.copy(value, cursor);
|
||||||
|
}
|
||||||
|
cursor += data.length;
|
||||||
|
});
|
||||||
|
dataStream.on('end', () => {
|
||||||
|
if (exceeded) {
|
||||||
|
log.error('data stream exceed announced size',
|
||||||
|
{ parsedContentLength, overflow: cursor });
|
||||||
|
return callback(errors.InternalError);
|
||||||
|
} else {
|
||||||
|
return callback(null, value.toString());
|
||||||
|
}
|
||||||
|
});
|
||||||
|
return undefined;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Builds a valid XML file for SOSAPI
|
||||||
|
*
|
||||||
|
* @param {string} xmlContent - valid xml content
|
||||||
|
* @returns {string} a valid and formatted XML file
|
||||||
|
*/
|
||||||
|
function buildHeadXML(xmlContent) {
|
||||||
|
return `<?xml version="1.0" encoding="UTF-8" ?>\n${xmlContent}\n`;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get response headers for the object
|
||||||
|
* @param {object} request - incoming request
|
||||||
|
* @param {BucketInfo} bucket - bucket
|
||||||
|
* @param {string} dataBuffer - data to send as a buffer
|
||||||
|
* @param {date} [lastModified] - last modified date of the value
|
||||||
|
* @param {object} log - logging object
|
||||||
|
* @returns {object} - response headers
|
||||||
|
*/
|
||||||
|
function getResponseHeader(request, bucket, dataBuffer, lastModified, log) {
|
||||||
|
const corsHeaders = collectCorsHeaders(request.headers.origin,
|
||||||
|
request.method, bucket);
|
||||||
|
const responseMetaHeaders = collectResponseHeaders({
|
||||||
|
'last-modified': lastModified || new Date().toISOString(),
|
||||||
|
'content-md5': crypto
|
||||||
|
.createHash('md5')
|
||||||
|
.update(dataBuffer)
|
||||||
|
.digest('hex'),
|
||||||
|
'content-length': dataBuffer.byteLength,
|
||||||
|
'content-type': 'text/xml',
|
||||||
|
}, corsHeaders, null, false);
|
||||||
|
responseMetaHeaders.versionId = 'null';
|
||||||
|
responseMetaHeaders['x-amz-id-2'] = log.getSerializedUids();
|
||||||
|
responseMetaHeaders['x-amz-request-id'] = log.getSerializedUids();
|
||||||
|
return responseMetaHeaders;
|
||||||
|
}
|
||||||
|
/**
|
||||||
|
* Generic function to respond to user with data using streams
|
||||||
|
*
|
||||||
|
* @param {object} request - incoming request
|
||||||
|
* @param {object} response - response object
|
||||||
|
* @param {object} log - logging object
|
||||||
|
* @param {BucketInfo} bucket - bucket info
|
||||||
|
* @param {string} data - data to send
|
||||||
|
* @param {date} [lastModified] - last modified date of the value
|
||||||
|
* @returns {undefined} -
|
||||||
|
*/
|
||||||
|
function respondWithData(request, response, log, bucket, data, lastModified) {
|
||||||
|
const dataBuffer = Buffer.from(data);
|
||||||
|
const responseMetaHeaders = getResponseHeader(request, bucket, dataBuffer, lastModified, log);
|
||||||
|
|
||||||
|
response.on('finish', () => {
|
||||||
|
let contentLength = 0;
|
||||||
|
if (responseMetaHeaders && responseMetaHeaders['Content-Length']) {
|
||||||
|
contentLength = responseMetaHeaders['Content-Length'];
|
||||||
|
}
|
||||||
|
log.end().addDefaultFields({ contentLength });
|
||||||
|
log.end().info('responded with streamed content', {
|
||||||
|
httpCode: response.statusCode,
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
if (responseMetaHeaders && typeof responseMetaHeaders === 'object') {
|
||||||
|
Object.keys(responseMetaHeaders).forEach(key => {
|
||||||
|
if (responseMetaHeaders[key] !== undefined) {
|
||||||
|
try {
|
||||||
|
response.setHeader(key, responseMetaHeaders[key]);
|
||||||
|
} catch (e) {
|
||||||
|
log.debug('header can not be added ' +
|
||||||
|
'to the response', {
|
||||||
|
header: responseMetaHeaders[key],
|
||||||
|
error: e.stack, method: 'routeVeeam/respondWithData'
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
response.writeHead(200);
|
||||||
|
const stream = Readable.from(dataBuffer);
|
||||||
|
stream.pipe(response);
|
||||||
|
stream.on('unpipe', () => {
|
||||||
|
response.end();
|
||||||
|
});
|
||||||
|
stream.on('error', () => {
|
||||||
|
response.end();
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
const validPath = '.system-d26a9498-cb7c-4a87-a44a-8ae204f5ba6c/';
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Helper to determine if the current requested file is system.xml
|
||||||
|
*
|
||||||
|
* @param {string} objectKey - object key
|
||||||
|
* @returns {boolean} - true if the object key ends with `/system.xml`
|
||||||
|
*/
|
||||||
|
function isSystemXML(objectKey) {
|
||||||
|
return objectKey.endsWith('/system.xml');
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Helper to extract the file from the bucket metadata
|
||||||
|
*
|
||||||
|
* @param {object} request - incoming request
|
||||||
|
* @param {object} data - the bucket metadata or input data
|
||||||
|
* @param {boolean} inlineLastModified - true if LastModified should be in the returned object
|
||||||
|
* or as another standalone field
|
||||||
|
* @returns {error | object} - error if file does not exist, or
|
||||||
|
* the associated metadata
|
||||||
|
*/
|
||||||
|
function getFileToBuild(request, data, inlineLastModified = false) {
|
||||||
|
const _isSystemXML = isSystemXML(request.objectKey);
|
||||||
|
const fileToBuild = _isSystemXML ? data?.SystemInfo
|
||||||
|
: data?.CapacityInfo;
|
||||||
|
if (!fileToBuild) {
|
||||||
|
return { error: errors.NoSuchKey };
|
||||||
|
}
|
||||||
|
const modified = fileToBuild.LastModified || (new Date()).toISOString();
|
||||||
|
const fieldName = _isSystemXML ? 'SystemInfo' : 'CapacityInfo';
|
||||||
|
if (inlineLastModified) {
|
||||||
|
fileToBuild.LastModified = modified;
|
||||||
|
return {
|
||||||
|
value: {
|
||||||
|
[fieldName]: fileToBuild,
|
||||||
|
},
|
||||||
|
fieldName,
|
||||||
|
};
|
||||||
|
} else {
|
||||||
|
delete fileToBuild.LastModified;
|
||||||
|
return {
|
||||||
|
value: {
|
||||||
|
[fieldName]: fileToBuild,
|
||||||
|
LastModified: modified,
|
||||||
|
},
|
||||||
|
fieldName,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
module.exports = {
|
||||||
|
_decodeURI,
|
||||||
|
receiveData,
|
||||||
|
respondWithData,
|
||||||
|
getResponseHeader,
|
||||||
|
buildHeadXML,
|
||||||
|
validPath,
|
||||||
|
isSystemXML,
|
||||||
|
getFileToBuild,
|
||||||
|
};
|
|
@ -18,13 +18,9 @@ const locationStorageCheck =
|
||||||
require('./api/apiUtils/object/locationStorageCheck');
|
require('./api/apiUtils/object/locationStorageCheck');
|
||||||
const vault = require('./auth/vault');
|
const vault = require('./auth/vault');
|
||||||
const metadata = require('./metadata/wrapper');
|
const metadata = require('./metadata/wrapper');
|
||||||
const { initManagement } = require('./management');
|
|
||||||
const {
|
|
||||||
initManagementClient,
|
|
||||||
isManagementAgentUsed,
|
|
||||||
} = require('./management/agentClient');
|
|
||||||
|
|
||||||
const HttpAgent = require('agentkeepalive');
|
const HttpAgent = require('agentkeepalive');
|
||||||
|
const QuotaService = require('./quotas/quotas');
|
||||||
const routes = arsenal.s3routes.routes;
|
const routes = arsenal.s3routes.routes;
|
||||||
const { parseLC, MultipleBackendGateway } = arsenal.storage.data;
|
const { parseLC, MultipleBackendGateway } = arsenal.storage.data;
|
||||||
const websiteEndpoints = _config.websiteEndpoints;
|
const websiteEndpoints = _config.websiteEndpoints;
|
||||||
|
@ -55,7 +51,6 @@ const STATS_INTERVAL = 5; // 5 seconds
|
||||||
const STATS_EXPIRY = 30; // 30 seconds
|
const STATS_EXPIRY = 30; // 30 seconds
|
||||||
const statsClient = new StatsClient(localCacheClient, STATS_INTERVAL,
|
const statsClient = new StatsClient(localCacheClient, STATS_INTERVAL,
|
||||||
STATS_EXPIRY);
|
STATS_EXPIRY);
|
||||||
const enableRemoteManagement = true;
|
|
||||||
|
|
||||||
class S3Server {
|
class S3Server {
|
||||||
/**
|
/**
|
||||||
|
@ -321,16 +316,9 @@ class S3Server {
|
||||||
this._startServer(this.routeAdminRequest, _config.metricsPort);
|
this._startServer(this.routeAdminRequest, _config.metricsPort);
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO this should wait for metadata healthcheck to be ok
|
// Start quota service health checks
|
||||||
// TODO only do this in cluster master
|
if (QuotaService.enabled) {
|
||||||
if (enableRemoteManagement) {
|
QuotaService?.setup(log);
|
||||||
if (!isManagementAgentUsed()) {
|
|
||||||
setTimeout(() => {
|
|
||||||
initManagement(logger.newRequestLogger());
|
|
||||||
}, 5000);
|
|
||||||
} else {
|
|
||||||
initManagementClient();
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
this.started = true;
|
this.started = true;
|
||||||
|
@ -339,8 +327,7 @@ class S3Server {
|
||||||
}
|
}
|
||||||
|
|
||||||
function main() {
|
function main() {
|
||||||
// TODO: change config to use workers prop. name for clarity
|
let workers = _config.workers || 1;
|
||||||
let workers = _config.clusters || 1;
|
|
||||||
if (process.env.S3BACKEND === 'mem') {
|
if (process.env.S3BACKEND === 'mem') {
|
||||||
workers = 1;
|
workers = 1;
|
||||||
}
|
}
|
||||||
|
|
|
@ -109,7 +109,7 @@ const services = {
|
||||||
tagging, taggingCopy, replicationInfo, defaultRetention,
|
tagging, taggingCopy, replicationInfo, defaultRetention,
|
||||||
dataStoreName, creationTime, retentionMode, retentionDate,
|
dataStoreName, creationTime, retentionMode, retentionDate,
|
||||||
legalHold, originOp, updateMicroVersionId, archive, oldReplayId,
|
legalHold, originOp, updateMicroVersionId, archive, oldReplayId,
|
||||||
deleteNullKey, overheadField } = params;
|
deleteNullKey, amzStorageClass, overheadField } = params;
|
||||||
log.trace('storing object in metadata');
|
log.trace('storing object in metadata');
|
||||||
assert.strictEqual(typeof bucketName, 'string');
|
assert.strictEqual(typeof bucketName, 'string');
|
||||||
const md = new ObjectMD();
|
const md = new ObjectMD();
|
||||||
|
@ -186,6 +186,7 @@ const services = {
|
||||||
}
|
}
|
||||||
// update restore
|
// update restore
|
||||||
if (archive) {
|
if (archive) {
|
||||||
|
md.setAmzStorageClass(amzStorageClass);
|
||||||
md.setArchive(new ObjectMDArchive(
|
md.setArchive(new ObjectMDArchive(
|
||||||
archive.archiveInfo,
|
archive.archiveInfo,
|
||||||
archive.restoreRequestedAt,
|
archive.restoreRequestedAt,
|
||||||
|
@ -262,6 +263,11 @@ const services = {
|
||||||
if (legalHold) {
|
if (legalHold) {
|
||||||
md.setLegalHold(legalHold);
|
md.setLegalHold(legalHold);
|
||||||
}
|
}
|
||||||
|
if (params.acl) {
|
||||||
|
// In case of a restore we dont pass ACL in the headers
|
||||||
|
// but we take them from the old metadata
|
||||||
|
md.setAcl(params.acl);
|
||||||
|
}
|
||||||
|
|
||||||
log.trace('object metadata', { omVal: md.getValue() });
|
log.trace('object metadata', { omVal: md.getValue() });
|
||||||
// If this is not the completion of a multipart upload or
|
// If this is not the completion of a multipart upload or
|
||||||
|
@ -327,10 +333,11 @@ const services = {
|
||||||
* @param {boolean} deferLocationDeletion - true if the object should not
|
* @param {boolean} deferLocationDeletion - true if the object should not
|
||||||
* be removed from the storage, but be returned instead.
|
* be removed from the storage, but be returned instead.
|
||||||
* @param {Log} log - logger instance
|
* @param {Log} log - logger instance
|
||||||
|
* @param {string} originOp - origin operation
|
||||||
* @param {function} cb - callback from async.waterfall in objectGet
|
* @param {function} cb - callback from async.waterfall in objectGet
|
||||||
* @return {undefined}
|
* @return {undefined}
|
||||||
*/
|
*/
|
||||||
deleteObject(bucketName, objectMD, objectKey, options, deferLocationDeletion, log, cb) {
|
deleteObject(bucketName, objectMD, objectKey, options, deferLocationDeletion, log, originOp, cb) {
|
||||||
log.trace('deleting object from bucket');
|
log.trace('deleting object from bucket');
|
||||||
assert.strictEqual(typeof bucketName, 'string');
|
assert.strictEqual(typeof bucketName, 'string');
|
||||||
assert.strictEqual(typeof objectMD, 'object');
|
assert.strictEqual(typeof objectMD, 'object');
|
||||||
|
@ -362,7 +369,7 @@ const services = {
|
||||||
}
|
}
|
||||||
return cb(null, res);
|
return cb(null, res);
|
||||||
});
|
});
|
||||||
});
|
}, originOp);
|
||||||
}
|
}
|
||||||
|
|
||||||
const objGetInfo = objectMD.location;
|
const objGetInfo = objectMD.location;
|
||||||
|
|
|
@ -1,16 +1,12 @@
|
||||||
|
require('werelogs').stderrUtils.catchAndTimestampStderr();
|
||||||
const _config = require('../Config').config;
|
const _config = require('../Config').config;
|
||||||
const { utapiVersion, UtapiServer: utapiServer } = require('utapi');
|
const { utapiVersion, UtapiServer: utapiServer } = require('utapi');
|
||||||
|
const vault = require('../auth/vault');
|
||||||
|
|
||||||
// start utapi server
|
// start utapi server
|
||||||
if (utapiVersion === 1 && _config.utapi) {
|
if (utapiVersion === 1 && _config.utapi) {
|
||||||
const fullConfig = Object.assign({}, _config.utapi,
|
const fullConfig = Object.assign({}, _config.utapi,
|
||||||
{ redis: _config.redis });
|
{ redis: _config.redis, vaultclient: vault });
|
||||||
if (_config.vaultd) {
|
|
||||||
Object.assign(fullConfig, { vaultd: _config.vaultd });
|
|
||||||
}
|
|
||||||
if (_config.https) {
|
|
||||||
Object.assign(fullConfig, { https: _config.https });
|
|
||||||
}
|
|
||||||
// copy healthcheck IPs
|
// copy healthcheck IPs
|
||||||
if (_config.healthChecks) {
|
if (_config.healthChecks) {
|
||||||
Object.assign(fullConfig, { healthChecks: _config.healthChecks });
|
Object.assign(fullConfig, { healthChecks: _config.healthChecks });
|
||||||
|
|
|
@ -1,3 +1,4 @@
|
||||||
|
require('werelogs').stderrUtils.catchAndTimestampStderr();
|
||||||
const UtapiReindex = require('utapi').UtapiReindex;
|
const UtapiReindex = require('utapi').UtapiReindex;
|
||||||
const { config } = require('../Config');
|
const { config } = require('../Config');
|
||||||
|
|
||||||
|
|
|
@ -1,3 +1,4 @@
|
||||||
|
require('werelogs').stderrUtils.catchAndTimestampStderr();
|
||||||
const UtapiReplay = require('utapi').UtapiReplay;
|
const UtapiReplay = require('utapi').UtapiReplay;
|
||||||
const _config = require('../Config').config;
|
const _config = require('../Config').config;
|
||||||
|
|
||||||
|
|
|
@ -3,12 +3,14 @@ const routeMetadata = require('../routes/routeMetadata');
|
||||||
const routeWorkflowEngineOperator =
|
const routeWorkflowEngineOperator =
|
||||||
require('../routes/routeWorkflowEngineOperator');
|
require('../routes/routeWorkflowEngineOperator');
|
||||||
const { reportHandler } = require('./reportHandler');
|
const { reportHandler } = require('./reportHandler');
|
||||||
|
const routeVeeam = require('../routes/routeVeeam').routeVeeam;
|
||||||
|
|
||||||
const internalHandlers = {
|
const internalHandlers = {
|
||||||
backbeat: routeBackbeat,
|
backbeat: routeBackbeat,
|
||||||
report: reportHandler,
|
report: reportHandler,
|
||||||
metadata: routeMetadata,
|
metadata: routeMetadata,
|
||||||
'workflow-engine-operator': routeWorkflowEngineOperator,
|
'workflow-engine-operator': routeWorkflowEngineOperator,
|
||||||
|
veeam: routeVeeam,
|
||||||
};
|
};
|
||||||
|
|
||||||
module.exports = {
|
module.exports = {
|
||||||
|
|
|
@ -1,7 +1,11 @@
|
||||||
const { Werelogs } = require('werelogs');
|
const { configure, Werelogs } = require('werelogs');
|
||||||
|
|
||||||
const _config = require('../Config.js').config;
|
const _config = require('../Config.js').config;
|
||||||
|
|
||||||
|
configure({
|
||||||
|
level: _config.log.logLevel,
|
||||||
|
dump: _config.log.dumpLevel,
|
||||||
|
});
|
||||||
const werelogs = new Werelogs({
|
const werelogs = new Werelogs({
|
||||||
level: _config.log.logLevel,
|
level: _config.log.logLevel,
|
||||||
dump: _config.log.dumpLevel,
|
dump: _config.log.dumpLevel,
|
||||||
|
|
|
@ -1,5 +1,6 @@
|
||||||
const { errors } = require('arsenal');
|
const { errors } = require('arsenal');
|
||||||
const client = require('prom-client');
|
const client = require('prom-client');
|
||||||
|
const { config } = require('../Config');
|
||||||
|
|
||||||
const collectDefaultMetrics = client.collectDefaultMetrics;
|
const collectDefaultMetrics = client.collectDefaultMetrics;
|
||||||
const numberOfBuckets = new client.Gauge({
|
const numberOfBuckets = new client.Gauge({
|
||||||
|
@ -64,6 +65,61 @@ const httpResponseSizeBytes = new client.Summary({
|
||||||
help: 'Cloudserver HTTP response sizes in bytes',
|
help: 'Cloudserver HTTP response sizes in bytes',
|
||||||
});
|
});
|
||||||
|
|
||||||
|
let quotaEvaluationDuration;
|
||||||
|
let utilizationMetricsRetrievalDuration;
|
||||||
|
let utilizationServiceAvailable;
|
||||||
|
let bucketsWithQuota;
|
||||||
|
let accountsWithQuota;
|
||||||
|
let requestWithQuotaMetricsUnavailable;
|
||||||
|
|
||||||
|
if (config.isQuotaEnabled) {
|
||||||
|
quotaEvaluationDuration = new client.Histogram({
|
||||||
|
name: 's3_cloudserver_quota_evaluation_duration_seconds',
|
||||||
|
help: 'Duration of the quota evaluation operation',
|
||||||
|
labelNames: ['action', 'code', 'type'],
|
||||||
|
buckets: [0.001, 0.0025, 0.005, 0.01, 0.025, 0.05, 0.1, 0.2, 0.5, 1],
|
||||||
|
});
|
||||||
|
|
||||||
|
utilizationMetricsRetrievalDuration = new client.Histogram({
|
||||||
|
name: 's3_cloudserver_quota_metrics_retrieval_duration_seconds',
|
||||||
|
help: 'Duration of the utilization metrics retrieval operation',
|
||||||
|
labelNames: ['code', 'class'],
|
||||||
|
buckets: [0.001, 0.0025, 0.005, 0.01, 0.025, 0.05, 0.1, 0.2, 0.5],
|
||||||
|
});
|
||||||
|
|
||||||
|
utilizationServiceAvailable = new client.Gauge({
|
||||||
|
name: 's3_cloudserver_quota_utilization_service_available',
|
||||||
|
help: 'Availability of the utilization service',
|
||||||
|
});
|
||||||
|
|
||||||
|
bucketsWithQuota = new client.Gauge({
|
||||||
|
name: 's3_cloudserver_quota_buckets_count',
|
||||||
|
help: 'Total number of buckets quota',
|
||||||
|
});
|
||||||
|
|
||||||
|
accountsWithQuota = new client.Gauge({
|
||||||
|
name: 's3_cloudserver_quota_accounts_count',
|
||||||
|
help: 'Total number of account quota',
|
||||||
|
});
|
||||||
|
|
||||||
|
requestWithQuotaMetricsUnavailable = new client.Counter({
|
||||||
|
name: 's3_cloudserver_quota_unavailable_count',
|
||||||
|
help: 'Total number of requests with quota metrics unavailable',
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
// Lifecycle duration metric, to track the completion of restore.
|
||||||
|
// This metric is used to track the time it takes to complete the lifecycle operation (restore).
|
||||||
|
// NOTE : this metric is the same as the one defined in Backbeat, and must keep the same name,
|
||||||
|
// labels and buckets.
|
||||||
|
const lifecycleDuration = new client.Histogram({
|
||||||
|
name: 's3_lifecycle_duration_seconds',
|
||||||
|
help: 'Duration of the lifecycle operation, calculated from the theoretical date to the end ' +
|
||||||
|
'of the operation',
|
||||||
|
labelNames: ['type', 'location'],
|
||||||
|
buckets: [0.2, 1, 5, 30, 120, 600, 3600, 4 * 3600, 8 * 3600, 16 * 3600, 24 * 3600],
|
||||||
|
});
|
||||||
|
|
||||||
function promMetrics(method, bucketName, code, action,
|
function promMetrics(method, bucketName, code, action,
|
||||||
newByteLength, oldByteLength, isVersionedObj,
|
newByteLength, oldByteLength, isVersionedObj,
|
||||||
numOfObjectsRemoved, ingestSize) {
|
numOfObjectsRemoved, ingestSize) {
|
||||||
|
@ -131,6 +187,10 @@ function crrCacheToProm(crrResults) {
|
||||||
numberOfBuckets.set(crrResults.getObjectCount.buckets || 0);
|
numberOfBuckets.set(crrResults.getObjectCount.buckets || 0);
|
||||||
numberOfObjects.set(crrResults.getObjectCount.objects || 0);
|
numberOfObjects.set(crrResults.getObjectCount.objects || 0);
|
||||||
}
|
}
|
||||||
|
if (config.isQuotaEnabled) {
|
||||||
|
bucketsWithQuota.set(crrResults?.getObjectCount?.bucketWithQuotaCount || 0);
|
||||||
|
accountsWithQuota.set(crrResults?.getVaultReport?.accountWithQuotaCount || 0);
|
||||||
|
}
|
||||||
if (crrResults.getDataDiskUsage) {
|
if (crrResults.getDataDiskUsage) {
|
||||||
dataDiskAvailable.set(crrResults.getDataDiskUsage.available || 0);
|
dataDiskAvailable.set(crrResults.getDataDiskUsage.available || 0);
|
||||||
dataDiskFree.set(crrResults.getDataDiskUsage.free || 0);
|
dataDiskFree.set(crrResults.getDataDiskUsage.free || 0);
|
||||||
|
@ -207,4 +267,10 @@ module.exports = {
|
||||||
httpRequestDurationSeconds,
|
httpRequestDurationSeconds,
|
||||||
httpRequestsTotal,
|
httpRequestsTotal,
|
||||||
httpActiveRequests,
|
httpActiveRequests,
|
||||||
|
lifecycleDuration,
|
||||||
|
quotaEvaluationDuration,
|
||||||
|
utilizationMetricsRetrievalDuration,
|
||||||
|
utilizationServiceAvailable,
|
||||||
|
bucketsWithQuota,
|
||||||
|
requestWithQuotaMetricsUnavailable,
|
||||||
};
|
};
|
||||||
|
|
|
@ -10,6 +10,7 @@ const config = require('../Config').config;
|
||||||
const { data } = require('../data/wrapper');
|
const { data } = require('../data/wrapper');
|
||||||
const metadata = require('../metadata/wrapper');
|
const metadata = require('../metadata/wrapper');
|
||||||
const monitoring = require('../utilities/monitoringHandler');
|
const monitoring = require('../utilities/monitoringHandler');
|
||||||
|
const vault = require('../auth/vault');
|
||||||
|
|
||||||
const REPORT_MODEL_VERSION = 1;
|
const REPORT_MODEL_VERSION = 1;
|
||||||
const ASYNCLIMIT = 5;
|
const ASYNCLIMIT = 5;
|
||||||
|
@ -461,6 +462,7 @@ function reportHandler(clientIP, req, res, log) {
|
||||||
getCRRMetrics: cb => getCRRMetrics(log, cb),
|
getCRRMetrics: cb => getCRRMetrics(log, cb),
|
||||||
getReplicationStates: cb => getReplicationStates(log, cb),
|
getReplicationStates: cb => getReplicationStates(log, cb),
|
||||||
getIngestionInfo: cb => getIngestionInfo(log, cb),
|
getIngestionInfo: cb => getIngestionInfo(log, cb),
|
||||||
|
getVaultReport: cb => vault.report(log, cb),
|
||||||
},
|
},
|
||||||
(err, results) => {
|
(err, results) => {
|
||||||
if (err) {
|
if (err) {
|
||||||
|
@ -488,6 +490,7 @@ function reportHandler(clientIP, req, res, log) {
|
||||||
capabilities: getCapabilities(),
|
capabilities: getCapabilities(),
|
||||||
ingestStats: results.getIngestionInfo.metrics,
|
ingestStats: results.getIngestionInfo.metrics,
|
||||||
ingestStatus: results.getIngestionInfo.status,
|
ingestStatus: results.getIngestionInfo.status,
|
||||||
|
vaultReport: results.getVaultReport,
|
||||||
};
|
};
|
||||||
monitoring.crrCacheToProm(results);
|
monitoring.crrCacheToProm(results);
|
||||||
res.writeHead(200, { 'Content-Type': 'application/json' });
|
res.writeHead(200, { 'Content-Type': 'application/json' });
|
||||||
|
|
|
@ -101,5 +101,12 @@
|
||||||
"legacyAwsBehavior": false,
|
"legacyAwsBehavior": false,
|
||||||
"isCold": true,
|
"isCold": true,
|
||||||
"details": {}
|
"details": {}
|
||||||
|
},
|
||||||
|
"location-azure-archive-v1": {
|
||||||
|
"type": "azure_archive",
|
||||||
|
"objectId": "location-azure-archive-v1",
|
||||||
|
"legacyAwsBehavior": false,
|
||||||
|
"isCold": true,
|
||||||
|
"details": {}
|
||||||
}
|
}
|
||||||
}
|
}
|
|
@ -0,0 +1,12 @@
|
||||||
|
{
|
||||||
|
"STANDARD": {
|
||||||
|
"type": "vitastor",
|
||||||
|
"objectId": "std",
|
||||||
|
"legacyAwsBehavior": true,
|
||||||
|
"details": {
|
||||||
|
"config_path": "/etc/vitastor/vitastor.conf",
|
||||||
|
"pool_id": 3,
|
||||||
|
"metadata_image": "s3-volume-meta"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
|
@ -1,179 +0,0 @@
|
||||||
const Uuid = require('uuid');
|
|
||||||
const WebSocket = require('ws');
|
|
||||||
|
|
||||||
const logger = require('./lib/utilities/logger');
|
|
||||||
const { initManagement } = require('./lib/management');
|
|
||||||
const _config = require('./lib/Config').config;
|
|
||||||
const { managementAgentMessageType } = require('./lib/management/agentClient');
|
|
||||||
const { addOverlayMessageListener } = require('./lib/management/push');
|
|
||||||
const { saveConfigurationVersion } = require('./lib/management/configuration');
|
|
||||||
|
|
||||||
|
|
||||||
// TODO: auth?
|
|
||||||
// TODO: werelogs with a specific name.
|
|
||||||
|
|
||||||
const CHECK_BROKEN_CONNECTIONS_FREQUENCY_MS = 15000;
|
|
||||||
|
|
||||||
|
|
||||||
class ManagementAgentServer {
|
|
||||||
constructor() {
|
|
||||||
this.port = _config.managementAgent.port || 8010;
|
|
||||||
this.wss = null;
|
|
||||||
this.loadedOverlay = null;
|
|
||||||
|
|
||||||
this.stop = this.stop.bind(this);
|
|
||||||
process.on('SIGINT', this.stop);
|
|
||||||
process.on('SIGHUP', this.stop);
|
|
||||||
process.on('SIGQUIT', this.stop);
|
|
||||||
process.on('SIGTERM', this.stop);
|
|
||||||
process.on('SIGPIPE', () => {});
|
|
||||||
}
|
|
||||||
|
|
||||||
start(_cb) {
|
|
||||||
const cb = _cb || function noop() {};
|
|
||||||
|
|
||||||
/* Define REPORT_TOKEN env variable needed by the management
|
|
||||||
* module. */
|
|
||||||
process.env.REPORT_TOKEN = process.env.REPORT_TOKEN
|
|
||||||
|| _config.reportToken
|
|
||||||
|| Uuid.v4();
|
|
||||||
|
|
||||||
initManagement(logger.newRequestLogger(), overlay => {
|
|
||||||
let error = null;
|
|
||||||
|
|
||||||
if (overlay) {
|
|
||||||
this.loadedOverlay = overlay;
|
|
||||||
this.startServer();
|
|
||||||
} else {
|
|
||||||
error = new Error('failed to init management');
|
|
||||||
}
|
|
||||||
return cb(error);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
stop() {
|
|
||||||
if (!this.wss) {
|
|
||||||
process.exit(0);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
this.wss.close(() => {
|
|
||||||
logger.info('server shutdown');
|
|
||||||
process.exit(0);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
startServer() {
|
|
||||||
this.wss = new WebSocket.Server({
|
|
||||||
port: this.port,
|
|
||||||
clientTracking: true,
|
|
||||||
path: '/watch',
|
|
||||||
});
|
|
||||||
|
|
||||||
this.wss.on('connection', this.onConnection.bind(this));
|
|
||||||
this.wss.on('listening', this.onListening.bind(this));
|
|
||||||
this.wss.on('error', this.onError.bind(this));
|
|
||||||
|
|
||||||
setInterval(this.checkBrokenConnections.bind(this),
|
|
||||||
CHECK_BROKEN_CONNECTIONS_FREQUENCY_MS);
|
|
||||||
|
|
||||||
addOverlayMessageListener(this.onNewOverlay.bind(this));
|
|
||||||
}
|
|
||||||
|
|
||||||
onConnection(socket, request) {
|
|
||||||
function hearthbeat() {
|
|
||||||
this.isAlive = true;
|
|
||||||
}
|
|
||||||
logger.info('client connected to watch route', {
|
|
||||||
ip: request.connection.remoteAddress,
|
|
||||||
});
|
|
||||||
|
|
||||||
/* eslint-disable no-param-reassign */
|
|
||||||
socket.isAlive = true;
|
|
||||||
socket.on('pong', hearthbeat.bind(socket));
|
|
||||||
|
|
||||||
if (socket.readyState !== socket.OPEN) {
|
|
||||||
logger.error('client socket not in ready state', {
|
|
||||||
state: socket.readyState,
|
|
||||||
client: socket._socket._peername,
|
|
||||||
});
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
const msg = {
|
|
||||||
messageType: managementAgentMessageType.NEW_OVERLAY,
|
|
||||||
payload: this.loadedOverlay,
|
|
||||||
};
|
|
||||||
socket.send(JSON.stringify(msg), error => {
|
|
||||||
if (error) {
|
|
||||||
logger.error('failed to send remoteOverlay to client', {
|
|
||||||
error,
|
|
||||||
client: socket._socket._peername,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
onListening() {
|
|
||||||
logger.info('websocket server listening',
|
|
||||||
{ port: this.port });
|
|
||||||
}
|
|
||||||
|
|
||||||
onError(error) {
|
|
||||||
logger.error('websocket server error', { error });
|
|
||||||
}
|
|
||||||
|
|
||||||
_sendNewOverlayToClient(client) {
|
|
||||||
if (client.readyState !== client.OPEN) {
|
|
||||||
logger.error('client socket not in ready state', {
|
|
||||||
state: client.readyState,
|
|
||||||
client: client._socket._peername,
|
|
||||||
});
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
const msg = {
|
|
||||||
messageType: managementAgentMessageType.NEW_OVERLAY,
|
|
||||||
payload: this.loadedOverlay,
|
|
||||||
};
|
|
||||||
client.send(JSON.stringify(msg), error => {
|
|
||||||
if (error) {
|
|
||||||
logger.error(
|
|
||||||
'failed to send remoteOverlay to management agent client', {
|
|
||||||
error, client: client._socket._peername,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
onNewOverlay(remoteOverlay) {
|
|
||||||
const remoteOverlayObj = JSON.parse(remoteOverlay);
|
|
||||||
saveConfigurationVersion(
|
|
||||||
this.loadedOverlay, remoteOverlayObj, logger, err => {
|
|
||||||
if (err) {
|
|
||||||
logger.error('failed to save remote overlay', { err });
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
this.loadedOverlay = remoteOverlayObj;
|
|
||||||
this.wss.clients.forEach(
|
|
||||||
this._sendNewOverlayToClient.bind(this)
|
|
||||||
);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
checkBrokenConnections() {
|
|
||||||
this.wss.clients.forEach(client => {
|
|
||||||
if (!client.isAlive) {
|
|
||||||
logger.info('close broken connection', {
|
|
||||||
client: client._socket._peername,
|
|
||||||
});
|
|
||||||
client.terminate();
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
client.isAlive = false;
|
|
||||||
client.ping();
|
|
||||||
});
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
const server = new ManagementAgentServer();
|
|
||||||
server.start();
|
|
|
@ -192,3 +192,163 @@ tests:
|
||||||
summary: Very high delete latency
|
summary: Very high delete latency
|
||||||
exp_labels:
|
exp_labels:
|
||||||
severity: critical
|
severity: critical
|
||||||
|
|
||||||
|
# QuotaMetricsNotAvailable (case with bucket quota)
|
||||||
|
##################################################################################################
|
||||||
|
- name: Quota metrics not available (bucket quota)
|
||||||
|
interval: 1m
|
||||||
|
input_series:
|
||||||
|
- series: s3_cloudserver_quota_utilization_service_available{namespace="zenko",service="artesca-data-connector-s3api-metrics"}
|
||||||
|
values: 1+1x6 0+0x20 1+1x6
|
||||||
|
- series: s3_cloudserver_quota_buckets_count{namespace="zenko",job="artesca-data-ops-report-handler"}
|
||||||
|
values: 1+1x32
|
||||||
|
alert_rule_test:
|
||||||
|
- alertname: QuotaMetricsNotAvailable
|
||||||
|
eval_time: 6m
|
||||||
|
exp_alerts: []
|
||||||
|
- alertname: QuotaMetricsNotAvailable
|
||||||
|
eval_time: 15m
|
||||||
|
exp_alerts:
|
||||||
|
- exp_annotations:
|
||||||
|
description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
|
||||||
|
summary: Utilization metrics service not available
|
||||||
|
exp_labels:
|
||||||
|
severity: warning
|
||||||
|
- alertname: QuotaMetricsNotAvailable
|
||||||
|
eval_time: 20m
|
||||||
|
exp_alerts:
|
||||||
|
- exp_annotations:
|
||||||
|
description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
|
||||||
|
summary: Utilization metrics service not available
|
||||||
|
exp_labels:
|
||||||
|
severity: warning
|
||||||
|
- exp_annotations:
|
||||||
|
description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
|
||||||
|
summary: Utilization metrics service not available
|
||||||
|
exp_labels:
|
||||||
|
severity: critical
|
||||||
|
- alertname: QuotaMetricsNotAvailable
|
||||||
|
eval_time: 28m
|
||||||
|
exp_alerts: []
|
||||||
|
|
||||||
|
# QuotaMetricsNotAvailable (case with account quota)
|
||||||
|
##################################################################################################
|
||||||
|
- name: Quota metrics not available (account quota)
|
||||||
|
interval: 1m
|
||||||
|
input_series:
|
||||||
|
- series: s3_cloudserver_quota_utilization_service_available{namespace="zenko",service="artesca-data-connector-s3api-metrics"}
|
||||||
|
values: 1+1x6 0+0x20 1+1x6
|
||||||
|
- series: s3_cloudserver_quota_accounts_count{namespace="zenko",job="artesca-data-ops-report-handler"}
|
||||||
|
values: 1+1x32
|
||||||
|
alert_rule_test:
|
||||||
|
- alertname: QuotaMetricsNotAvailable
|
||||||
|
eval_time: 6m
|
||||||
|
exp_alerts: []
|
||||||
|
- alertname: QuotaMetricsNotAvailable
|
||||||
|
eval_time: 15m
|
||||||
|
exp_alerts:
|
||||||
|
- exp_annotations:
|
||||||
|
description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
|
||||||
|
summary: Utilization metrics service not available
|
||||||
|
exp_labels:
|
||||||
|
severity: warning
|
||||||
|
- alertname: QuotaMetricsNotAvailable
|
||||||
|
eval_time: 20m
|
||||||
|
exp_alerts:
|
||||||
|
- exp_annotations:
|
||||||
|
description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
|
||||||
|
summary: Utilization metrics service not available
|
||||||
|
exp_labels:
|
||||||
|
severity: warning
|
||||||
|
- exp_annotations:
|
||||||
|
description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
|
||||||
|
summary: Utilization metrics service not available
|
||||||
|
exp_labels:
|
||||||
|
severity: critical
|
||||||
|
- alertname: QuotaMetricsNotAvailable
|
||||||
|
eval_time: 28m
|
||||||
|
exp_alerts: []
|
||||||
|
|
||||||
|
# QuotaMetricsNotAvailable (case with both quota quota)
|
||||||
|
##################################################################################################
|
||||||
|
- name: Quota metrics not available (account quota)
|
||||||
|
interval: 1m
|
||||||
|
input_series:
|
||||||
|
- series: s3_cloudserver_quota_utilization_service_available{namespace="zenko",service="artesca-data-connector-s3api-metrics"}
|
||||||
|
values: 1+1x6 0+0x20 1+1x6
|
||||||
|
- series: s3_cloudserver_quota_accounts_count{namespace="zenko",job="artesca-data-ops-report-handler"}
|
||||||
|
values: 1+1x32
|
||||||
|
- series: s3_cloudserver_quota_buckets_count{namespace="zenko",job="artesca-data-ops-report-handler"}
|
||||||
|
values: 1+1x32
|
||||||
|
alert_rule_test:
|
||||||
|
- alertname: QuotaMetricsNotAvailable
|
||||||
|
eval_time: 6m
|
||||||
|
exp_alerts: []
|
||||||
|
- alertname: QuotaMetricsNotAvailable
|
||||||
|
eval_time: 15m
|
||||||
|
exp_alerts:
|
||||||
|
- exp_annotations:
|
||||||
|
description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
|
||||||
|
summary: Utilization metrics service not available
|
||||||
|
exp_labels:
|
||||||
|
severity: warning
|
||||||
|
- alertname: QuotaMetricsNotAvailable
|
||||||
|
eval_time: 20m
|
||||||
|
exp_alerts:
|
||||||
|
- exp_annotations:
|
||||||
|
description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
|
||||||
|
summary: Utilization metrics service not available
|
||||||
|
exp_labels:
|
||||||
|
severity: warning
|
||||||
|
- exp_annotations:
|
||||||
|
description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
|
||||||
|
summary: Utilization metrics service not available
|
||||||
|
exp_labels:
|
||||||
|
severity: critical
|
||||||
|
- alertname: QuotaMetricsNotAvailable
|
||||||
|
eval_time: 28m
|
||||||
|
exp_alerts: []
|
||||||
|
|
||||||
|
# QuotaMetricsNotAvailable (case without quota)
|
||||||
|
##################################################################################################
|
||||||
|
- name: Utilization service Latency
|
||||||
|
interval: 1m
|
||||||
|
input_series:
|
||||||
|
- series: s3_cloudserver_quota_utilization_service_available{namespace="zenko",service="artesca-data-connector-s3api-metrics"}
|
||||||
|
values: 1+1x6 0+0x20 1+1x6
|
||||||
|
alert_rule_test:
|
||||||
|
- alertname: QuotaMetricsNotAvailable
|
||||||
|
eval_time: 6m
|
||||||
|
exp_alerts: []
|
||||||
|
- alertname: QuotaMetricsNotAvailable
|
||||||
|
eval_time: 15m
|
||||||
|
exp_alerts: []
|
||||||
|
- alertname: QuotaMetricsNotAvailable
|
||||||
|
eval_time: 20m
|
||||||
|
exp_alerts: []
|
||||||
|
- alertname: QuotaMetricsNotAvailable
|
||||||
|
eval_time: 28m
|
||||||
|
exp_alerts: []
|
||||||
|
|
||||||
|
# QuotaUnavailable
|
||||||
|
##################################################################################################
|
||||||
|
- name: Quota evaluation disabled
|
||||||
|
interval: 1m
|
||||||
|
input_series:
|
||||||
|
- series: s3_cloudserver_quota_unavailable_count{namespace="zenko",service="artesca-data-connector-s3api-metrics"}
|
||||||
|
values: 0+0x6 1+1x20 0+0x6
|
||||||
|
alert_rule_test:
|
||||||
|
- alertname: QuotaUnavailable
|
||||||
|
eval_time: 6m
|
||||||
|
exp_alerts: []
|
||||||
|
- alertname: QuotaUnavailable
|
||||||
|
eval_time: 20m
|
||||||
|
exp_alerts:
|
||||||
|
- exp_annotations:
|
||||||
|
description: Quotas were not honored due to metrics being unavailable. If the S3 Bucket or Account was created recently, the metrics may not be available yet.
|
||||||
|
summary: High number of quota requests with metrics unavailable
|
||||||
|
exp_labels:
|
||||||
|
severity: critical
|
||||||
|
- alertname: QuotaUnavailable
|
||||||
|
eval_time: 30m
|
||||||
|
exp_alerts: []
|
||||||
|
|
|
@ -6,6 +6,9 @@ x-inputs:
|
||||||
- name: service
|
- name: service
|
||||||
type: constant
|
type: constant
|
||||||
value: artesca-data-connector-s3api-metrics
|
value: artesca-data-connector-s3api-metrics
|
||||||
|
- name: reportJob
|
||||||
|
type: constant
|
||||||
|
value: artesca-data-ops-report-handler
|
||||||
- name: replicas
|
- name: replicas
|
||||||
type: constant
|
type: constant
|
||||||
- name: systemErrorsWarningThreshold
|
- name: systemErrorsWarningThreshold
|
||||||
|
@ -26,6 +29,9 @@ x-inputs:
|
||||||
- name: deleteLatencyCriticalThreshold
|
- name: deleteLatencyCriticalThreshold
|
||||||
type: config
|
type: config
|
||||||
value: 1.000
|
value: 1.000
|
||||||
|
- name: quotaUnavailabilityThreshold
|
||||||
|
type: config
|
||||||
|
value: 0.500
|
||||||
|
|
||||||
groups:
|
groups:
|
||||||
- name: CloudServer
|
- name: CloudServer
|
||||||
|
@ -132,3 +138,45 @@ groups:
|
||||||
annotations:
|
annotations:
|
||||||
description: "Latency of delete object operations is more than 1s"
|
description: "Latency of delete object operations is more than 1s"
|
||||||
summary: "Very high delete latency"
|
summary: "Very high delete latency"
|
||||||
|
|
||||||
|
# As a platform admin I want to be alerted (warning) when the utilization metrics service is enabled
|
||||||
|
# but not available for at least half of the S3 services during the last minute
|
||||||
|
- alert: QuotaMetricsNotAvailable
|
||||||
|
expr: |
|
||||||
|
avg(s3_cloudserver_quota_utilization_service_available{namespace="${namespace}",service="${service}"})
|
||||||
|
< ${quotaUnavailabilityThreshold} and
|
||||||
|
(max(s3_cloudserver_quota_buckets_count{namespace="${namespace}", job="${reportJob}"}) > 0 or
|
||||||
|
max(s3_cloudserver_quota_accounts_count{namespace="${namespace}", job="${reportJob}"}) > 0)
|
||||||
|
labels:
|
||||||
|
severity: warning
|
||||||
|
annotations:
|
||||||
|
description: "The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled."
|
||||||
|
summary: "Utilization metrics service not available"
|
||||||
|
|
||||||
|
# As a platform admin I want to be alerted (critical) when the utilization metrics service is enabled
|
||||||
|
# but not available during the last 10 minutes
|
||||||
|
- alert: QuotaMetricsNotAvailable
|
||||||
|
expr: |
|
||||||
|
avg(s3_cloudserver_quota_utilization_service_available{namespace="${namespace}",service="${service}"})
|
||||||
|
< ${quotaUnavailabilityThreshold} and
|
||||||
|
(max(s3_cloudserver_quota_buckets_count{namespace="${namespace}", job="${reportJob}"}) > 0 or
|
||||||
|
max(s3_cloudserver_quota_accounts_count{namespace="${namespace}", job="${reportJob}"}) > 0)
|
||||||
|
for: 10m
|
||||||
|
labels:
|
||||||
|
severity: critical
|
||||||
|
annotations:
|
||||||
|
description: "The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled."
|
||||||
|
summary: "Utilization metrics service not available"
|
||||||
|
|
||||||
|
# As a platform admin I want to be alerted (critical) when quotas were not honored due to metrics
|
||||||
|
# being unavailable
|
||||||
|
- alert: QuotaUnavailable
|
||||||
|
expr: |
|
||||||
|
sum(increase(s3_cloudserver_quota_unavailable_count{namespace="${namespace}",service="${service}"}[2m]))
|
||||||
|
> 0
|
||||||
|
for: 5m
|
||||||
|
labels:
|
||||||
|
severity: critical
|
||||||
|
annotations:
|
||||||
|
description: "Quotas were not honored due to metrics being unavailable. If the S3 Bucket or Account was created recently, the metrics may not be available yet."
|
||||||
|
summary: "High number of quota requests with metrics unavailable"
|
||||||
|
|
|
@ -1625,7 +1625,7 @@
|
||||||
"targets": [
|
"targets": [
|
||||||
{
|
{
|
||||||
"datasource": null,
|
"datasource": null,
|
||||||
"expr": "sum(rate(http_requests_total{namespace=\"${namespace}\", job=~\"$job\"}[$__rate_interval])) by(action)",
|
"expr": "sum(rate(s3_cloudserver_http_requests_total{namespace=\"${namespace}\", job=~\"$job\"}[$__rate_interval])) by(action)",
|
||||||
"format": "time_series",
|
"format": "time_series",
|
||||||
"hide": false,
|
"hide": false,
|
||||||
"instant": false,
|
"instant": false,
|
||||||
|
@ -1697,7 +1697,7 @@
|
||||||
"targets": [
|
"targets": [
|
||||||
{
|
{
|
||||||
"datasource": null,
|
"datasource": null,
|
||||||
"expr": "sum(round(increase(http_requests_total{namespace=\"${namespace}\", job=~\"$job\"}[$__rate_interval]))) by(method)",
|
"expr": "sum(round(increase(s3_cloudserver_http_requests_total{namespace=\"${namespace}\", job=~\"$job\"}[$__rate_interval]))) by(method)",
|
||||||
"format": "time_series",
|
"format": "time_series",
|
||||||
"hide": false,
|
"hide": false,
|
||||||
"instant": false,
|
"instant": false,
|
||||||
|
@ -1931,7 +1931,7 @@
|
||||||
"targets": [
|
"targets": [
|
||||||
{
|
{
|
||||||
"datasource": null,
|
"datasource": null,
|
||||||
"expr": "sum by(le) (increase(s3_cloudserver_http_request_duration_seconds_bucket{namespace=\"${namespace}\", job=~\"$job\"}[$__rate_interval]))",
|
"expr": "sum by(le) (increase(s3_cloudserver_http_request_duration_seconds_bucket{namespace=\"${namespace}\", job=\"${job}\"}[$__rate_interval]))",
|
||||||
"format": "heatmap",
|
"format": "heatmap",
|
||||||
"hide": false,
|
"hide": false,
|
||||||
"instant": false,
|
"instant": false,
|
||||||
|
@ -1960,7 +1960,7 @@
|
||||||
},
|
},
|
||||||
"yAxis": {
|
"yAxis": {
|
||||||
"decimals": null,
|
"decimals": null,
|
||||||
"format": "dtdurations",
|
"format": "s",
|
||||||
"label": null,
|
"label": null,
|
||||||
"logBase": 1,
|
"logBase": 1,
|
||||||
"max": null,
|
"max": null,
|
||||||
|
@ -2182,7 +2182,7 @@
|
||||||
"targets": [
|
"targets": [
|
||||||
{
|
{
|
||||||
"datasource": null,
|
"datasource": null,
|
||||||
"expr": "sum(rate(s3_cloudserver_http_response_size_bytes_sum{namespace=\"${namespace}\", job=~\"$job\"}[$__rate_interval]))",
|
"expr": "sum(rate(s3_cloudserver_http_response_size_bytes_sum{namespace=\"${namespace}\", job=\"${job}\"}[$__rate_interval]))",
|
||||||
"format": "time_series",
|
"format": "time_series",
|
||||||
"hide": false,
|
"hide": false,
|
||||||
"instant": false,
|
"instant": false,
|
||||||
|
@ -2196,7 +2196,7 @@
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"datasource": null,
|
"datasource": null,
|
||||||
"expr": "sum(rate(s3_cloudserver_http_request_size_bytes_sum{namespace=\"${namespace}\", job=~\"$job\"}[$__rate_interval]))",
|
"expr": "sum(rate(s3_cloudserver_http_request_size_bytes_sum{namespace=\"${namespace}\", job=\"${job}\"}[$__rate_interval]))",
|
||||||
"format": "time_series",
|
"format": "time_series",
|
||||||
"hide": false,
|
"hide": false,
|
||||||
"instant": false,
|
"instant": false,
|
||||||
|
@ -2665,6 +2665,865 @@
|
||||||
"transformations": [],
|
"transformations": [],
|
||||||
"transparent": false,
|
"transparent": false,
|
||||||
"type": "piechart"
|
"type": "piechart"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"collapsed": false,
|
||||||
|
"editable": true,
|
||||||
|
"error": false,
|
||||||
|
"fieldConfig": {
|
||||||
|
"defaults": {
|
||||||
|
"thresholds": {
|
||||||
|
"mode": "absolute",
|
||||||
|
"steps": []
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"gridPos": {
|
||||||
|
"h": 1,
|
||||||
|
"w": 24,
|
||||||
|
"x": 0,
|
||||||
|
"y": 65
|
||||||
|
},
|
||||||
|
"hideTimeOverride": false,
|
||||||
|
"id": 34,
|
||||||
|
"links": [],
|
||||||
|
"maxDataPoints": 100,
|
||||||
|
"panels": [],
|
||||||
|
"targets": [],
|
||||||
|
"title": "Quotas",
|
||||||
|
"transformations": [],
|
||||||
|
"transparent": false,
|
||||||
|
"type": "row"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"datasource": "${DS_PROMETHEUS}",
|
||||||
|
"description": "Number of S3 buckets with quota enabled in the cluster.\nThis value is computed asynchronously, and update may be delayed up to 1h.",
|
||||||
|
"editable": true,
|
||||||
|
"error": false,
|
||||||
|
"fieldConfig": {
|
||||||
|
"defaults": {
|
||||||
|
"custom": {},
|
||||||
|
"decimals": null,
|
||||||
|
"mappings": [],
|
||||||
|
"noValue": "-",
|
||||||
|
"thresholds": {
|
||||||
|
"mode": "absolute",
|
||||||
|
"steps": [
|
||||||
|
{
|
||||||
|
"color": "#808080",
|
||||||
|
"index": 0,
|
||||||
|
"line": true,
|
||||||
|
"op": "gt",
|
||||||
|
"value": "null",
|
||||||
|
"yaxis": "left"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"color": "blue",
|
||||||
|
"index": 1,
|
||||||
|
"line": true,
|
||||||
|
"op": "gt",
|
||||||
|
"value": 0.0,
|
||||||
|
"yaxis": "left"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"unit": "short"
|
||||||
|
},
|
||||||
|
"overrides": []
|
||||||
|
},
|
||||||
|
"gridPos": {
|
||||||
|
"h": 4,
|
||||||
|
"w": 6,
|
||||||
|
"x": 0,
|
||||||
|
"y": 66
|
||||||
|
},
|
||||||
|
"hideTimeOverride": false,
|
||||||
|
"id": 35,
|
||||||
|
"links": [],
|
||||||
|
"maxDataPoints": 100,
|
||||||
|
"options": {
|
||||||
|
"colorMode": "value",
|
||||||
|
"graphMode": "area",
|
||||||
|
"justifyMode": "auto",
|
||||||
|
"orientation": "auto",
|
||||||
|
"reduceOptions": {
|
||||||
|
"calcs": [
|
||||||
|
"lastNotNull"
|
||||||
|
],
|
||||||
|
"fields": "",
|
||||||
|
"values": false
|
||||||
|
},
|
||||||
|
"textMode": "auto"
|
||||||
|
},
|
||||||
|
"targets": [
|
||||||
|
{
|
||||||
|
"datasource": null,
|
||||||
|
"expr": "max(s3_cloudserver_quota_buckets_count{namespace=\"${namespace}\", job=~\"${reportJob}\"})",
|
||||||
|
"format": "time_series",
|
||||||
|
"hide": false,
|
||||||
|
"instant": false,
|
||||||
|
"interval": "",
|
||||||
|
"intervalFactor": 1,
|
||||||
|
"legendFormat": "",
|
||||||
|
"metric": "",
|
||||||
|
"refId": "",
|
||||||
|
"step": 10,
|
||||||
|
"target": ""
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"title": "Buckets with quota",
|
||||||
|
"transformations": [],
|
||||||
|
"transparent": false,
|
||||||
|
"type": "stat"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"datasource": "${DS_PROMETHEUS}",
|
||||||
|
"description": "Number of accounts with quota enabled in the cluster.\nThis value is computed asynchronously, and update may be delayed up to 1h.",
|
||||||
|
"editable": true,
|
||||||
|
"error": false,
|
||||||
|
"fieldConfig": {
|
||||||
|
"defaults": {
|
||||||
|
"custom": {},
|
||||||
|
"decimals": null,
|
||||||
|
"mappings": [],
|
||||||
|
"noValue": "-",
|
||||||
|
"thresholds": {
|
||||||
|
"mode": "absolute",
|
||||||
|
"steps": [
|
||||||
|
{
|
||||||
|
"color": "#808080",
|
||||||
|
"index": 0,
|
||||||
|
"line": true,
|
||||||
|
"op": "gt",
|
||||||
|
"value": "null",
|
||||||
|
"yaxis": "left"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"color": "blue",
|
||||||
|
"index": 1,
|
||||||
|
"line": true,
|
||||||
|
"op": "gt",
|
||||||
|
"value": 0.0,
|
||||||
|
"yaxis": "left"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"unit": "short"
|
||||||
|
},
|
||||||
|
"overrides": []
|
||||||
|
},
|
||||||
|
"gridPos": {
|
||||||
|
"h": 4,
|
||||||
|
"w": 6,
|
||||||
|
"x": 0,
|
||||||
|
"y": 70
|
||||||
|
},
|
||||||
|
"hideTimeOverride": false,
|
||||||
|
"id": 36,
|
||||||
|
"links": [],
|
||||||
|
"maxDataPoints": 100,
|
||||||
|
"options": {
|
||||||
|
"colorMode": "value",
|
||||||
|
"graphMode": "area",
|
||||||
|
"justifyMode": "auto",
|
||||||
|
"orientation": "auto",
|
||||||
|
"reduceOptions": {
|
||||||
|
"calcs": [
|
||||||
|
"lastNotNull"
|
||||||
|
],
|
||||||
|
"fields": "",
|
||||||
|
"values": false
|
||||||
|
},
|
||||||
|
"textMode": "auto"
|
||||||
|
},
|
||||||
|
"targets": [
|
||||||
|
{
|
||||||
|
"datasource": null,
|
||||||
|
"expr": "max(s3_cloudserver_quota_accounts_count{namespace=\"${namespace}\", job=~\"${reportJob}\"})",
|
||||||
|
"format": "time_series",
|
||||||
|
"hide": false,
|
||||||
|
"instant": false,
|
||||||
|
"interval": "",
|
||||||
|
"intervalFactor": 1,
|
||||||
|
"legendFormat": "",
|
||||||
|
"metric": "",
|
||||||
|
"refId": "",
|
||||||
|
"step": 10,
|
||||||
|
"target": ""
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"title": "Accounts with quota",
|
||||||
|
"transformations": [],
|
||||||
|
"transparent": false,
|
||||||
|
"type": "stat"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"datasource": "${DS_PROMETHEUS}",
|
||||||
|
"editable": true,
|
||||||
|
"error": false,
|
||||||
|
"fieldConfig": {
|
||||||
|
"defaults": {
|
||||||
|
"color": {
|
||||||
|
"mode": "palette-classic"
|
||||||
|
},
|
||||||
|
"custom": {
|
||||||
|
"axisLabel": "",
|
||||||
|
"axisPlacement": "auto",
|
||||||
|
"barAlignment": 0,
|
||||||
|
"drawStyle": "line",
|
||||||
|
"fillOpacity": 30,
|
||||||
|
"gradientMode": "none",
|
||||||
|
"hideFrom": {
|
||||||
|
"legend": false,
|
||||||
|
"tooltip": false,
|
||||||
|
"viz": false
|
||||||
|
},
|
||||||
|
"lineInterpolation": "smooth",
|
||||||
|
"lineWidth": 1,
|
||||||
|
"pointSize": 5,
|
||||||
|
"scaleDistribution": {
|
||||||
|
"log": 2,
|
||||||
|
"type": "linear"
|
||||||
|
},
|
||||||
|
"showPoints": "auto",
|
||||||
|
"spanNulls": false,
|
||||||
|
"stacking": {},
|
||||||
|
"thresholdsStyle": {
|
||||||
|
"mode": "off"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"mappings": [],
|
||||||
|
"thresholds": {
|
||||||
|
"mode": "absolute",
|
||||||
|
"steps": []
|
||||||
|
},
|
||||||
|
"unit": "ops"
|
||||||
|
},
|
||||||
|
"overrides": []
|
||||||
|
},
|
||||||
|
"gridPos": {
|
||||||
|
"h": 8,
|
||||||
|
"w": 6,
|
||||||
|
"x": 6,
|
||||||
|
"y": 66
|
||||||
|
},
|
||||||
|
"hideTimeOverride": false,
|
||||||
|
"id": 37,
|
||||||
|
"links": [],
|
||||||
|
"maxDataPoints": 100,
|
||||||
|
"options": {
|
||||||
|
"legend": {
|
||||||
|
"calcs": [],
|
||||||
|
"displayMode": "hidden",
|
||||||
|
"placement": "bottom"
|
||||||
|
},
|
||||||
|
"tooltip": {
|
||||||
|
"mode": "single"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"targets": [
|
||||||
|
{
|
||||||
|
"datasource": null,
|
||||||
|
"expr": "sum(rate(s3_cloudserver_quota_unavailable_count{namespace=\"${namespace}\", job=\"${job}\"}[$__rate_interval]))",
|
||||||
|
"format": "time_series",
|
||||||
|
"hide": false,
|
||||||
|
"instant": false,
|
||||||
|
"interval": "",
|
||||||
|
"intervalFactor": 1,
|
||||||
|
"legendFormat": "",
|
||||||
|
"metric": "",
|
||||||
|
"refId": "",
|
||||||
|
"step": 10,
|
||||||
|
"target": ""
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"title": "Operations with unavailable metrics",
|
||||||
|
"transformations": [],
|
||||||
|
"transparent": false,
|
||||||
|
"type": "timeseries"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"datasource": "${DS_PROMETHEUS}",
|
||||||
|
"editable": true,
|
||||||
|
"error": false,
|
||||||
|
"fieldConfig": {
|
||||||
|
"defaults": {
|
||||||
|
"color": {
|
||||||
|
"mode": "palette-classic"
|
||||||
|
},
|
||||||
|
"custom": {
|
||||||
|
"axisLabel": "",
|
||||||
|
"axisPlacement": "auto",
|
||||||
|
"barAlignment": 0,
|
||||||
|
"drawStyle": "line",
|
||||||
|
"fillOpacity": 0,
|
||||||
|
"gradientMode": "none",
|
||||||
|
"hideFrom": {
|
||||||
|
"legend": false,
|
||||||
|
"tooltip": false,
|
||||||
|
"viz": false
|
||||||
|
},
|
||||||
|
"lineInterpolation": "smooth",
|
||||||
|
"lineWidth": 1,
|
||||||
|
"pointSize": 5,
|
||||||
|
"scaleDistribution": {
|
||||||
|
"log": 2,
|
||||||
|
"type": "linear"
|
||||||
|
},
|
||||||
|
"showPoints": "auto",
|
||||||
|
"spanNulls": false,
|
||||||
|
"stacking": {},
|
||||||
|
"thresholdsStyle": {
|
||||||
|
"mode": "off"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"mappings": [],
|
||||||
|
"thresholds": {
|
||||||
|
"mode": "absolute",
|
||||||
|
"steps": []
|
||||||
|
},
|
||||||
|
"unit": "ops"
|
||||||
|
},
|
||||||
|
"overrides": []
|
||||||
|
},
|
||||||
|
"gridPos": {
|
||||||
|
"h": 8,
|
||||||
|
"w": 12,
|
||||||
|
"x": 12,
|
||||||
|
"y": 66
|
||||||
|
},
|
||||||
|
"hideTimeOverride": false,
|
||||||
|
"id": 38,
|
||||||
|
"links": [],
|
||||||
|
"maxDataPoints": 100,
|
||||||
|
"options": {
|
||||||
|
"legend": {
|
||||||
|
"calcs": [
|
||||||
|
"min",
|
||||||
|
"mean",
|
||||||
|
"max"
|
||||||
|
],
|
||||||
|
"displayMode": "table",
|
||||||
|
"placement": "right"
|
||||||
|
},
|
||||||
|
"tooltip": {
|
||||||
|
"mode": "single"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"targets": [
|
||||||
|
{
|
||||||
|
"datasource": null,
|
||||||
|
"expr": "sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace=\"${namespace}\", job=\"${job}\"}[$__rate_interval])) by(action)",
|
||||||
|
"format": "time_series",
|
||||||
|
"hide": false,
|
||||||
|
"instant": false,
|
||||||
|
"interval": "",
|
||||||
|
"intervalFactor": 1,
|
||||||
|
"legendFormat": "{{action}}",
|
||||||
|
"metric": "",
|
||||||
|
"refId": "",
|
||||||
|
"step": 10,
|
||||||
|
"target": ""
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"title": "Quota evaluaton rate per S3 action",
|
||||||
|
"transformations": [],
|
||||||
|
"transparent": false,
|
||||||
|
"type": "timeseries"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"datasource": "${DS_PROMETHEUS}",
|
||||||
|
"editable": true,
|
||||||
|
"error": false,
|
||||||
|
"fieldConfig": {
|
||||||
|
"defaults": {
|
||||||
|
"color": {
|
||||||
|
"mode": "palette-classic"
|
||||||
|
},
|
||||||
|
"custom": {
|
||||||
|
"axisLabel": "",
|
||||||
|
"axisPlacement": "auto",
|
||||||
|
"barAlignment": 0,
|
||||||
|
"drawStyle": "line",
|
||||||
|
"fillOpacity": 30,
|
||||||
|
"gradientMode": "none",
|
||||||
|
"hideFrom": {
|
||||||
|
"legend": false,
|
||||||
|
"tooltip": false,
|
||||||
|
"viz": false
|
||||||
|
},
|
||||||
|
"lineInterpolation": "stepAfter",
|
||||||
|
"lineWidth": 1,
|
||||||
|
"pointSize": 5,
|
||||||
|
"scaleDistribution": {
|
||||||
|
"log": 2,
|
||||||
|
"type": "linear"
|
||||||
|
},
|
||||||
|
"showPoints": "auto",
|
||||||
|
"spanNulls": false,
|
||||||
|
"stacking": {},
|
||||||
|
"thresholdsStyle": {
|
||||||
|
"mode": "off"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"mappings": [],
|
||||||
|
"thresholds": {
|
||||||
|
"mode": "absolute",
|
||||||
|
"steps": [
|
||||||
|
{
|
||||||
|
"color": "green",
|
||||||
|
"index": 0,
|
||||||
|
"line": true,
|
||||||
|
"op": "gt",
|
||||||
|
"value": "null",
|
||||||
|
"yaxis": "left"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"color": "orange",
|
||||||
|
"index": 1,
|
||||||
|
"line": true,
|
||||||
|
"op": "gt",
|
||||||
|
"value": 90.0,
|
||||||
|
"yaxis": "left"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"color": "red",
|
||||||
|
"index": 2,
|
||||||
|
"line": true,
|
||||||
|
"op": "gt",
|
||||||
|
"value": 0.0,
|
||||||
|
"yaxis": "left"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"unit": "percent"
|
||||||
|
},
|
||||||
|
"overrides": []
|
||||||
|
},
|
||||||
|
"gridPos": {
|
||||||
|
"h": 8,
|
||||||
|
"w": 6,
|
||||||
|
"x": 0,
|
||||||
|
"y": 74
|
||||||
|
},
|
||||||
|
"hideTimeOverride": false,
|
||||||
|
"id": 39,
|
||||||
|
"links": [],
|
||||||
|
"maxDataPoints": 100,
|
||||||
|
"options": {
|
||||||
|
"legend": {
|
||||||
|
"calcs": [],
|
||||||
|
"displayMode": "hidden",
|
||||||
|
"placement": "bottom"
|
||||||
|
},
|
||||||
|
"tooltip": {
|
||||||
|
"mode": "single"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"targets": [
|
||||||
|
{
|
||||||
|
"datasource": null,
|
||||||
|
"expr": "avg(avg_over_time(s3_cloudserver_quota_utilization_service_available{namespace=\"${namespace}\",job=\"${job}\"}[$__rate_interval])) * 100",
|
||||||
|
"format": "time_series",
|
||||||
|
"hide": false,
|
||||||
|
"instant": false,
|
||||||
|
"interval": "",
|
||||||
|
"intervalFactor": 1,
|
||||||
|
"legendFormat": "",
|
||||||
|
"metric": "",
|
||||||
|
"refId": "",
|
||||||
|
"step": 10,
|
||||||
|
"target": ""
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"title": "Quota service uptime",
|
||||||
|
"transformations": [],
|
||||||
|
"transparent": false,
|
||||||
|
"type": "timeseries"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"datasource": "${DS_PROMETHEUS}",
|
||||||
|
"editable": true,
|
||||||
|
"error": false,
|
||||||
|
"fieldConfig": {
|
||||||
|
"defaults": {
|
||||||
|
"color": {
|
||||||
|
"mode": "palette-classic"
|
||||||
|
},
|
||||||
|
"custom": {
|
||||||
|
"axisLabel": "",
|
||||||
|
"axisPlacement": "auto",
|
||||||
|
"barAlignment": 0,
|
||||||
|
"drawStyle": "line",
|
||||||
|
"fillOpacity": 30,
|
||||||
|
"gradientMode": "none",
|
||||||
|
"hideFrom": {
|
||||||
|
"legend": false,
|
||||||
|
"tooltip": false,
|
||||||
|
"viz": false
|
||||||
|
},
|
||||||
|
"lineInterpolation": "smooth",
|
||||||
|
"lineWidth": 1,
|
||||||
|
"pointSize": 5,
|
||||||
|
"scaleDistribution": {
|
||||||
|
"log": 2,
|
||||||
|
"type": "linear"
|
||||||
|
},
|
||||||
|
"showPoints": "auto",
|
||||||
|
"spanNulls": false,
|
||||||
|
"stacking": {},
|
||||||
|
"thresholdsStyle": {
|
||||||
|
"mode": "off"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"mappings": [],
|
||||||
|
"thresholds": {
|
||||||
|
"mode": "absolute",
|
||||||
|
"steps": []
|
||||||
|
},
|
||||||
|
"unit": "ops"
|
||||||
|
},
|
||||||
|
"overrides": []
|
||||||
|
},
|
||||||
|
"gridPos": {
|
||||||
|
"h": 8,
|
||||||
|
"w": 6,
|
||||||
|
"x": 6,
|
||||||
|
"y": 74
|
||||||
|
},
|
||||||
|
"hideTimeOverride": false,
|
||||||
|
"id": 40,
|
||||||
|
"links": [],
|
||||||
|
"maxDataPoints": 100,
|
||||||
|
"options": {
|
||||||
|
"legend": {
|
||||||
|
"calcs": [],
|
||||||
|
"displayMode": "list",
|
||||||
|
"placement": "bottom"
|
||||||
|
},
|
||||||
|
"tooltip": {
|
||||||
|
"mode": "single"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"targets": [
|
||||||
|
{
|
||||||
|
"datasource": null,
|
||||||
|
"expr": "sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace=\"${namespace}\", code=~\"2..\", job=\"${job}\"}[$__rate_interval]))",
|
||||||
|
"format": "time_series",
|
||||||
|
"hide": false,
|
||||||
|
"instant": false,
|
||||||
|
"interval": "",
|
||||||
|
"intervalFactor": 1,
|
||||||
|
"legendFormat": "Success",
|
||||||
|
"metric": "",
|
||||||
|
"refId": "",
|
||||||
|
"step": 10,
|
||||||
|
"target": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"datasource": null,
|
||||||
|
"expr": "sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace=\"${namespace}\", code=\"429\", job=\"${job}\"}[$__rate_interval]))",
|
||||||
|
"format": "time_series",
|
||||||
|
"hide": false,
|
||||||
|
"instant": false,
|
||||||
|
"interval": "",
|
||||||
|
"intervalFactor": 1,
|
||||||
|
"legendFormat": "Quota Exceeded",
|
||||||
|
"metric": "",
|
||||||
|
"refId": "",
|
||||||
|
"step": 10,
|
||||||
|
"target": ""
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"title": "Quota evaluation status code over time",
|
||||||
|
"transformations": [],
|
||||||
|
"transparent": false,
|
||||||
|
"type": "timeseries"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"datasource": "${DS_PROMETHEUS}",
|
||||||
|
"editable": true,
|
||||||
|
"error": false,
|
||||||
|
"fieldConfig": {
|
||||||
|
"defaults": {
|
||||||
|
"color": {
|
||||||
|
"mode": "palette-classic"
|
||||||
|
},
|
||||||
|
"custom": {
|
||||||
|
"axisLabel": "",
|
||||||
|
"axisPlacement": "auto",
|
||||||
|
"barAlignment": 0,
|
||||||
|
"drawStyle": "line",
|
||||||
|
"fillOpacity": 0,
|
||||||
|
"gradientMode": "none",
|
||||||
|
"hideFrom": {
|
||||||
|
"legend": false,
|
||||||
|
"tooltip": false,
|
||||||
|
"viz": false
|
||||||
|
},
|
||||||
|
"lineInterpolation": "smooth",
|
||||||
|
"lineWidth": 1,
|
||||||
|
"pointSize": 5,
|
||||||
|
"scaleDistribution": {
|
||||||
|
"log": 2,
|
||||||
|
"type": "linear"
|
||||||
|
},
|
||||||
|
"showPoints": "auto",
|
||||||
|
"spanNulls": 180000,
|
||||||
|
"stacking": {},
|
||||||
|
"thresholdsStyle": {
|
||||||
|
"mode": "off"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"mappings": [],
|
||||||
|
"thresholds": {
|
||||||
|
"mode": "absolute",
|
||||||
|
"steps": []
|
||||||
|
},
|
||||||
|
"unit": "s"
|
||||||
|
},
|
||||||
|
"overrides": []
|
||||||
|
},
|
||||||
|
"gridPos": {
|
||||||
|
"h": 8,
|
||||||
|
"w": 12,
|
||||||
|
"x": 12,
|
||||||
|
"y": 74
|
||||||
|
},
|
||||||
|
"hideTimeOverride": false,
|
||||||
|
"id": 41,
|
||||||
|
"links": [],
|
||||||
|
"maxDataPoints": 100,
|
||||||
|
"options": {
|
||||||
|
"legend": {
|
||||||
|
"calcs": [
|
||||||
|
"min",
|
||||||
|
"mean",
|
||||||
|
"max"
|
||||||
|
],
|
||||||
|
"displayMode": "table",
|
||||||
|
"placement": "right"
|
||||||
|
},
|
||||||
|
"tooltip": {
|
||||||
|
"mode": "single"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"targets": [
|
||||||
|
{
|
||||||
|
"datasource": null,
|
||||||
|
"expr": "sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_sum{namespace=\"${namespace}\", job=\"${job}\", code=~\"2..\"}[$__rate_interval])) by (type)\n /\nsum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace=\"${namespace}\", job=\"${job}\", code=~\"2..\"}[$__rate_interval])) by (type)",
|
||||||
|
"format": "time_series",
|
||||||
|
"hide": false,
|
||||||
|
"instant": false,
|
||||||
|
"interval": "",
|
||||||
|
"intervalFactor": 1,
|
||||||
|
"legendFormat": "{{ type }} (success)",
|
||||||
|
"metric": "",
|
||||||
|
"refId": "",
|
||||||
|
"step": 10,
|
||||||
|
"target": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"datasource": null,
|
||||||
|
"expr": "sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_sum{namespace=\"${namespace}\", job=\"${job}\", code=\"429\"}[$__rate_interval])) by (type)\n /\nsum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace=\"${namespace}\", job=\"${job}\", code=\"429\"}[$__rate_interval])) by (type)",
|
||||||
|
"format": "time_series",
|
||||||
|
"hide": false,
|
||||||
|
"instant": false,
|
||||||
|
"interval": "",
|
||||||
|
"intervalFactor": 1,
|
||||||
|
"legendFormat": "{{ type }} (exceeded)",
|
||||||
|
"metric": "",
|
||||||
|
"refId": "",
|
||||||
|
"step": 10,
|
||||||
|
"target": ""
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"title": "Average quota evaluation latencies",
|
||||||
|
"transformations": [],
|
||||||
|
"transparent": false,
|
||||||
|
"type": "timeseries"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cards": {
|
||||||
|
"cardPadding": null,
|
||||||
|
"cardRound": null
|
||||||
|
},
|
||||||
|
"color": {
|
||||||
|
"cardColor": "#b4ff00",
|
||||||
|
"colorScale": "sqrt",
|
||||||
|
"colorScheme": "interpolateOranges",
|
||||||
|
"exponent": 0.5,
|
||||||
|
"max": null,
|
||||||
|
"min": null,
|
||||||
|
"mode": "opacity"
|
||||||
|
},
|
||||||
|
"dataFormat": "tsbuckets",
|
||||||
|
"datasource": "${DS_PROMETHEUS}",
|
||||||
|
"editable": true,
|
||||||
|
"error": false,
|
||||||
|
"fieldConfig": {
|
||||||
|
"defaults": {
|
||||||
|
"thresholds": {
|
||||||
|
"mode": "absolute",
|
||||||
|
"steps": []
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"gridPos": {
|
||||||
|
"h": 8,
|
||||||
|
"w": 6,
|
||||||
|
"x": 0,
|
||||||
|
"y": 82
|
||||||
|
},
|
||||||
|
"heatmap": {},
|
||||||
|
"hideTimeOverride": false,
|
||||||
|
"hideZeroBuckets": false,
|
||||||
|
"highlightCards": true,
|
||||||
|
"id": 42,
|
||||||
|
"legend": {
|
||||||
|
"show": false
|
||||||
|
},
|
||||||
|
"links": [],
|
||||||
|
"maxDataPoints": 25,
|
||||||
|
"reverseYBuckets": false,
|
||||||
|
"targets": [
|
||||||
|
{
|
||||||
|
"datasource": null,
|
||||||
|
"expr": "sum by(le) (increase(s3_cloudserver_quota_evaluation_duration_seconds_bucket{namespace=\"${namespace}\", job=\"${job}\"}[$__rate_interval]))",
|
||||||
|
"format": "heatmap",
|
||||||
|
"hide": false,
|
||||||
|
"instant": false,
|
||||||
|
"interval": "",
|
||||||
|
"intervalFactor": 1,
|
||||||
|
"legendFormat": "{{ le }}",
|
||||||
|
"metric": "",
|
||||||
|
"refId": "",
|
||||||
|
"step": 10,
|
||||||
|
"target": ""
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"title": "Quota evaluation duration",
|
||||||
|
"tooltip": {
|
||||||
|
"show": true,
|
||||||
|
"showHistogram": true
|
||||||
|
},
|
||||||
|
"transformations": [],
|
||||||
|
"transparent": false,
|
||||||
|
"type": "heatmap",
|
||||||
|
"xAxis": {
|
||||||
|
"mode": "time",
|
||||||
|
"name": null,
|
||||||
|
"show": true,
|
||||||
|
"values": []
|
||||||
|
},
|
||||||
|
"yAxis": {
|
||||||
|
"decimals": null,
|
||||||
|
"format": "s",
|
||||||
|
"label": null,
|
||||||
|
"logBase": 1,
|
||||||
|
"max": null,
|
||||||
|
"min": null,
|
||||||
|
"show": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"datasource": "${DS_PROMETHEUS}",
|
||||||
|
"editable": true,
|
||||||
|
"error": false,
|
||||||
|
"fieldConfig": {
|
||||||
|
"defaults": {
|
||||||
|
"color": {
|
||||||
|
"mode": "palette-classic"
|
||||||
|
},
|
||||||
|
"custom": {
|
||||||
|
"axisLabel": "",
|
||||||
|
"axisPlacement": "auto",
|
||||||
|
"barAlignment": 0,
|
||||||
|
"drawStyle": "line",
|
||||||
|
"fillOpacity": 0,
|
||||||
|
"gradientMode": "none",
|
||||||
|
"hideFrom": {
|
||||||
|
"legend": false,
|
||||||
|
"tooltip": false,
|
||||||
|
"viz": false
|
||||||
|
},
|
||||||
|
"lineInterpolation": "smooth",
|
||||||
|
"lineWidth": 1,
|
||||||
|
"pointSize": 5,
|
||||||
|
"scaleDistribution": {
|
||||||
|
"log": 2,
|
||||||
|
"type": "linear"
|
||||||
|
},
|
||||||
|
"showPoints": "auto",
|
||||||
|
"spanNulls": 180000,
|
||||||
|
"stacking": {},
|
||||||
|
"thresholdsStyle": {
|
||||||
|
"mode": "off"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"mappings": [],
|
||||||
|
"thresholds": {
|
||||||
|
"mode": "absolute",
|
||||||
|
"steps": []
|
||||||
|
},
|
||||||
|
"unit": "s"
|
||||||
|
},
|
||||||
|
"overrides": []
|
||||||
|
},
|
||||||
|
"gridPos": {
|
||||||
|
"h": 8,
|
||||||
|
"w": 18,
|
||||||
|
"x": 6,
|
||||||
|
"y": 82
|
||||||
|
},
|
||||||
|
"hideTimeOverride": false,
|
||||||
|
"id": 43,
|
||||||
|
"links": [],
|
||||||
|
"maxDataPoints": 100,
|
||||||
|
"options": {
|
||||||
|
"legend": {
|
||||||
|
"calcs": [],
|
||||||
|
"displayMode": "list",
|
||||||
|
"placement": "bottom"
|
||||||
|
},
|
||||||
|
"tooltip": {
|
||||||
|
"mode": "single"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"targets": [
|
||||||
|
{
|
||||||
|
"datasource": null,
|
||||||
|
"expr": "sum(rate(s3_cloudserver_quota_metrics_retrieval_duration_seconds_sum{namespace=\"${namespace}\", job=\"${job}\", code=~\"2..\"}[$__rate_interval])) by (class)\n /\nsum(rate(s3_cloudserver_quota_metrics_retrieval_duration_seconds_count{namespace=\"${namespace}\", job=\"${job}\", code=~\"2..\"}[$__rate_interval])) by (class)",
|
||||||
|
"format": "time_series",
|
||||||
|
"hide": false,
|
||||||
|
"instant": false,
|
||||||
|
"interval": "",
|
||||||
|
"intervalFactor": 1,
|
||||||
|
"legendFormat": "{{ class }} (success)",
|
||||||
|
"metric": "",
|
||||||
|
"refId": "",
|
||||||
|
"step": 10,
|
||||||
|
"target": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"datasource": null,
|
||||||
|
"expr": "sum(rate(s3_cloudserver_quota_metrics_retrieval_duration_seconds_sum{namespace=\"${namespace}\", job=\"${job}\", code=~\"4..|5..\"}[$__rate_interval])) by (class)\n /\nsum(rate(s3_cloudserver_quota_metrics_retrieval_duration_seconds_count{namespace=\"${namespace}\", job=\"${job}\", code=~\"4..|5..\"}[$__rate_interval])) by (class)",
|
||||||
|
"format": "time_series",
|
||||||
|
"hide": false,
|
||||||
|
"instant": false,
|
||||||
|
"interval": "",
|
||||||
|
"intervalFactor": 1,
|
||||||
|
"legendFormat": "{{ class }} (error)",
|
||||||
|
"metric": "",
|
||||||
|
"refId": "",
|
||||||
|
"step": 10,
|
||||||
|
"target": ""
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"title": "Average utilization metrics retrieval latencies",
|
||||||
|
"transformations": [],
|
||||||
|
"transparent": false,
|
||||||
|
"type": "timeseries"
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"refresh": "30s",
|
"refresh": "30s",
|
||||||
|
@ -2766,5 +3625,5 @@
|
||||||
"timezone": "",
|
"timezone": "",
|
||||||
"title": "S3 service",
|
"title": "S3 service",
|
||||||
"uid": null,
|
"uid": null,
|
||||||
"version": 31
|
"version": 110
|
||||||
}
|
}
|
||||||
|
|
|
@ -331,7 +331,7 @@ requestsByAction = TimeSeries(
|
||||||
unit=UNITS.OPS_PER_SEC,
|
unit=UNITS.OPS_PER_SEC,
|
||||||
targets=[
|
targets=[
|
||||||
Target(
|
Target(
|
||||||
expr='sum(rate(http_requests_total{namespace="${namespace}", job=~"$job"}[$__rate_interval])) by(action)', # noqa: E501
|
expr='sum(rate(s3_cloudserver_http_requests_total{namespace="${namespace}", job=~"$job"}[$__rate_interval])) by(action)', # noqa: E501
|
||||||
legendFormat="{{action}}",
|
legendFormat="{{action}}",
|
||||||
)
|
)
|
||||||
]
|
]
|
||||||
|
@ -345,7 +345,7 @@ requestsByMethod = PieChart(
|
||||||
unit=UNITS.SHORT,
|
unit=UNITS.SHORT,
|
||||||
targets=[
|
targets=[
|
||||||
Target(
|
Target(
|
||||||
expr='sum(round(increase(http_requests_total{namespace="${namespace}", job=~"$job"}[$__rate_interval]))) by(method)', # noqa: E501
|
expr='sum(round(increase(s3_cloudserver_http_requests_total{namespace="${namespace}", job=~"$job"}[$__rate_interval]))) by(method)', # noqa: E501
|
||||||
legendFormat="{{method}}",
|
legendFormat="{{method}}",
|
||||||
),
|
),
|
||||||
],
|
],
|
||||||
|
@ -366,6 +366,28 @@ def average_latency_target(title, action="", by=""):
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def average_quota_latency_target(code="", by=""):
|
||||||
|
# type: (str, str) -> Target
|
||||||
|
extra = ', code=' + code if code else ""
|
||||||
|
by = " by (" + by + ")" if by else ""
|
||||||
|
return "\n".join([
|
||||||
|
'sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_sum{namespace="${namespace}", job="${job}"' + extra + "}[$__rate_interval]))" + by, # noqa: E501
|
||||||
|
" /",
|
||||||
|
'sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace="${namespace}", job="${job}"' + extra + "}[$__rate_interval]))" + by, # noqa: E501,
|
||||||
|
])
|
||||||
|
|
||||||
|
|
||||||
|
def average_quota_retrieval_latency(code="", by=""):
|
||||||
|
# type: (str, str) -> Target
|
||||||
|
extra = ', code=' + code if code else ""
|
||||||
|
by = " by (" + by + ")" if by else ""
|
||||||
|
return "\n".join([
|
||||||
|
'sum(rate(s3_cloudserver_quota_metrics_retrieval_duration_seconds_sum{namespace="${namespace}", job="${job}"' + extra + "}[$__rate_interval]))" + by, # noqa: E501
|
||||||
|
" /",
|
||||||
|
'sum(rate(s3_cloudserver_quota_metrics_retrieval_duration_seconds_count{namespace="${namespace}", job="${job}"' + extra + "}[$__rate_interval]))" + by, # noqa: E501,
|
||||||
|
])
|
||||||
|
|
||||||
|
|
||||||
averageLatencies = TimeSeries(
|
averageLatencies = TimeSeries(
|
||||||
title="Average latencies",
|
title="Average latencies",
|
||||||
dataSource="${DS_PROMETHEUS}",
|
dataSource="${DS_PROMETHEUS}",
|
||||||
|
@ -406,10 +428,10 @@ requestTime = Heatmap(
|
||||||
dataFormat="tsbuckets",
|
dataFormat="tsbuckets",
|
||||||
maxDataPoints=25,
|
maxDataPoints=25,
|
||||||
tooltip=Tooltip(show=True, showHistogram=True),
|
tooltip=Tooltip(show=True, showHistogram=True),
|
||||||
yAxis=YAxis(format=UNITS.DURATION_SECONDS),
|
yAxis=YAxis(format=UNITS.SECONDS),
|
||||||
color=HeatmapColor(mode="opacity"),
|
color=HeatmapColor(mode="opacity"),
|
||||||
targets=[Target(
|
targets=[Target(
|
||||||
expr='sum by(le) (increase(s3_cloudserver_http_request_duration_seconds_bucket{namespace="${namespace}", job=~"$job"}[$__rate_interval]))', # noqa: E501
|
expr='sum by(le) (increase(s3_cloudserver_http_request_duration_seconds_bucket{namespace="${namespace}", job="${job}"}[$__rate_interval]))', # noqa: E501
|
||||||
format="heatmap",
|
format="heatmap",
|
||||||
legendFormat="{{ le }}",
|
legendFormat="{{ le }}",
|
||||||
)],
|
)],
|
||||||
|
@ -433,11 +455,11 @@ bandWidth = TimeSeries(
|
||||||
unit="binBps",
|
unit="binBps",
|
||||||
targets=[
|
targets=[
|
||||||
Target(
|
Target(
|
||||||
expr='sum(rate(s3_cloudserver_http_response_size_bytes_sum{namespace="${namespace}", job=~"$job"}[$__rate_interval]))', # noqa: E501
|
expr='sum(rate(s3_cloudserver_http_response_size_bytes_sum{namespace="${namespace}", job="${job}"}[$__rate_interval]))', # noqa: E501
|
||||||
legendFormat="Out"
|
legendFormat="Out"
|
||||||
),
|
),
|
||||||
Target(
|
Target(
|
||||||
expr='sum(rate(s3_cloudserver_http_request_size_bytes_sum{namespace="${namespace}", job=~"$job"}[$__rate_interval]))', # noqa: E501
|
expr='sum(rate(s3_cloudserver_http_request_size_bytes_sum{namespace="${namespace}", job="${job}"}[$__rate_interval]))', # noqa: E501
|
||||||
legendFormat="In"
|
legendFormat="In"
|
||||||
)
|
)
|
||||||
],
|
],
|
||||||
|
@ -525,6 +547,174 @@ top10Error5xxByBucket = top10_errors_by_bucket(
|
||||||
title="5xx : Top10 by Bucket", code='~"5.."'
|
title="5xx : Top10 by Bucket", code='~"5.."'
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
quotaHealth = TimeSeries(
|
||||||
|
title="Quota service uptime",
|
||||||
|
legendDisplayMode="hidden",
|
||||||
|
dataSource="${DS_PROMETHEUS}",
|
||||||
|
lineInterpolation="stepAfter",
|
||||||
|
fillOpacity=30,
|
||||||
|
unit=UNITS.PERCENT_FORMAT,
|
||||||
|
targets=[Target(
|
||||||
|
expr='avg(avg_over_time(s3_cloudserver_quota_utilization_service_available{namespace="${namespace}",job="${job}"}[$__rate_interval])) * 100', # noqa: E501
|
||||||
|
)],
|
||||||
|
thresholds=[
|
||||||
|
Threshold("green", 0, 95.0),
|
||||||
|
Threshold("orange", 1, 90.0),
|
||||||
|
Threshold("red", 2, 0.0),
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
quotaStatusCode = TimeSeries(
|
||||||
|
title="Quota evaluation status code over time",
|
||||||
|
dataSource="${DS_PROMETHEUS}",
|
||||||
|
fillOpacity=30,
|
||||||
|
lineInterpolation="smooth",
|
||||||
|
unit=UNITS.OPS_PER_SEC,
|
||||||
|
targets=[Target(
|
||||||
|
expr='sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace="${namespace}", code=~"2..", job="${job}"}[$__rate_interval]))', # noqa: E501
|
||||||
|
legendFormat="Success",
|
||||||
|
), Target(
|
||||||
|
expr='sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace="${namespace}", code="429", job="${job}"}[$__rate_interval]))', # noqa: E501
|
||||||
|
legendFormat="Quota Exceeded",
|
||||||
|
)],
|
||||||
|
)
|
||||||
|
|
||||||
|
quotaByAction = TimeSeries(
|
||||||
|
title="Quota evaluaton rate per S3 action",
|
||||||
|
dataSource="${DS_PROMETHEUS}",
|
||||||
|
legendDisplayMode="table",
|
||||||
|
legendPlacement="right",
|
||||||
|
legendValues=["min", "mean", "max"],
|
||||||
|
lineInterpolation="smooth",
|
||||||
|
unit=UNITS.OPS_PER_SEC,
|
||||||
|
targets=[
|
||||||
|
Target(
|
||||||
|
expr='sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace="${namespace}", job="${job}"}[$__rate_interval])) by(action)', # noqa: E501
|
||||||
|
legendFormat="{{action}}",
|
||||||
|
)
|
||||||
|
]
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
averageQuotaDuration = Heatmap(
|
||||||
|
title="Quota evaluation duration",
|
||||||
|
dataSource="${DS_PROMETHEUS}",
|
||||||
|
dataFormat="tsbuckets",
|
||||||
|
maxDataPoints=25,
|
||||||
|
tooltip=Tooltip(show=True, showHistogram=True),
|
||||||
|
yAxis=YAxis(format=UNITS.SECONDS),
|
||||||
|
color=HeatmapColor(mode="opacity"),
|
||||||
|
targets=[Target(
|
||||||
|
expr='sum by(le) (increase(s3_cloudserver_quota_evaluation_duration_seconds_bucket{namespace="${namespace}", job="${job}"}[$__rate_interval]))', # noqa: E501
|
||||||
|
format="heatmap",
|
||||||
|
legendFormat="{{ le }}",
|
||||||
|
)],
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
operationsWithUnavailableMetrics = TimeSeries(
|
||||||
|
title="Operations with unavailable metrics",
|
||||||
|
dataSource="${DS_PROMETHEUS}",
|
||||||
|
fillOpacity=30,
|
||||||
|
lineInterpolation="smooth",
|
||||||
|
unit=UNITS.OPS_PER_SEC,
|
||||||
|
legendDisplayMode="hidden",
|
||||||
|
targets=[Target(
|
||||||
|
expr='sum(rate(s3_cloudserver_quota_unavailable_count{namespace="${namespace}", job="${job}"}[$__rate_interval]))', # noqa: E501
|
||||||
|
)],
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
averageQuotaLatencies = TimeSeries(
|
||||||
|
title="Average quota evaluation latencies",
|
||||||
|
dataSource="${DS_PROMETHEUS}",
|
||||||
|
lineInterpolation="smooth",
|
||||||
|
spanNulls=3*60*1000,
|
||||||
|
legendDisplayMode="table",
|
||||||
|
legendPlacement="right",
|
||||||
|
legendValues=["min", "mean", "max"],
|
||||||
|
unit=UNITS.SECONDS,
|
||||||
|
targets=[
|
||||||
|
Target(
|
||||||
|
expr=average_quota_latency_target(code='~"2.."', by='type'),
|
||||||
|
legendFormat='{{ type }} (success)',
|
||||||
|
),
|
||||||
|
Target(
|
||||||
|
expr=average_quota_latency_target(code='"429"', by='type'),
|
||||||
|
legendFormat='{{ type }} (exceeded)',
|
||||||
|
),
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
averageMetricsRetrievalLatencies = TimeSeries(
|
||||||
|
title="Average utilization metrics retrieval latencies",
|
||||||
|
dataSource="${DS_PROMETHEUS}",
|
||||||
|
lineInterpolation="smooth",
|
||||||
|
spanNulls=3*60*1000,
|
||||||
|
unit=UNITS.SECONDS,
|
||||||
|
targets=[
|
||||||
|
Target(
|
||||||
|
expr=average_quota_retrieval_latency(code='~"2.."', by='class'),
|
||||||
|
legendFormat='{{ class }} (success)',
|
||||||
|
),
|
||||||
|
Target(
|
||||||
|
expr=average_quota_retrieval_latency(
|
||||||
|
code='~"4..|5.."',
|
||||||
|
by='class'
|
||||||
|
),
|
||||||
|
legendFormat='{{ class }} (error)',
|
||||||
|
),
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
bucketQuotaCounter = Stat(
|
||||||
|
title="Buckets with quota",
|
||||||
|
description=(
|
||||||
|
"Number of S3 buckets with quota enabled in the cluster.\n"
|
||||||
|
"This value is computed asynchronously, and update "
|
||||||
|
"may be delayed up to 1h."
|
||||||
|
),
|
||||||
|
dataSource="${DS_PROMETHEUS}",
|
||||||
|
colorMode="value",
|
||||||
|
format=UNITS.SHORT,
|
||||||
|
noValue="-",
|
||||||
|
reduceCalc="lastNotNull",
|
||||||
|
targets=[Target(
|
||||||
|
expr='max(s3_cloudserver_quota_buckets_count{namespace="${namespace}", job=~"${reportJob}"})', # noqa: E501
|
||||||
|
)],
|
||||||
|
thresholds=[
|
||||||
|
Threshold("#808080", 0, 0.0),
|
||||||
|
Threshold("blue", 1, 0.0),
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
accountQuotaCounter = Stat(
|
||||||
|
title="Accounts with quota",
|
||||||
|
description=(
|
||||||
|
"Number of accounts with quota enabled in the cluster.\n"
|
||||||
|
"This value is computed asynchronously, and update "
|
||||||
|
"may be delayed up to 1h."
|
||||||
|
),
|
||||||
|
dataSource="${DS_PROMETHEUS}",
|
||||||
|
colorMode="value",
|
||||||
|
format=UNITS.SHORT,
|
||||||
|
noValue="-",
|
||||||
|
reduceCalc="lastNotNull",
|
||||||
|
targets=[Target(
|
||||||
|
expr='max(s3_cloudserver_quota_accounts_count{namespace="${namespace}", job=~"${reportJob}"})', # noqa: E501
|
||||||
|
)],
|
||||||
|
thresholds=[
|
||||||
|
Threshold("#808080", 0, 0.0),
|
||||||
|
Threshold("blue", 1, 0.0),
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
dashboard = (
|
dashboard = (
|
||||||
Dashboard(
|
Dashboard(
|
||||||
title="S3 service",
|
title="S3 service",
|
||||||
|
@ -630,6 +820,24 @@ dashboard = (
|
||||||
top10Error500ByBucket,
|
top10Error500ByBucket,
|
||||||
top10Error5xxByBucket
|
top10Error5xxByBucket
|
||||||
], height=8),
|
], height=8),
|
||||||
|
RowPanel(title="Quotas"),
|
||||||
|
layout.row([
|
||||||
|
layout.column([
|
||||||
|
layout.resize([bucketQuotaCounter], width=6, height=4),
|
||||||
|
layout.resize([accountQuotaCounter], width=6, height=4),
|
||||||
|
], height=8),
|
||||||
|
layout.resize([operationsWithUnavailableMetrics], width=6),
|
||||||
|
quotaByAction,
|
||||||
|
], height=8),
|
||||||
|
layout.row([
|
||||||
|
layout.resize([quotaHealth], width=6),
|
||||||
|
layout.resize([quotaStatusCode], width=6),
|
||||||
|
averageQuotaLatencies,
|
||||||
|
], height=8),
|
||||||
|
layout.row([
|
||||||
|
layout.resize([averageQuotaDuration], width=6),
|
||||||
|
averageMetricsRetrievalLatencies,
|
||||||
|
], height=8),
|
||||||
]),
|
]),
|
||||||
)
|
)
|
||||||
.auto_panel_ids()
|
.auto_panel_ids()
|
||||||
|
|
|
@ -45,8 +45,8 @@ then
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
REGISTRY=${REGISTRY:-"registry.scality.com"}
|
REGISTRY=${REGISTRY:-"ghcr.io/scality"}
|
||||||
PROJECT=${PROJECT:-"cloudserver-dev"}
|
PROJECT=${PROJECT:-"cloudserver"}
|
||||||
|
|
||||||
set -x
|
set -x
|
||||||
${ORAS} push "${REGISTRY}/${PROJECT}/${NAME_TAG}" "${INPUT_FILE}:${MIME_TYPE}"
|
${ORAS} push "${REGISTRY}/${PROJECT}/${NAME_TAG}" "${INPUT_FILE}:${MIME_TYPE}"
|
||||||
|
|
57
package.json
57
package.json
|
@ -1,6 +1,6 @@
|
||||||
{
|
{
|
||||||
"name": "@zenko/cloudserver",
|
"name": "@zenko/cloudserver",
|
||||||
"version": "8.6.26",
|
"version": "8.8.27",
|
||||||
"description": "Zenko CloudServer, an open-source Node.js implementation of a server handling the Amazon S3 protocol",
|
"description": "Zenko CloudServer, an open-source Node.js implementation of a server handling the Amazon S3 protocol",
|
||||||
"main": "index.js",
|
"main": "index.js",
|
||||||
"engines": {
|
"engines": {
|
||||||
|
@ -21,53 +21,61 @@
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
"@azure/storage-blob": "^12.12.0",
|
"@azure/storage-blob": "^12.12.0",
|
||||||
"@hapi/joi": "^17.1.0",
|
"@hapi/joi": "^17.1.0",
|
||||||
"arsenal": "git+https://github.com/scality/arsenal#8.1.127",
|
"arsenal": "git+https://git.yourcmc.ru/vitalif/zenko-arsenal.git#development/8.1",
|
||||||
"async": "~2.5.0",
|
"async": "^2.5.0",
|
||||||
"aws-sdk": "2.905.0",
|
"aws-sdk": "^2.905.0",
|
||||||
"bucketclient": "scality/bucketclient#8.1.9",
|
|
||||||
"bufferutil": "^4.0.6",
|
"bufferutil": "^4.0.6",
|
||||||
"commander": "^2.9.0",
|
"commander": "^2.9.0",
|
||||||
"cron-parser": "^2.11.0",
|
"cron-parser": "^2.11.0",
|
||||||
"diskusage": "1.1.3",
|
"diskusage": "^1.1.3",
|
||||||
"google-auto-auth": "^0.9.1",
|
"google-auto-auth": "^0.9.1",
|
||||||
"http-proxy": "^1.17.0",
|
"http-proxy": "^1.17.0",
|
||||||
"http-proxy-agent": "^4.0.1",
|
"http-proxy-agent": "^4.0.1",
|
||||||
"https-proxy-agent": "^2.2.0",
|
"https-proxy-agent": "^2.2.0",
|
||||||
"level-mem": "^5.0.1",
|
"level-mem": "^5.0.1",
|
||||||
"moment": "^2.26.0",
|
"moment": "^2.26.0",
|
||||||
"mongodb": "^2.2.31",
|
"mongodb": "^5.2.0",
|
||||||
"node-fetch": "^2.6.0",
|
"node-fetch": "^2.6.0",
|
||||||
"node-forge": "^0.7.1",
|
"node-forge": "^0.7.1",
|
||||||
"npm-run-all": "~4.1.5",
|
"npm-run-all": "^4.1.5",
|
||||||
"prom-client": "14.2.0",
|
"prom-client": "14.2.0",
|
||||||
"request": "^2.81.0",
|
"request": "^2.81.0",
|
||||||
"sql-where-parser": "~2.2.1",
|
"scubaclient": "git+https://git.yourcmc.ru/vitalif/zenko-scubaclient.git",
|
||||||
"utapi": "github:scality/utapi#8.1.13",
|
"sql-where-parser": "^2.2.1",
|
||||||
|
"utapi": "git+https://git.yourcmc.ru/vitalif/zenko-utapi.git",
|
||||||
"utf-8-validate": "^5.0.8",
|
"utf-8-validate": "^5.0.8",
|
||||||
"utf8": "~2.1.1",
|
"utf8": "^2.1.1",
|
||||||
"uuid": "^8.3.2",
|
"uuid": "^8.3.2",
|
||||||
"vaultclient": "scality/vaultclient#8.3.11",
|
"werelogs": "git+https://git.yourcmc.ru/vitalif/zenko-werelogs.git#development/8.1",
|
||||||
"werelogs": "scality/werelogs#8.1.4",
|
|
||||||
"ws": "^5.1.0",
|
"ws": "^5.1.0",
|
||||||
"xml2js": "~0.4.16"
|
"xml2js": "^0.4.16"
|
||||||
|
},
|
||||||
|
"overrides": {
|
||||||
|
"ltgt": "^2.2.0"
|
||||||
},
|
},
|
||||||
"devDependencies": {
|
"devDependencies": {
|
||||||
|
"@babel/core": "^7.25.2",
|
||||||
|
"@babel/preset-env": "^7.25.3",
|
||||||
|
"babel-loader": "^9.1.3",
|
||||||
"bluebird": "^3.3.1",
|
"bluebird": "^3.3.1",
|
||||||
"eslint": "^8.14.0",
|
"eslint": "^8.14.0",
|
||||||
"eslint-config-airbnb-base": "^13.1.0",
|
"eslint-config-airbnb-base": "^15.0.0",
|
||||||
"eslint-config-scality": "scality/Guidelines#8.2.0",
|
"eslint-config-scality": "git+https://git.yourcmc.ru/vitalif/zenko-eslint-config-scality.git",
|
||||||
"eslint-plugin-import": "^2.14.0",
|
"eslint-plugin-import": "^2.14.0",
|
||||||
|
"eslint-plugin-mocha": "^10.1.0",
|
||||||
"express": "^4.17.1",
|
"express": "^4.17.1",
|
||||||
"ioredis": "4.9.5",
|
"ioredis": "^4.9.5",
|
||||||
"istanbul": "1.0.0-alpha.2",
|
"istanbul": "^1.0.0-alpha.2",
|
||||||
"istanbul-api": "1.0.0-alpha.13",
|
"istanbul-api": "^1.0.0-alpha.13",
|
||||||
"lolex": "^1.4.0",
|
"lolex": "^1.4.0",
|
||||||
"mocha": "^2.3.4",
|
"mocha": ">=3.1.2",
|
||||||
"mocha-junit-reporter": "^1.23.1",
|
"mocha-junit-reporter": "^1.23.1",
|
||||||
"mocha-multi-reporters": "^1.1.7",
|
"mocha-multi-reporters": "^1.1.7",
|
||||||
"node-mocks-http": "1.5.2",
|
"node-mocks-http": "^1.5.2",
|
||||||
"sinon": "^13.0.1",
|
"sinon": "^13.0.1",
|
||||||
"tv4": "^1.2.7"
|
"tv4": "^1.2.7",
|
||||||
|
"webpack": "^5.93.0",
|
||||||
|
"webpack-cli": "^5.1.4"
|
||||||
},
|
},
|
||||||
"scripts": {
|
"scripts": {
|
||||||
"cloudserver": "S3METADATA=mongodb npm-run-all --parallel start_dataserver start_s3server",
|
"cloudserver": "S3METADATA=mongodb npm-run-all --parallel start_dataserver start_s3server",
|
||||||
|
@ -108,10 +116,11 @@
|
||||||
"utapi_replay": "node lib/utapi/utapiReplay.js",
|
"utapi_replay": "node lib/utapi/utapiReplay.js",
|
||||||
"utapi_reindex": "node lib/utapi/utapiReindex.js",
|
"utapi_reindex": "node lib/utapi/utapiReindex.js",
|
||||||
"management_agent": "node managementAgent.js",
|
"management_agent": "node managementAgent.js",
|
||||||
"test": "CI=true S3BACKEND=mem mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/unit",
|
"test": "CI=true S3BACKEND=mem S3QUOTA=scuba mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/unit",
|
||||||
"test_versionid_base62": "S3_VERSION_ID_ENCODING_TYPE=base62 CI=true S3BACKEND=mem mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/unit/api",
|
"test_versionid_base62": "S3_VERSION_ID_ENCODING_TYPE=base62 CI=true S3BACKEND=mem mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/unit/api",
|
||||||
"test_legacy_location": "CI=true S3_LOCATION_FILE=tests/locationConfig/locationConfigLegacy.json S3BACKEND=mem mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/unit",
|
"test_legacy_location": "CI=true S3QUOTA=scuba S3_LOCATION_FILE=tests/locationConfig/locationConfigLegacy.json S3BACKEND=mem mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/unit",
|
||||||
"test_utapi_v2": "mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/utapi",
|
"test_utapi_v2": "mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/utapi",
|
||||||
|
"test_quota": "mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/quota",
|
||||||
"multiple_backend_test": "CI=true S3BACKEND=mem S3DATA=multiple mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json -t 20000 --recursive tests/multipleBackend",
|
"multiple_backend_test": "CI=true S3BACKEND=mem S3DATA=multiple mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json -t 20000 --recursive tests/multipleBackend",
|
||||||
"unit_coverage": "CI=true mkdir -p coverage/unit/ && S3BACKEND=mem istanbul cover --dir coverage/unit _mocha -- --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/unit",
|
"unit_coverage": "CI=true mkdir -p coverage/unit/ && S3BACKEND=mem istanbul cover --dir coverage/unit _mocha -- --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/unit",
|
||||||
"unit_coverage_legacy_location": "CI=true mkdir -p coverage/unitlegacylocation/ && S3_LOCATION_FILE=tests/locationConfig/locationConfigLegacy.json S3BACKEND=mem istanbul cover --dir coverage/unitlegacylocation _mocha -- --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --reporter mocha-junit-reporter --recursive tests/unit"
|
"unit_coverage_legacy_location": "CI=true mkdir -p coverage/unitlegacylocation/ && S3_LOCATION_FILE=tests/locationConfig/locationConfigLegacy.json S3BACKEND=mem istanbul cover --dir coverage/unitlegacylocation _mocha -- --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --reporter mocha-junit-reporter --recursive tests/unit"
|
||||||
|
|
|
@ -0,0 +1,39 @@
|
||||||
|
const AWS = require('aws-sdk');
|
||||||
|
const S3 = AWS.S3;
|
||||||
|
const assert = require('assert');
|
||||||
|
const getConfig = require('../support/config');
|
||||||
|
const sendRequest = require('../quota/tooling').sendRequest;
|
||||||
|
|
||||||
|
const bucket = 'deletequotatestbucket';
|
||||||
|
const nonExistantBucket = 'deletequotatestnonexistantbucket';
|
||||||
|
|
||||||
|
describe('Test delete bucket quota', () => {
|
||||||
|
let s3;
|
||||||
|
|
||||||
|
before(() => {
|
||||||
|
const config = getConfig('default', { signatureVersion: 'v4' });
|
||||||
|
s3 = new S3(config);
|
||||||
|
AWS.config.update(config);
|
||||||
|
});
|
||||||
|
|
||||||
|
beforeEach(done => s3.createBucket({ Bucket: bucket }, done));
|
||||||
|
|
||||||
|
afterEach(done => s3.deleteBucket({ Bucket: bucket }, done));
|
||||||
|
|
||||||
|
it('should delete the bucket quota', async () => {
|
||||||
|
try {
|
||||||
|
await sendRequest('DELETE', '127.0.0.1:8000', `/${bucket}/?quota=true`);
|
||||||
|
assert.ok(true);
|
||||||
|
} catch (err) {
|
||||||
|
assert.fail(`Expected no error, but got ${err}`);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return no such bucket error', async () => {
|
||||||
|
try {
|
||||||
|
await sendRequest('DELETE', '127.0.0.1:8000', `/${nonExistantBucket}/?quota=true`);
|
||||||
|
} catch (err) {
|
||||||
|
assert.strictEqual(err.Error.Code[0], 'NoSuchBucket');
|
||||||
|
}
|
||||||
|
});
|
||||||
|
});
|
|
@ -0,0 +1,77 @@
|
||||||
|
const AWS = require('aws-sdk');
|
||||||
|
const S3 = AWS.S3;
|
||||||
|
const assert = require('assert');
|
||||||
|
const getConfig = require('../support/config');
|
||||||
|
const sendRequest = require('../quota/tooling').sendRequest;
|
||||||
|
|
||||||
|
const bucket = 'getquotatestbucket';
|
||||||
|
const quota = { quota: 1000 };
|
||||||
|
|
||||||
|
describe('Test get bucket quota', () => {
|
||||||
|
let s3;
|
||||||
|
|
||||||
|
before(() => {
|
||||||
|
const config = getConfig('default', { signatureVersion: 'v4' });
|
||||||
|
s3 = new S3(config);
|
||||||
|
AWS.config.update(config);
|
||||||
|
});
|
||||||
|
|
||||||
|
beforeEach(done => s3.createBucket({ Bucket: bucket }, done));
|
||||||
|
|
||||||
|
afterEach(done => s3.deleteBucket({ Bucket: bucket }, done));
|
||||||
|
|
||||||
|
it('should return the quota', async () => {
|
||||||
|
try {
|
||||||
|
await sendRequest('PUT', '127.0.0.1:8000', `/${bucket}/?quota=true`, JSON.stringify(quota));
|
||||||
|
const data = await sendRequest('GET', '127.0.0.1:8000', `/${bucket}/?quota=true`);
|
||||||
|
assert.strictEqual(data.GetBucketQuota.Name[0], bucket);
|
||||||
|
assert.strictEqual(data.GetBucketQuota.Quota[0], '1000');
|
||||||
|
} catch (err) {
|
||||||
|
assert.fail(`Expected no error, but got ${err}`);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return no such bucket error', async () => {
|
||||||
|
try {
|
||||||
|
await sendRequest('GET', '127.0.0.1:8000', '/test/?quota=true');
|
||||||
|
} catch (err) {
|
||||||
|
assert.strictEqual(err.Error.Code[0], 'NoSuchBucket');
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return no such bucket quota', async () => {
|
||||||
|
try {
|
||||||
|
await sendRequest('DELETE', '127.0.0.1:8000', `/${bucket}/?quota=true`);
|
||||||
|
try {
|
||||||
|
await sendRequest('GET', '127.0.0.1:8000', `/${bucket}/?quota=true`);
|
||||||
|
assert.fail('Expected NoSuchQuota error');
|
||||||
|
} catch (err) {
|
||||||
|
assert.strictEqual(err.Error.Code[0], 'NoSuchQuota');
|
||||||
|
}
|
||||||
|
} catch (err) {
|
||||||
|
assert.fail(`Expected no error, but got ${err}`);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return no such bucket error', async () => {
|
||||||
|
try {
|
||||||
|
await sendRequest('GET', '127.0.0.1:8000', '/test/?quota=true');
|
||||||
|
} catch (err) {
|
||||||
|
assert.strictEqual(err.Error.Code[0], 'NoSuchBucket');
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return no such bucket quota', async () => {
|
||||||
|
try {
|
||||||
|
await sendRequest('DELETE', '127.0.0.1:8000', `/${bucket}/?quota=true`);
|
||||||
|
try {
|
||||||
|
await sendRequest('GET', '127.0.0.1:8000', `/${bucket}/?quota=true`);
|
||||||
|
assert.fail('Expected NoSuchQuota error');
|
||||||
|
} catch (err) {
|
||||||
|
assert.strictEqual(err.Error.Code[0], 'NoSuchQuota');
|
||||||
|
}
|
||||||
|
} catch (err) {
|
||||||
|
assert.fail(`Expected no error, but got ${err}`);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
});
|
|
@ -475,4 +475,58 @@ describe('Listing corner cases tests', () => {
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
});
|
});
|
||||||
|
|
||||||
|
it('should not list DeleteMarkers for version suspended buckets', done => {
|
||||||
|
const obj = { name: 'testDeleteMarker.txt', value: 'foo' };
|
||||||
|
const bucketName = `bucket-test-delete-markers-not-listed${Date.now()}`;
|
||||||
|
let objectCount = 0;
|
||||||
|
return async.waterfall([
|
||||||
|
next => s3.createBucket({ Bucket: bucketName }, err => next(err)),
|
||||||
|
next => {
|
||||||
|
const params = {
|
||||||
|
Bucket: bucketName,
|
||||||
|
VersioningConfiguration: {
|
||||||
|
Status: 'Suspended',
|
||||||
|
},
|
||||||
|
};
|
||||||
|
return s3.putBucketVersioning(params, err =>
|
||||||
|
next(err));
|
||||||
|
},
|
||||||
|
next => s3.putObject({
|
||||||
|
Bucket: bucketName,
|
||||||
|
Key: obj.name,
|
||||||
|
Body: obj.value,
|
||||||
|
}, err =>
|
||||||
|
next(err)),
|
||||||
|
next => s3.listObjectsV2({ Bucket: bucketName },
|
||||||
|
(err, res) => {
|
||||||
|
if (err) {
|
||||||
|
return next(err);
|
||||||
|
}
|
||||||
|
objectCount = res.Contents.length;
|
||||||
|
assert.strictEqual(res.Contents.some(c => c.Key === obj.name), true);
|
||||||
|
return next();
|
||||||
|
}),
|
||||||
|
next => s3.deleteObject({
|
||||||
|
Bucket: bucketName,
|
||||||
|
Key: obj.name,
|
||||||
|
}, function test(err) {
|
||||||
|
const headers = this.httpResponse.headers;
|
||||||
|
assert.strictEqual(
|
||||||
|
headers['x-amz-delete-marker'], 'true');
|
||||||
|
return next(err);
|
||||||
|
}),
|
||||||
|
next => s3.listObjectsV2({ Bucket: bucketName },
|
||||||
|
(err, res) => {
|
||||||
|
if (err) {
|
||||||
|
return next(err);
|
||||||
|
}
|
||||||
|
assert.strictEqual(res.Contents.length, objectCount - 1);
|
||||||
|
assert.strictEqual(res.Contents.some(c => c.Key === obj.name), false);
|
||||||
|
return next();
|
||||||
|
}),
|
||||||
|
next => s3.deleteObject({ Bucket: bucketName, Key: obj.name, VersionId: 'null' }, err => next(err)),
|
||||||
|
next => s3.deleteBucket({ Bucket: bucketName }, err => next(err))
|
||||||
|
], err => done(err));
|
||||||
|
});
|
||||||
});
|
});
|
||||||
|
|
|
@ -0,0 +1,70 @@
|
||||||
|
const AWS = require('aws-sdk');
|
||||||
|
const S3 = AWS.S3;
|
||||||
|
|
||||||
|
const assert = require('assert');
|
||||||
|
const getConfig = require('../support/config');
|
||||||
|
const sendRequest = require('../quota/tooling').sendRequest;
|
||||||
|
|
||||||
|
const bucket = 'updatequotatestbucket';
|
||||||
|
const nonExistantBucket = 'updatequotatestnonexistantbucket';
|
||||||
|
const quota = { quota: 2000 };
|
||||||
|
const negativeQuota = { quota: -1000 };
|
||||||
|
const wrongquotaFromat = '1000';
|
||||||
|
const largeQuota = { quota: 1000000000000 };
|
||||||
|
|
||||||
|
describe('Test update bucket quota', () => {
|
||||||
|
let s3;
|
||||||
|
|
||||||
|
before(() => {
|
||||||
|
const config = getConfig('default', { signatureVersion: 'v4' });
|
||||||
|
s3 = new S3(config);
|
||||||
|
AWS.config.update(config);
|
||||||
|
});
|
||||||
|
|
||||||
|
beforeEach(done => s3.createBucket({ Bucket: bucket }, done));
|
||||||
|
|
||||||
|
afterEach(done => s3.deleteBucket({ Bucket: bucket }, done));
|
||||||
|
|
||||||
|
it('should update the quota', async () => {
|
||||||
|
try {
|
||||||
|
await sendRequest('PUT', '127.0.0.1:8000', `/${bucket}/?quota=true`, JSON.stringify(quota));
|
||||||
|
assert.ok(true);
|
||||||
|
} catch (err) {
|
||||||
|
assert.fail(`Expected no error, but got ${err}`);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return no such bucket error', async () => {
|
||||||
|
try {
|
||||||
|
await sendRequest('PUT', '127.0.0.1:8000', `/${nonExistantBucket}/?quota=true`, JSON.stringify(quota));
|
||||||
|
} catch (err) {
|
||||||
|
assert.strictEqual(err.Error.Code[0], 'NoSuchBucket');
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return error when quota is negative', async () => {
|
||||||
|
try {
|
||||||
|
await sendRequest('PUT', '127.0.0.1:8000', `/${bucket}/?quota=true`, JSON.stringify(negativeQuota));
|
||||||
|
} catch (err) {
|
||||||
|
assert.strictEqual(err.Error.Code[0], 'InvalidArgument');
|
||||||
|
assert.strictEqual(err.Error.Message[0], 'Quota value must be a positive number');
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return error when quota is not in correct format', async () => {
|
||||||
|
try {
|
||||||
|
await sendRequest('PUT', '127.0.0.1:8000', `/${bucket}/?quota=true`, wrongquotaFromat);
|
||||||
|
} catch (err) {
|
||||||
|
assert.strictEqual(err.Error.Code[0], 'InvalidArgument');
|
||||||
|
assert.strictEqual(err.Error.Message[0], 'Request body must be a JSON object');
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle large quota values', async () => {
|
||||||
|
try {
|
||||||
|
await sendRequest('PUT', '127.0.0.1:8000', `/${bucket}/?quota=true`, JSON.stringify(largeQuota));
|
||||||
|
} catch (err) {
|
||||||
|
assert.fail(`Expected no error, but got ${err}`);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
});
|
|
@ -33,7 +33,7 @@ describe('aws-node-sdk v2auth query tests', function testSuite() {
|
||||||
let s3;
|
let s3;
|
||||||
|
|
||||||
before(() => {
|
before(() => {
|
||||||
const config = getConfig('default');
|
const config = getConfig('default', { signatureVersion: 'v2' });
|
||||||
|
|
||||||
s3 = new S3(config);
|
s3 = new S3(config);
|
||||||
});
|
});
|
||||||
|
|
|
@ -45,7 +45,7 @@ const itSkipCeph = isCEPH ? it.skip : it.skip;
|
||||||
const describeSkipIfCeph = isCEPH ? describe.skip : describe.skip; // always skip
|
const describeSkipIfCeph = isCEPH ? describe.skip : describe.skip; // always skip
|
||||||
|
|
||||||
if (config.backends.data === 'multiple') {
|
if (config.backends.data === 'multiple') {
|
||||||
describeSkipIfNotMultiple = describe.skip;
|
describeSkipIfNotMultiple = describe;
|
||||||
describeSkipIfNotMultipleOrCeph = isCEPH ? describe.skip : describe.skip; // always skip
|
describeSkipIfNotMultipleOrCeph = isCEPH ? describe.skip : describe.skip; // always skip
|
||||||
const awsConfig = getRealAwsConfig(awsLocation);
|
const awsConfig = getRealAwsConfig(awsLocation);
|
||||||
awsS3 = new AWS.S3(awsConfig);
|
awsS3 = new AWS.S3(awsConfig);
|
||||||
|
|
|
@ -7,6 +7,7 @@ const withV4 = require('../support/withV4');
|
||||||
const BucketUtility = require('../../lib/utility/bucket-util');
|
const BucketUtility = require('../../lib/utility/bucket-util');
|
||||||
const { createEncryptedBucketPromise } =
|
const { createEncryptedBucketPromise } =
|
||||||
require('../../lib/utility/createEncryptedBucket');
|
require('../../lib/utility/createEncryptedBucket');
|
||||||
|
const { fakeMetadataTransition, fakeMetadataArchive } = require('../utils/init');
|
||||||
|
|
||||||
const sourceBucketName = 'supersourcebucket81033016532';
|
const sourceBucketName = 'supersourcebucket81033016532';
|
||||||
const sourceObjName = 'supersourceobject';
|
const sourceObjName = 'supersourceobject';
|
||||||
|
@ -710,6 +711,72 @@ describe('Object Part Copy', () => {
|
||||||
});
|
});
|
||||||
});
|
});
|
||||||
|
|
||||||
|
it('should not copy a part of a cold object', done => {
|
||||||
|
const archive = {
|
||||||
|
archiveInfo: {
|
||||||
|
archiveId: '97a71dfe-49c1-4cca-840a-69199e0b0322',
|
||||||
|
archiveVersion: 5577006791947779
|
||||||
|
},
|
||||||
|
};
|
||||||
|
fakeMetadataArchive(sourceBucketName, sourceObjName, undefined, archive, err => {
|
||||||
|
assert.ifError(err);
|
||||||
|
s3.uploadPartCopy({
|
||||||
|
Bucket: destBucketName,
|
||||||
|
Key: destObjName,
|
||||||
|
CopySource: `${sourceBucketName}/${sourceObjName}`,
|
||||||
|
PartNumber: 1,
|
||||||
|
UploadId: uploadId,
|
||||||
|
}, err => {
|
||||||
|
assert.strictEqual(err.code, 'InvalidObjectState');
|
||||||
|
assert.strictEqual(err.statusCode, 403);
|
||||||
|
done();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should copy a part of an object when it\'s transitioning to cold', done => {
|
||||||
|
fakeMetadataTransition(sourceBucketName, sourceObjName, undefined, err => {
|
||||||
|
assert.ifError(err);
|
||||||
|
s3.uploadPartCopy({
|
||||||
|
Bucket: destBucketName,
|
||||||
|
Key: destObjName,
|
||||||
|
CopySource: `${sourceBucketName}/${sourceObjName}`,
|
||||||
|
PartNumber: 1,
|
||||||
|
UploadId: uploadId,
|
||||||
|
}, (err, res) => {
|
||||||
|
checkNoError(err);
|
||||||
|
assert.strictEqual(res.ETag, etag);
|
||||||
|
assert(res.LastModified);
|
||||||
|
done();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should copy a part of a restored object', done => {
|
||||||
|
const archiveCompleted = {
|
||||||
|
archiveInfo: {},
|
||||||
|
restoreRequestedAt: new Date(0),
|
||||||
|
restoreRequestedDays: 5,
|
||||||
|
restoreCompletedAt: new Date(10),
|
||||||
|
restoreWillExpireAt: new Date(10 + (5 * 24 * 60 * 60 * 1000)),
|
||||||
|
};
|
||||||
|
fakeMetadataArchive(sourceBucketName, sourceObjName, undefined, archiveCompleted, err => {
|
||||||
|
assert.ifError(err);
|
||||||
|
s3.uploadPartCopy({
|
||||||
|
Bucket: destBucketName,
|
||||||
|
Key: destObjName,
|
||||||
|
CopySource: `${sourceBucketName}/${sourceObjName}`,
|
||||||
|
PartNumber: 1,
|
||||||
|
UploadId: uploadId,
|
||||||
|
}, (err, res) => {
|
||||||
|
checkNoError(err);
|
||||||
|
assert.strictEqual(res.ETag, etag);
|
||||||
|
assert(res.LastModified);
|
||||||
|
done();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
describe('copying parts by another account', () => {
|
describe('copying parts by another account', () => {
|
||||||
const otherAccountBucket = 'otheraccountbucket42342342342';
|
const otherAccountBucket = 'otheraccountbucket42342342342';
|
||||||
const otherAccountKey = 'key';
|
const otherAccountKey = 'key';
|
||||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue