Compare commits
1523 Commits
feature/AR
...
development
Author | SHA1 | Date |
---|---|---|
Vitaliy Filippov | b5711e9cbf | |
Vitaliy Filippov | 36dc6298d2 | |
Vitaliy Filippov | bc2d637578 | |
Vitaliy Filippov | b543695048 | |
Vitaliy Filippov | 90024d044d | |
Vitaliy Filippov | 451ab33f68 | |
Vitaliy Filippov | c86107e912 | |
Vitaliy Filippov | 0a5962f256 | |
Vitaliy Filippov | 0e292791c6 | |
Vitaliy Filippov | fc07729bd0 | |
Vitaliy Filippov | 4527dd6795 | |
Vitaliy Filippov | 05fb581023 | |
Vitaliy Filippov | 956739a04e | |
Vitaliy Filippov | 7ad0888a66 | |
Vitaliy Filippov | bf01ba4ed1 | |
Vitaliy Filippov | ab019e7e50 | |
Vitaliy Filippov | 3797695e74 | |
Vitaliy Filippov | c8084196c4 | |
bert-e | b72e918ff9 | |
bert-e | 22887f47d8 | |
bert-e | 0cd10a73f3 | |
bert-e | e139406612 | |
Maha Benzekri | d91853a38b | |
Mickael Bourgois | a7e798f909 | |
Mickael Bourgois | 3a1ba29869 | |
Mickael Bourgois | dbb9b6d787 | |
Mickael Bourgois | fce76f0934 | |
Mickael Bourgois | 0e39aaac09 | |
Mickael Bourgois | 0b14c93fac | |
Mickael Bourgois | ab2960bbf4 | |
Mickael Bourgois | 7305b112e2 | |
Mickael Bourgois | cd9e2e757b | |
Mickael Bourgois | ca0904f584 | |
Mickael Bourgois | 0dd3dd35e6 | |
bert-e | bf7e4b7e23 | |
bert-e | 92f4794727 | |
Jonathan Gramain | c6ef85e3a1 | |
Jonathan Gramain | c0fe0cfbcf | |
bert-e | 9c936f2b83 | |
bert-e | d26bac2ebc | |
Jonathan Gramain | cfb9db5178 | |
Jonathan Gramain | 2ce004751a | |
Jonathan Gramain | 539219e046 | |
Jonathan Gramain | be49e55db5 | |
bert-e | e6b240421b | |
bert-e | 81739e3ecf | |
Jonathan Gramain | c475503248 | |
bert-e | 7acbd5d2fb | |
Jonathan Gramain | 8d726322e5 | |
williamlardier | 4f7aa54886 | |
williamlardier | 0117a5b0b4 | |
williamlardier | f679831ba2 | |
williamlardier | bb162ca7d3 | |
williamlardier | 0c6dfc7b6e | |
williamlardier | d608d849df | |
williamlardier | 2cb63f58d4 | |
williamlardier | 51585712f4 | |
bert-e | 61eb24e46f | |
bert-e | a34b162782 | |
bert-e | a9e50fe046 | |
bert-e | 4150a8432e | |
Taylor McKinnon | 7e70ff9cbc | |
bert-e | 09dc45289c | |
bert-e | 47c628e0e1 | |
Nicolas Humbert | a1f4d3fe8a | |
williamlardier | 926242b077 | |
williamlardier | aa2aac5db3 | |
williamlardier | f2e2d82e51 | |
williamlardier | 88ad86b0c6 | |
bert-e | 8f25892247 | |
bert-e | 9ac207187b | |
Anurag Mittal | 624a04805f | |
Anurag Mittal | ba99933765 | |
williamlardier | 38d1ac1d2c | |
Taylor McKinnon | 4f34a34a11 | |
Taylor McKinnon | 53f2a159fa | |
Maha Benzekri | 63f6a75a86 | |
Maha Benzekri | 41acc7968e | |
williamlardier | c98c5207fc | |
williamlardier | 615ee393a4 | |
williamlardier | 97dfc699aa | |
williamlardier | 76786282d1 | |
williamlardier | a19d6524be | |
williamlardier | bbf6dfba22 | |
williamlardier | f0663fd507 | |
williamlardier | d4decbbd6c | |
williamlardier | 288b2b7b87 | |
williamlardier | ccf9b62e59 | |
williamlardier | 9fc2d552ae | |
williamlardier | d7cc4cf7d5 | |
williamlardier | 334d33ef44 | |
williamlardier | 989b0214d9 | |
williamlardier | 04d0730f97 | |
williamlardier | fbc642c022 | |
williamlardier | 104435f0b6 | |
williamlardier | a362ac202e | |
williamlardier | 1277e58150 | |
williamlardier | 7727ccf5f0 | |
williamlardier | 71860fc90c | |
williamlardier | e504b52de7 | |
Maha Benzekri | b369a47c4d | |
Maha Benzekri | b4fa81e832 | |
Maha Benzekri | 1e03d53879 | |
Maha Benzekri | 63e502d419 | |
Maha Benzekri | d2a31dc20a | |
Maha Benzekri | f24411875f | |
Maha Benzekri | 4fd7faa6a3 | |
Francois Ferrand | 118aaba702 | |
Francois Ferrand | e4442fdc52 | |
Francois Ferrand | 7fa199741f | |
Francois Ferrand | f7f95af78f | |
Francois Ferrand | 2dc053a784 | |
Francois Ferrand | cc9bb9047e | |
Francois Ferrand | b824fc0828 | |
Francois Ferrand | a2e6d91cf2 | |
Francois Ferrand | c1060853dd | |
Francois Ferrand | 227d6edd09 | |
bert-e | b4754c68ea | |
bert-e | 11aea5d93b | |
bert-e | 0c50a5952f | |
bert-e | 4a32e05855 | |
bert-e | 402ed21b14 | |
Nicolas Humbert | a22719ed47 | |
Nicolas Humbert | 41975d539d | |
Nicolas Humbert | c6724eb811 | |
Nicolas Humbert | d027006938 | |
Nicolas Humbert | 92cfd47572 | |
bert-e | 8796bf0f44 | |
bert-e | 735fcd04ef | |
Jonathan Gramain | c5522685b2 | |
Jonathan Gramain | 48df7df271 | |
Jonathan Gramain | e028eb227f | |
Nicolas Humbert | caf3146662 | |
bert-e | 1dee707eb8 | |
Jonathan Gramain | 2c8d69c20a | |
Jonathan Gramain | 0b2b6ceeb5 | |
Jonathan Gramain | f4b3f39dc6 | |
Jonathan Gramain | 84260340d0 | |
Jonathan Gramain | e531abc346 | |
Jonathan Gramain | 20f6e3089b | |
bert-e | 9dc34f2155 | |
bert-e | 08a4c3ade3 | |
Nicolas Humbert | d5c731856b | |
Nicolas Humbert | 584c94692b | |
Nicolas Humbert | a0e5257c75 | |
bert-e | 5435c14116 | |
bert-e | 38c44ea874 | |
Nicolas Humbert | 4200346dd2 | |
bert-e | 5472d0da59 | |
bert-e | cdc0bb1128 | |
Nicolas Humbert | 795f8bcf1c | |
Nicolas Humbert | 9371d8d734 | |
Nicolas Humbert | 3f31c7f3a1 | |
KillianG | 39cba3ee6c | |
KillianG | a00952712f | |
KillianG | a246e18e17 | |
KillianG | 3bb3a4d161 | |
bert-e | c6ba7f981e | |
bert-e | 69c82da878 | |
bert-e | 762ae5a0ff | |
bert-e | 89dfc794a6 | |
bert-e | 3205d117f5 | |
bert-e | 4eafae44d8 | |
bert-e | 4cab3c84f3 | |
bert-e | e3301a2db9 | |
williamlardier | 0dcc93cdbe | |
williamlardier | 2f2f91d6e8 | |
williamlardier | a28b141dfb | |
williamlardier | 46fe061895 | |
williamlardier | 34202eaa62 | |
williamlardier | 4d343fe468 | |
williamlardier | 229e641f88 | |
bert-e | 1433973e5c | |
bert-e | 201170b1ed | |
bert-e | f13985094e | |
Nicolas Humbert | 395033acd2 | |
Nicolas Humbert | 632ef26826 | |
bert-e | 242b2ec85a | |
bert-e | 3186a97113 | |
bert-e | 3861b8d317 | |
bert-e | bb278f7d7e | |
bert-e | 3b9309490d | |
Will Toozs | 0118dfabbb | |
Will Toozs | ff40dfaadf | |
Will Toozs | 9a31236da0 | |
Will Toozs | 61ebacfbf3 | |
Will Toozs | aa646ced28 | |
Will Toozs | f2ca37b5fb | |
Will Toozs | 9d74cedde8 | |
bert-e | 9c99a6980f | |
bert-e | d4e255781b | |
bert-e | f5763d012e | |
bert-e | 8fb740cf09 | |
bert-e | 55c8d89de2 | |
bert-e | 1afaaec0ac | |
bert-e | e20e458971 | |
williamlardier | 56e52de056 | |
williamlardier | d9fc4aae50 | |
williamlardier | 08de09a2ab | |
bert-e | bef9220032 | |
bert-e | de20f1efdc | |
bert-e | 4817f11f36 | |
bert-e | a6b283f5a2 | |
bert-e | 3f810a7596 | |
bert-e | b89d19c9f8 | |
Nicolas Humbert | 4dc9788629 | |
Nicolas Humbert | 65a891d6f8 | |
bert-e | 2ecca4feef | |
Nicolas Humbert | c52a3a6e44 | |
williamlardier | d82965ff78 | |
williamlardier | f488a65f15 | |
williamlardier | 40a575a717 | |
williamlardier | fea82f15ea | |
bert-e | 06dc042154 | |
bert-e | aa4643644a | |
bert-e | 89edf7e3d0 | |
Francois Ferrand | 4c7d3ae4bc | |
Francois Ferrand | 23883dae8b | |
Francois Ferrand | e616ffa374 | |
Francois Ferrand | 515c20e4cf | |
Francois Ferrand | f8eedddebf | |
Francois Ferrand | f3654e4fb8 | |
Francois Ferrand | 517fb99190 | |
Francois Ferrand | 531c83a359 | |
Francois Ferrand | b84fa851f7 | |
Francois Ferrand | 4cb1a879f7 | |
Francois Ferrand | 7ae55b20e7 | |
Francois Ferrand | d0a6fa17a5 | |
Francois Ferrand | 7275459f70 | |
Hervé Dombya | 363afcd17f | |
Frédéric Meinnel | 1cf0250ce9 | |
Frédéric Meinnel | 20d0b38d0b | |
Frédéric Meinnel | 9988a8327a | |
Frédéric Meinnel | b481d24637 | |
Frédéric Meinnel | 71625774c1 | |
Frédéric Meinnel | 9b9338f2b8 | |
Frédéric Meinnel | 601619f200 | |
Frédéric Meinnel | a92e71fd50 | |
Frédéric Meinnel | 8802ea0617 | |
Frédéric Meinnel | acc5f74787 | |
Frédéric Meinnel | e3c093f352 | |
Frédéric Meinnel | e17383a678 | |
bert-e | 43f62b847c | |
bert-e | a031905bba | |
bert-e | 13ad6881f4 | |
Mickael Bourgois | dea5173075 | |
Mickael Bourgois | b3f96198fe | |
Mickael Bourgois | 5e2dd8cccb | |
bert-e | cd2406b827 | |
bert-e | 62f707caff | |
bert-e | f01ef00a52 | |
bert-e | 30fb64e443 | |
bert-e | 054107d8fb | |
bert-e | 848bf318fe | |
bert-e | 0beb48a1fd | |
bert-e | 618d4dffc7 | |
bert-e | b5aae192f7 | |
Mickael Bourgois | 557f3dcde6 | |
Mickael Bourgois | 3291af36bb | |
Will Toozs | d274acd8ed | |
Will Toozs | e6d9e8fc35 | |
Will Toozs | b08edefad6 | |
Will Toozs | e9c353d62a | |
Will Toozs | c7c55451a1 | |
bert-e | 7bb004586d | |
bert-e | d48de67723 | |
Will Toozs | fa4dec01cb | |
Will Toozs | 4f79a9c59c | |
Will Toozs | 05c759110b | |
Will Toozs | deae294a81 | |
Will Toozs | ab587385e6 | |
Will Toozs | 6243911072 | |
Will Toozs | da804054e5 | |
Will Toozs | 493a6da773 | |
Will Toozs | 7ecdd11783 | |
Mickael Bourgois | 7e53b67c90 | |
bert-e | b141c59bb7 | |
bert-e | 0b79ecd942 | |
bert-e | 86ece5c264 | |
Mickael Bourgois | 0b79cd6af6 | |
Mickael Bourgois | a51b5e0af3 | |
bert-e | 10ca6b98fa | |
bert-e | 171925732f | |
Taylor McKinnon | 6d36f9c867 | |
Taylor McKinnon | 1a21c4f867 | |
Taylor McKinnon | 866dec1b81 | |
Mickael Bourgois | 9491e82235 | |
bert-e | 70e8b20af9 | |
bert-e | 0ec5f4fee5 | |
bert-e | 6c468a01d9 | |
bert-e | 3d2b75f344 | |
Mickael Bourgois | 5811fa5326 | |
bert-e | e600677545 | |
bert-e | 72e5da10b7 | |
Mickael Bourgois | de0e7e6449 | |
Mickael Bourgois | 97b5ed6dd3 | |
Mickael Bourgois | dad8a3ee37 | |
Mickael Bourgois | 8aca658c5c | |
bert-e | 759817c5a0 | |
bert-e | 035c7e8d7f | |
Mickael Bourgois | b8af1225d5 | |
Mickael Bourgois | 40faa5f3fa | |
Mickael Bourgois | 1fc8622614 | |
Mickael Bourgois | a0acefb4a8 | |
bert-e | de27a5b88e | |
bert-e | a4cc5e45f3 | |
bert-e | 621cb33680 | |
bert-e | b025443d21 | |
Mickael Bourgois | d502a81284 | |
bert-e | 9a8b707e82 | |
bert-e | 002dbe0019 | |
bert-e | 59e52f6df2 | |
bert-e | b52f2356ba | |
Mickael Bourgois | 60679495b6 | |
Mickael Bourgois | 9dfacd0827 | |
Mickael Bourgois | 485ef1e9bb | |
Mickael Bourgois | 5e041ca5e7 | |
Mickael Bourgois | 52137772d9 | |
Mickael Bourgois | fcf193d033 | |
Mickael Bourgois | fb61cad786 | |
Mickael Bourgois | b6367eb2b8 | |
bert-e | d803bdcadc | |
bert-e | 4f1b8f25b7 | |
bert-e | 94363482c3 | |
bert-e | 6b0a8cb9ed | |
Will Toozs | 5dbf5d965f | |
Will Toozs | ebefc4b5b0 | |
Mickael Bourgois | ac1c75e414 | |
Mickael Bourgois | fee4f3a96e | |
bert-e | e969eeaa20 | |
bert-e | 2ee78bcf6a | |
bert-e | 64273365d5 | |
bert-e | 65c6bacd34 | |
bert-e | d60d252eaf | |
bert-e | f31fe2f2bf | |
bert-e | ee47cece90 | |
Mickael Bourgois | 7a5cddacbc | |
Mickael Bourgois | baa6203b57 | |
Mickael Bourgois | 141056637b | |
Mickael Bourgois | 0f007e0489 | |
Mickael Bourgois | 2d50a76923 | |
Mickael Bourgois | 6b4f10ae56 | |
Mickael Bourgois | 23eaf89cc3 | |
Mickael Bourgois | d6a2144508 | |
Mickael Bourgois | 40dd3f37a4 | |
Mickael Bourgois | d3307654a6 | |
Mickael Bourgois | e342a90b48 | |
williamlardier | dbda5f16a6 | |
Mickael Bourgois | d4a4825668 | |
Mickael Bourgois | 83b9e9a775 | |
Maha Benzekri | 2959c950dd | |
Maha Benzekri | 462ddf7ef1 | |
Maha Benzekri | fda42e7399 | |
Maha Benzekri | edbd6caeb4 | |
Maha Benzekri | 1befaa1f28 | |
Maha Benzekri | 0cefca831d | |
Jonathan Gramain | ea7b69e313 | |
Jonathan Gramain | 8ec1c2f2db | |
Jonathan Gramain | 3af6ca5f6d | |
Jonathan Gramain | 997d71df08 | |
Jonathan Gramain | 275ebcec5c | |
Mickael Bourgois | 8b77530b2b | |
bert-e | 43f9606598 | |
bert-e | be34e5ad59 | |
Jonathan Gramain | 5bc64ede43 | |
Jonathan Gramain | 911010376e | |
Jonathan Gramain | b5ec37b38b | |
Mickael Bourgois | 3ce869cea3 | |
Mickael Bourgois | b7960784db | |
Mickael Bourgois | 5ac10cefa8 | |
Mickael Bourgois | 2dafefd77f | |
Mickael Bourgois | 36f147b441 | |
Mickael Bourgois | 8ed447ba63 | |
bert-e | bf235f3335 | |
bert-e | 569c9f4368 | |
Nicolas Humbert | 92cf03254a | |
Nicolas Humbert | c57ae9c8ea | |
Mickael Bourgois | 5bec42d051 | |
Mickael Bourgois | f427fc9b70 | |
Mickael Bourgois | 9aad4ae3ea | |
bert-e | 1a3cb8108c | |
bert-e | 042120b17e | |
bert-e | ba4593592d | |
bert-e | 6efdb627da | |
bert-e | 5306bf0b5c | |
bert-e | 5b22819c3f | |
bert-e | 126ca3560f | |
bert-e | e5b692f3db | |
bert-e | 548ae8cd12 | |
Taylor McKinnon | 80376405df | |
Taylor McKinnon | a612e5c27c | |
Taylor McKinnon | c3b7662086 | |
Taylor McKinnon | 818b1e60d1 | |
bert-e | 2a919af071 | |
bert-e | 5c300b8b6c | |
Maha Benzekri | ad3ebd3db2 | |
Maha Benzekri | 99068e7265 | |
Maha Benzekri | cd039d8133 | |
Maha Benzekri | dd3ec25d74 | |
Maha Benzekri | 717228bdfc | |
Maha Benzekri | 836fc80560 | |
Maha Benzekri | 75b293df8d | |
Maha Benzekri | a855e38998 | |
Maha Benzekri | 51d5666bec | |
Maha Benzekri | ecb74a2db3 | |
Maha Benzekri | cdcdf8eff0 | |
Maha Benzekri | dc39b37877 | |
Maha Benzekri | 4897b3c720 | |
Maha Benzekri | ffe4ea4afe | |
Maha Benzekri | a16cfad0fc | |
bert-e | 556163e3e9 | |
Maha Benzekri | 8fe9f16661 | |
Maha Benzekri | eb9ff85bd9 | |
bert-e | 52994c0177 | |
tmacro | e109b0fca7 | |
Maha Benzekri | 9940699f9d | |
Maha Benzekri | 869d554e43 | |
Maha Benzekri | 2f8b228595 | |
Maha Benzekri | 539b2c1630 | |
Maha Benzekri | 320766e7b2 | |
Maha Benzekri | 74425d03f8 | |
Maha Benzekri | 91629a0d18 | |
Maha Benzekri | e44b7ed918 | |
Maha Benzekri | 3cb29f7f8e | |
Maha Benzekri | 4f08a4dff2 | |
Maha Benzekri | 15a1aa7965 | |
Maha Benzekri | 4470ee9125 | |
Francois Ferrand | d8c12597ea | |
Francois Ferrand | c8eb9025fa | |
Francois Ferrand | 57e0f71e6a | |
Francois Ferrand | f22f920ee2 | |
Maha Benzekri | ed1bb6301d | |
Maha Benzekri | 70dfa5b11b | |
Maha Benzekri | f17e7677fa | |
Maha Benzekri | 63b00fef55 | |
Maha Benzekri | b4f0d34abd | |
Maha Benzekri | e18f83ef0d | |
Francois Ferrand | a4e6f9d034 | |
Maha Benzekri | cf94b9de6a | |
Maha Benzekri | da0492d2bb | |
Maha Benzekri | 979b9065ed | |
Maha Benzekri | d5a3923f74 | |
Maha Benzekri | 23cbbdaaed | |
Maha Benzekri | e506dea140 | |
Maha Benzekri | 78721be7f7 | |
Maha Benzekri | 02c5a46d14 | |
Maha Benzekri | b138955ef2 | |
Maha Benzekri | 7d10e5d69e | |
bert-e | bc291fe3a7 | |
bert-e | 8dc7432c51 | |
bert-e | 040fe53e53 | |
bert-e | 60e350a5cf | |
bert-e | 5de00c80f8 | |
bert-e | 6f963bdcd9 | |
bert-e | cd9024fd32 | |
Maha Benzekri | 37649bf49b | |
Maha Benzekri | abf5ea33a9 | |
Maha Benzekri | 2596f3fda8 | |
bert-e | dff7610060 | |
bert-e | 757c2537ef | |
Maha Benzekri | c445322685 | |
bert-e | 2344204746 | |
Maha Benzekri | 693ddf8d35 | |
Maha Benzekri | 6caa5cc26a | |
bert-e | 4515b2adbf | |
bert-e | 50ffdd260b | |
Taylor McKinnon | 3836848c05 | |
Taylor McKinnon | 813a1553d2 | |
Taylor McKinnon | 1238cd809c | |
bert-e | b5f22d8c68 | |
bert-e | 68ff54d49a | |
bert-e | a74b3eacf8 | |
bert-e | f00a2f2d9e | |
bert-e | 02bb60253a | |
bert-e | 3fe5579c80 | |
bert-e | 3fdd2bce21 | |
Taylor McKinnon | 44e6eb2550 | |
Taylor McKinnon | c148c770ac | |
Maha Benzekri | fa2f877825 | |
Will Toozs | 0e323fbefe | |
bert-e | c9b512174f | |
bert-e | 7b48624cf7 | |
bert-e | 55b07def2e | |
bert-e | 62ae2b2c69 | |
bert-e | fcc9468b63 | |
bert-e | efc44a620d | |
Maha Benzekri | 72342f6654 | |
Maha Benzekri | fa11e58d57 | |
bert-e | 1bc19b39d7 | |
bert-e | b5fa3a1fd3 | |
bert-e | 68a6fc659c | |
bert-e | 2624a05018 | |
bert-e | 0882bfffb9 | |
bert-e | c0fc958365 | |
bert-e | d3c74d2c16 | |
Maha Benzekri | 9001285177 | |
bert-e | bae6e8ecb3 | |
Will Toozs | e0eab954aa | |
Will Toozs | 19b4e25373 | |
Kerkesni | 07eda89a3f | |
bert-e | 27b4066ca4 | |
bert-e | 2ee5b356fa | |
bert-e | 233955a0d3 | |
bert-e | ab51522110 | |
Rahul Padigela | b1b2d2ada6 | |
bert-e | f5d3433413 | |
bert-e | 62b4b9bc25 | |
bert-e | ce4b2b5a27 | |
bert-e | 96bd67ee60 | |
bert-e | ec56c77881 | |
bert-e | d0abde3962 | |
bert-e | f08a3f434b | |
bert-e | fdc682f2db | |
bert-e | b184606dc2 | |
bert-e | 172ec4a714 | |
bert-e | ae770d0d3f | |
bert-e | 7d2613e9a3 | |
Maha Benzekri | 9ce0f2c2b6 | |
Maha Benzekri | 43b4e0c713 | |
Maha Benzekri | 2bda761518 | |
Maha Benzekri | bfc9ca68c9 | |
Maha Benzekri | 6abb0d96a9 | |
Maha Benzekri | 733f424a4b | |
Will Toozs | 8d4ff7df5f | |
Taylor McKinnon | 59b87479df | |
Taylor McKinnon | 967ab966fa | |
Taylor McKinnon | 212c7f506c | |
Taylor McKinnon | 1e9ee0ef0b | |
bert-e | 9185f16554 | |
bert-e | 2df9a57f9c | |
Nicolas Humbert | c96706ff28 | |
Nicolas Humbert | daa6f46b14 | |
Nicolas Humbert | 44315057df | |
Nicolas Humbert | 61fe64a3ac | |
bert-e | 68535f83d6 | |
bert-e | 41d63650be | |
bert-e | 4ebb5d449a | |
bert-e | 48abedc6f7 | |
bert-e | 12185f7c3b | |
bert-e | 5f82ee2d0e | |
bert-e | 7e0f9c63fe | |
bert-e | 9f5ac17357 | |
Taylor McKinnon | d72bc5c6b9 | |
Taylor McKinnon | 0e47810963 | |
Taylor McKinnon | 8d83546ee3 | |
Taylor McKinnon | fff4fd5d22 | |
Taylor McKinnon | 1016a6826d | |
bert-e | 3b36cef85f | |
Jonathan Gramain | 114b885c7f | |
Jonathan Gramain | e56d4e3744 | |
Jonathan Gramain | 15144e4adf | |
Jonathan Gramain | 3985e2a712 | |
williamlardier | 3b95c033d2 | |
williamlardier | 04091dc316 | |
williamlardier | 56023a80ed | |
bert-e | 2deaebd89a | |
bert-e | c706ccf9c6 | |
Nicolas Humbert | 4afb2476f8 | |
Nicolas Humbert | 91a7e7f24f | |
Taylor McKinnon | 2f344cde70 | |
Taylor McKinnon | ad154085ac | |
Francois Ferrand | 583ea8490f | |
bert-e | 85a9480793 | |
bert-e | be2f65b69e | |
bert-e | 1ee6d0a87d | |
bert-e | 224af9a5d2 | |
Nicolas Humbert | 9e2ad48c5c | |
Nicolas Humbert | 780971ce10 | |
bert-e | 74f05377f0 | |
bert-e | 111e14cc89 | |
Nicolas Humbert | fd6fb5a26c | |
Nicolas Humbert | 8df540dcc1 | |
Florent Monjalet | 00b20f00d1 | |
Florent Monjalet | a91d53a12c | |
Florent Monjalet | 63d2637046 | |
Maha Benzekri | 5d416ad190 | |
Maha Benzekri | ff29cda03f | |
Maha Benzekri | 5685b2e972 | |
Maha Benzekri | 4e4ea2ab84 | |
Florent Monjalet | cb8baf2dab | |
Maha Benzekri | 67e5694d26 | |
bert-e | 22f470c6eb | |
bert-e | e510473116 | |
Maha Benzekri | d046e8a294 | |
Maha Benzekri | 20a730788a | |
Maha Benzekri | 47958591ec | |
Maha Benzekri | 4195b6ae6a | |
Maha Benzekri | feefd13b68 | |
Florent Monjalet | 17a6808fe4 | |
Florent Monjalet | df646e4802 | |
Florent Monjalet | 267770d256 | |
Florent Monjalet | 1b92dc2c05 | |
Florent Monjalet | f80bb2f34b | |
Florent Monjalet | 4f89b67bb9 | |
Florent Monjalet | 8b5630923c | |
Florent Monjalet | 9ff5e376e5 | |
Florent Monjalet | a9b5a2e3a4 | |
Florent Monjalet | 7e9ec22ae3 | |
bert-e | 9d4664ae06 | |
bert-e | 662265ba2e | |
bert-e | c7da82dda7 | |
Taylor McKinnon | 960b4b2dd4 | |
Taylor McKinnon | 1e9af343b9 | |
Taylor McKinnon | 8bb7338080 | |
Taylor McKinnon | 17e4f14f9c | |
Taylor McKinnon | 014b071536 | |
Taylor McKinnon | 9130f323d4 | |
Taylor McKinnon | c09d3282dc | |
Taylor McKinnon | fb9175579f | |
bert-e | 2d45f92ae1 | |
bert-e | 48452496fa | |
bert-e | b89773eba6 | |
bert-e | c738e0924e | |
bert-e | 18bf6b8d4a | |
bert-e | 858c31a542 | |
Nicolas Humbert | 75a759de27 | |
bert-e | 19d3e0bc9d | |
bert-e | bac044dc8f | |
Taylor McKinnon | 8f77cd18c8 | |
bert-e | cb7609b173 | |
Taylor McKinnon | 2926048735 | |
Taylor McKinnon | 656ef3fcee | |
bert-e | 8c0f709014 | |
Francois Ferrand | ce92d33a5d | |
Kerkesni | 0381cce85c | |
Kerkesni | 20a08a2a4e | |
Kerkesni | ff73d8ab12 | |
Kerkesni | 1ee44bc6d3 | |
bert-e | 614e876536 | |
bert-e | b40a77d94b | |
bert-e | 3a3a73b756 | |
Nicolas Humbert | 6789959109 | |
Nicolas Humbert | bf9b53eea9 | |
Nicolas Humbert | aa04d23e68 | |
Nicolas Humbert | e08aaa7bcc | |
Nicolas Humbert | 1d9c44126a | |
Nicolas Humbert | 70a28ab620 | |
Nicolas Humbert | 550451eefa | |
Nicolas Humbert | 96befd3f28 | |
Nicolas Humbert | 75288f1b56 | |
Nicolas Humbert | 6847f2b0c4 | |
Nicolas Humbert | 050059548e | |
Nicolas Humbert | 9b2a557a05 | |
Nicolas Humbert | 7a7e2f4b91 | |
bert-e | 3f6e85590d | |
bert-e | de589a07e8 | |
bert-e | bc009945d2 | |
bert-e | 8db04f4486 | |
bert-e | 328b7bc335 | |
bert-e | 3ac30d9bab | |
bert-e | 32204fbfbf | |
bert-e | b1eda2a73a | |
bert-e | 0249ad9bcf | |
bert-e | 5a26e1a80d | |
bert-e | 507a2d4ff5 | |
bert-e | 8cdd35950b | |
bert-e | bfa366cd27 | |
Dimitrios Vasilas | d132757696 | |
Alexander Chan | 2a4be31b8a | |
bert-e | 1207a6fb70 | |
bert-e | 5883286864 | |
bert-e | b206728342 | |
bert-e | 347a7391b9 | |
Dimitrios Vasilas | 6273eebe66 | |
bert-e | 2a37e809d9 | |
bert-e | 86ce7691cd | |
bert-e | c04f663480 | |
Dimitrios Vasilas | f2493e982f | |
bert-e | e466b5e92a | |
bert-e | a4bc10f730 | |
bert-e | e826033bf0 | |
Dimitrios Vasilas | c23dad6fb8 | |
Dimitrios Vasilas | 5fcdaa5a97 | |
Dimitrios Vasilas | 9f61ef9a3b | |
Nicolas Humbert | c480301e95 | |
Nicolas Humbert | 276be285cc | |
bert-e | 897d41392a | |
bert-e | f4e3a19d61 | |
Nicolas Humbert | ee84a03d2c | |
Nicolas Humbert | 98f855f997 | |
williamlardier | 7c52fcbbb0 | |
bert-e | da52688a39 | |
bert-e | 1cb54a66f8 | |
williamlardier | 0bb61ddb5b | |
williamlardier | 68e4b0610a | |
bert-e | d9fffdad9e | |
williamlardier | 389c32f819 | |
williamlardier | c2df0bd3eb | |
williamlardier | af0436f1cd | |
williamlardier | f7593d385e | |
williamlardier | 84068b205e | |
williamlardier | 9774d31b03 | |
Kerkesni | d26b8bcfcc | |
Kerkesni | e4634621ee | |
williamlardier | 0b58b3ad2a | |
bert-e | 652bf92536 | |
bert-e | c5b1ef63ee | |
bert-e | 227de16bca | |
williamlardier | c57a6e3c57 | |
bert-e | 344ee8a014 | |
bert-e | 5d7a434306 | |
williamlardier | 852ae72a13 | |
williamlardier | 507782bd17 | |
bert-e | b7e7f65d52 | |
williamlardier | d00320a8ba | |
williamlardier | 4cf07193d9 | |
williamlardier | aef272ea3c | |
williamlardier | 31d1734d5c | |
bert-e | c5b7450a4d | |
bert-e | eb5affdced | |
bert-e | cdaf6db929 | |
bert-e | 91ada795d0 | |
bert-e | 2b420a85e0 | |
Nicolas Humbert | 18c8d4ecac | |
Nicolas Humbert | c8150c6857 | |
bert-e | 399a2a53ab | |
Alexander Chan | bbad049b5f | |
bert-e | 2a4e2e1584 | |
bert-e | 08e43f5084 | |
Nicolas Humbert | cc153c99d6 | |
Nicolas Humbert | d3f9870389 | |
Nicolas Humbert | 0fa264693d | |
bert-e | b304d05614 | |
bert-e | 751f6ce559 | |
bert-e | 0330597679 | |
Nicolas Humbert | 27cacc9552 | |
bert-e | 004bd63368 | |
bert-e | e047ae6fbb | |
Nicolas Humbert | ebca8dd05e | |
bert-e | 52535fb44b | |
Nicolas Humbert | 1ed32b2cae | |
Nicolas Humbert | 960d736962 | |
bert-e | 11098dd113 | |
Nicolas Humbert | 9cc7362fbd | |
KillianG | 32401c9a83 | |
KillianG | 5f05b676cc | |
KillianG | fd662a8c2c | |
bert-e | a843d53939 | |
Nicolas Humbert | f889100798 | |
bert-e | 5d54dd58be | |
Nicolas Humbert | 1bd0deafcf | |
Francois Ferrand | 7c788d3dbf | |
Nicolas Humbert | 50cb6a2bf1 | |
bert-e | 58f7bb2877 | |
bert-e | f899337284 | |
bert-e | b960a913ec | |
bert-e | 5436c0698e | |
bert-e | 3ff7856a94 | |
gaspardmoindrot | 57fb5f1403 | |
Francois Ferrand | ea284508d7 | |
Francois Ferrand | 0981fa42f3 | |
Francois Ferrand | 7e63064a52 | |
Francois Ferrand | 71b21e40ca | |
Francois Ferrand | ff894bb545 | |
Francois Ferrand | ae9f24e1bb | |
bert-e | 2dc01ce3ed | |
Kerkesni | 9bd9bef6c7 | |
bert-e | 3a8bbefb6c | |
bert-e | a6a5c273d5 | |
Dimitrios Vasilas | c329d9684d | |
bert-e | ec5baf1f85 | |
Dimitrios Vasilas | d844fb4fa1 | |
Kerkesni | 6479076fec | |
bert-e | c436e2657c | |
bert-e | df45f481d0 | |
Dimitrios Vasilas | 406f3f0093 | |
Dimitrios Vasilas | 6952b91539 | |
Dimitrios Vasilas | eea1ebb5ec | |
bert-e | dae5b7dc28 | |
Dimitrios Vasilas | 1d76f61d88 | |
Dimitrios Vasilas | 8abe809141 | |
Dimitrios Vasilas | 94b14a258e | |
bert-e | cd8c589eba | |
williamlardier | daec2661ae | |
Francois Ferrand | 0f266371a0 | |
Francois Ferrand | 73e56963bf | |
Nicolas Humbert | 4c189b2d9e | |
Alexander Chan | fb11d0f42e | |
Alexander Chan | fe6690da92 | |
williamlardier | 9cbd9f7be8 | |
williamlardier | c2fc8873cb | |
Francois Ferrand | bee1ae04bf | |
Francois Ferrand | eb86552a57 | |
Alexander Chan | 80fbf78d62 | |
bert-e | f5d8f2fac5 | |
bert-e | b1e13d6efa | |
Jonathan Gramain | e7ef437b27 | |
Jonathan Gramain | 55f652ecc4 | |
Jonathan Gramain | 77f56d1fa1 | |
bert-e | 36e841b542 | |
bert-e | a2404ed622 | |
williamlardier | 1d12a430a0 | |
williamlardier | bea27b4fb4 | |
williamlardier | 76405d9179 | |
Alexander Chan | 31b7f1e71c | |
Alexander Chan | 8674cac9f8 | |
KillianG | d5b666a246 | |
KillianG | 4360772971 | |
KillianG | 6e152e33d5 | |
KillianG | 94f34979a5 | |
bert-e | 4be430c313 | |
bert-e | 4b0f165b46 | |
Nicolas Humbert | 3590377554 | |
Nicolas Humbert | f7f77c6cd2 | |
bert-e | 8a08f97492 | |
bert-e | a908d09cc8 | |
Jonathan Gramain | 170a68a4f8 | |
bert-e | 448afa50e3 | |
bert-e | a0fff19611 | |
bert-e | 6ad1643ba8 | |
Jonathan Gramain | 5ce253ef62 | |
Jonathan Gramain | 72f4c36077 | |
Jonathan Gramain | e534af856f | |
bert-e | 5dd8d9057a | |
bert-e | 50b738cfff | |
bert-e | 2be3ce21c7 | |
bert-e | 70ff6fc4ee | |
bert-e | c5214d19a6 | |
bert-e | 951a98fcaf | |
Jonathan Gramain | ebb0fed48a | |
Jonathan Gramain | 5f85c14ab9 | |
bert-e | 8ca770dcb7 | |
bert-e | 7923977300 | |
Jonathan Gramain | 3597c146d7 | |
Jonathan Gramain | c81e49ba9b | |
Jonathan Gramain | e93c064b5f | |
Jonathan Gramain | 2b3774600d | |
Jonathan Gramain | a6951f2ef8 | |
Jonathan Gramain | 9fb232861f | |
Jonathan Gramain | 6cf4e291fa | |
Jonathan Gramain | 06b4320e7d | |
bert-e | 3585b8d5eb | |
bert-e | 9331c0a375 | |
bert-e | 70f368408d | |
Jonathan Gramain | a63762ae71 | |
Jonathan Gramain | f0420572c8 | |
Jonathan Gramain | b1fd915ba3 | |
Jonathan Gramain | 4285c18e44 | |
Jonathan Gramain | 71ffd004df | |
Jonathan Gramain | f674104825 | |
Jonathan Gramain | 9c9d4b3e7c | |
Jonathan Gramain | 13265a3d6e | |
Jonathan Gramain | 31c5316a7e | |
bert-e | 0a1489ee46 | |
bert-e | 71f80544ac | |
bert-e | 270080a75b | |
bert-e | 74717b2acb | |
bert-e | ef81f3e58f | |
Jonathan Gramain | aa55a87a65 | |
Xin LI | de5b4331e2 | |
Xin LI | e1a4f1ef8c | |
bert-e | 46dff0321d | |
bert-e | f3c7580510 | |
bert-e | 2145bb3ae3 | |
Xin LI | 468162c81c | |
Xin LI | 89f9139203 | |
Xin LI | 8153554a4c | |
Xin LI | fb9063bccc | |
bert-e | ddc6ea72be | |
Nicolas Humbert | f20bf1becf | |
bert-e | d31c773e77 | |
bert-e | d266ff4e9f | |
bert-e | 6ff21996f5 | |
bert-e | 15d1b3ba86 | |
bert-e | 827c752e9a | |
Jonathan Gramain | 82dc837610 | |
bert-e | 7dc2f07cb6 | |
Kerkesni | 6c22d87c55 | |
Kerkesni | 310f67d3a7 | |
Kerkesni | 49841c5e0e | |
Kerkesni | b5334baca8 | |
Kerkesni | e592671b54 | |
bert-e | 6e0b66849d | |
Nicolas Humbert | f2292f1ca3 | |
bert-e | 18a1bfd325 | |
bert-e | c2b54702f6 | |
Jonathan Gramain | 13a5e14da5 | |
Jonathan Gramain | 891913fd16 | |
bert-e | 7baa2501e6 | |
Jonathan Gramain | 8e808afec9 | |
bert-e | 2c999f4c10 | |
bert-e | b23472a754 | |
bert-e | a4999c1bfb | |
bert-e | fe0b0f8b2f | |
Jonathan Gramain | c2bee23fd1 | |
Jonathan Gramain | e87c2a4e5f | |
Jonathan Gramain | db943cd634 | |
bert-e | bf7a643d45 | |
bert-e | 874a53c767 | |
Jonathan Gramain | c7e1c6921b | |
Jonathan Gramain | 6d2d56bc1e | |
bert-e | 3f3bf0fdf0 | |
bert-e | 1922facb7b | |
Jonathan Gramain | fff03d3320 | |
Jonathan Gramain | 6e79d3f1a4 | |
bert-e | 2a44949048 | |
bert-e | 1576352613 | |
bert-e | 74978f423e | |
bert-e | 6f4cd75d6f | |
bert-e | 00906d04f5 | |
Jonathan Gramain | 270339f2bb | |
bert-e | 6660626190 | |
Nicolas Humbert | 049f52bf95 | |
williamlardier | 58fc0b7146 | |
williamlardier | 11e3d7ecb2 | |
williamlardier | 1bab851ce3 | |
bert-e | 0bc0341f33 | |
bert-e | b5af652dc8 | |
bert-e | 6c29be5137 | |
Jonathan Gramain | 2967f327ed | |
Jonathan Gramain | 0f8a56e9b5 | |
Jonathan Gramain | c1d2601237 | |
Jonathan Gramain | 885f95606c | |
bert-e | b5b0f6482b | |
Nicolas Humbert | ec9ed94555 | |
bert-e | 755f282f8e | |
Nicolas Humbert | 41cc399d85 | |
bert-e | c4dc928de2 | |
Nicolas Humbert | 6b8a2581b6 | |
Killian Gardahaut | a0087e8d77 | |
KillianG | 8e5bea56b6 | |
KillianG | 976e349036 | |
KillianG | de1c23ac1b | |
KillianG | 0b4d04a2a3 | |
KillianG | 049d396c8d | |
Naren | 5c04cbe6d1 | |
Naren | d3e538087a | |
bert-e | 7cc37c7f3d | |
Naren | 399d081d68 | |
Naren | c3fac24366 | |
Naren | 82687aa1a7 | |
Naren | 820ada48ce | |
Naren | df73cc7ebc | |
Naren | 429c62087d | |
Naren | 13fa26986d | |
bert-e | 5cb63991a8 | |
Naren | d5b336d1d9 | |
bert-e | 750223500d | |
Naren | 23ffbf77d2 | |
Naren | 6ea18bcef4 | |
Naren | c45dac7ffc | |
Naren | 878fc6819f | |
Naren | 43592f9392 | |
Naren | dbd1383c32 | |
Alexander Chan | c310cb3dd1 | |
bert-e | 7fe0e2891b | |
bert-e | 93442fed68 | |
Alexander Chan | 21612cfadd | |
bert-e | 644062f088 | |
Alexander Chan | d0eb81539e | |
bert-e | 22cda51944 | |
williamlardier | 408d0de732 | |
williamlardier | 83916c91fb | |
bert-e | 110b2a35ed | |
bert-e | 3b5f5875f3 | |
bert-e | bdaf92023f | |
bert-e | 25d1cd9601 | |
bert-e | 91c9eb6faa | |
Jonathan Gramain | 6306cf7fc7 | |
williamlardier | a8117ca037 | |
bert-e | 9145d1cf79 | |
bert-e | 0fb54c9d31 | |
bert-e | 63dc33a339 | |
bert-e | 49d46dfe04 | |
bert-e | 4bb331392e | |
bert-e | ae1b6dc3d1 | |
bert-e | 162157580f | |
bert-e | 4e4435d82e | |
Alexander Chan | b0db1f9a94 | |
Alexander Chan | 35d269c27c | |
bert-e | b1304b5f7f | |
bert-e | c355422a7e | |
bert-e | d44334ad22 | |
bert-e | 6e9c50eeba | |
bert-e | 6c7be8892c | |
bert-e | 82df91debb | |
bert-e | 6b1f8c61ec | |
bert-e | a12d44dc18 | |
bert-e | d5ec32fc5c | |
Jonathan Gramain | e16da9ab11 | |
Jonathan Gramain | d43e8d01bf | |
bert-e | 335bfabed1 | |
bert-e | 3b92eaaef2 | |
bert-e | a6fd8b2261 | |
bert-e | 00ab8d482d | |
bert-e | 29551f7edf | |
Alexander Chan | 7dd022f6cb | |
bert-e | 3398db3c0f | |
bert-e | 00a793be6e | |
bert-e | 68bb824b57 | |
Jonathan Gramain | 432680841e | |
bert-e | b2641f5c1b | |
Jonathan Gramain | 66c34e0272 | |
bert-e | 836e9fb22d | |
bert-e | 9bc7fa49ea | |
bert-e | e3087fb940 | |
Dimitrios Vasilas | 67e126320c | |
Dimitrios Vasilas | 66520571d3 | |
bert-e | ead7f5f7c2 | |
bert-e | fe636d22fc | |
bert-e | 6530e70761 | |
bert-e | 6d14bda3ed | |
bert-e | 416634cf11 | |
Dimitrios Vasilas | fd669664a6 | |
bert-e | c17059dc77 | |
bert-e | b4617f1362 | |
bert-e | 624d4708cf | |
bert-e | 95c180e9d9 | |
bert-e | 5a2b465d0f | |
Dimitrios Vasilas | 2cd10e7195 | |
bert-e | 8ace5b24a5 | |
bert-e | 4b1dcd531d | |
bert-e | 13ef509cbc | |
Dimitrios Vasilas | d4feda7bbd | |
bert-e | 39f7035dbd | |
bert-e | 7d3ab342f6 | |
Dimitrios Vasilas | af60df4caf | |
Dimitrios Vasilas | 2acd7348d4 | |
williamlardier | bb62ed4fa7 | |
williamlardier | c95368858d | |
bert-e | ffafe6ecfc | |
Dimitrios Vasilas | 4301fc57e2 | |
Dimitrios Vasilas | 072d8324ca | |
Dimitrios Vasilas | 25276dae3f | |
Dimitrios Vasilas | bdeeb25d19 | |
Dimitrios Vasilas | 5dc17db9df | |
bert-e | d8ff1377fc | |
bert-e | 425a9167ca | |
bert-e | 2f21b9cc52 | |
Alexander Chan | d6433961a1 | |
Alexander Chan | 090b276f23 | |
Jonathan Gramain | 28f4c5baee | |
Jonathan Gramain | 89a1c646ad | |
Jonathan Gramain | 5c249f0c56 | |
Jonathan Gramain | c971669b9b | |
Jonathan Gramain | 04e553b968 | |
Jonathan Gramain | 57ef76548e | |
Jonathan Gramain | 717a3274fc | |
Jonathan Gramain | 1b59d0efb8 | |
bert-e | 0a8f846f4b | |
bert-e | 045602fc00 | |
Alexander Chan | 5048c1fef1 | |
Alexander Chan | 1e95d108be | |
bert-e | 04abefd799 | |
Alexander Chan | 2772976e86 | |
Alexander Chan | 51905f82ba | |
Alexander Chan | b72adc50a7 | |
Thomas Carmet | b6def80347 | |
Thomas Carmet | b3f7a22a07 | |
Jonathan Gramain | ac5de47ca1 | |
Jonathan Gramain | 3c0f3e671a | |
Jonathan Gramain | a3dc3f9fb8 | |
Jonathan Gramain | e4bf9500a3 | |
Jonathan Gramain | ac33897f25 | |
Jonathan Gramain | c6a640ca9d | |
Jonathan Gramain | 3992ac2809 | |
williamlardier | c147785464 | |
williamlardier | ca8c788757 | |
williamlardier | cb2af364bb | |
williamlardier | 1eb27d610b | |
williamlardier | 73b295c91d | |
williamlardier | 8186c84bf9 | |
williamlardier | 93ef2d0545 | |
williamlardier | d7d0a31bb1 | |
williamlardier | 4c69b82508 | |
williamlardier | ca13284da3 | |
williamlardier | c6ed75a1d7 | |
williamlardier | 402d0dea1a | |
williamlardier | 95faec1db0 | |
Jonathan Gramain | ca9d53f430 | |
Jonathan Gramain | ba27ff7980 | |
Jonathan Gramain | 8957997e23 | |
Jonathan Gramain | 3caeda5d39 | |
Jonathan Gramain | feed423f56 | |
Jonathan Gramain | 4981d8f342 | |
bert-e | b1ee1f8ef7 | |
bert-e | 28d778c2d4 | |
bert-e | b180aac9ba | |
Taylor McKinnon | c353452128 | |
bert-e | 101b13abce | |
Taylor McKinnon | 9f5ae852bf | |
williamlardier | e882cb6781 | |
williamlardier | 8543f1a934 | |
williamlardier | fc871fbbfa | |
Francois Ferrand | cb7303636c | |
Francois Ferrand | 6d0f889c23 | |
Francois Ferrand | c13f2ae6a5 | |
Francois Ferrand | 03058371e9 | |
Francois Ferrand | 473fed7594 | |
Francois Ferrand | d86b9144be | |
Francois Ferrand | 2f2d9ced4c | |
Francois Ferrand | 57a0ffc746 | |
Francois Ferrand | d839cf2394 | |
bert-e | b6611c4711 | |
bert-e | 461f5ac5f9 | |
bert-e | 413a42adf0 | |
Jonathan Gramain | 7be27e0a83 | |
Jonathan Gramain | 3d3252361d | |
Jonathan Gramain | dad8bc7195 | |
Artem Bakalov | e6bda3460b | |
Artem Bakalov | 64334db65a | |
Jonathan Gramain | fa562ae85a | |
Jonathan Gramain | 6f32ebb2ce | |
Jonathan Gramain | 2b32ec6163 | |
Jonathan Gramain | d4063e157a | |
Nicolas Humbert | a481384538 | |
bert-e | ae4ece471b | |
williamlardier | 15b61cd947 | |
williamlardier | 91536c575f | |
bert-e | 864ce1f27d | |
bert-e | 9d007a76b1 | |
Artem Bakalov | f4e292c6f9 | |
bert-e | 436d1a9eab | |
Artem Bakalov | 3da8f88a12 | |
Francois Ferrand | a2eb347fe3 | |
Francois Ferrand | 0ff1262f97 | |
Francois Ferrand | 54a23d90c1 | |
Kerkesni | eb3dc9b79f | |
bert-e | 2c8968ef4a | |
Kerkesni | a449aa35f4 | |
Kerkesni | c2c8582585 | |
Kerkesni | 82c1bd7211 | |
bert-e | 776af747f2 | |
Alexander Chan | 453fec0cb0 | |
bert-e | f9fd3cae16 | |
bert-e | 3662c406ec | |
Taylor McKinnon | 243876ef81 | |
Taylor McKinnon | cf4706816f | |
Taylor McKinnon | 368971dacb | |
bert-e | f6fe11b763 | |
Taylor McKinnon | 5f94fce344 | |
Taylor McKinnon | af8420fe3c | |
bert-e | c3b209cbb5 | |
Taylor McKinnon | 3d6b7354a5 | |
Taylor McKinnon | a5d694a92c | |
Taylor McKinnon | 990e821ac8 | |
Taylor McKinnon | 8170bb9965 | |
Taylor McKinnon | 7e559d08c9 | |
bert-e | e5c58ecc3d | |
bert-e | 6ef88fd60e | |
bert-e | 483e91a8d6 | |
bert-e | 3c99c67a33 | |
Jonathan Gramain | 29f87c7f2f | |
Jonathan Gramain | 7692d2c376 | |
Jonathan Gramain | a0d7b07dc6 | |
Jonathan Gramain | a9c21b98f9 | |
Jonathan Gramain | fa9232f137 | |
Jonathan Gramain | bcf3b4a16a | |
bert-e | 3257f4e905 | |
bert-e | 1d190019f7 | |
Jonathan Gramain | 79e7dc3946 | |
bert-e | 1144e6bb33 | |
Jonathan Gramain | 950542237f | |
Jonathan Gramain | a3c3511ff9 | |
Francois Ferrand | 7db26fae9a | |
Francois Ferrand | 7faf8c2366 | |
Francois Ferrand | e803078952 | |
Francois Ferrand | cfd72f3a38 | |
Francois Ferrand | 69a96d3993 | |
Taylor McKinnon | d5bb8d8ed3 | |
Taylor McKinnon | aeb8de54db | |
Taylor McKinnon | 8f62260d70 | |
Taylor McKinnon | 293930ff74 | |
Taylor McKinnon | dd6deff075 | |
Taylor McKinnon | 4174106c2d | |
bert-e | 29985f8955 | |
Jonathan Gramain | b081918317 | |
Jonathan Gramain | 9049555887 | |
bert-e | 41063705a9 | |
Jonathan Gramain | 7cdb395ee3 | |
Jonathan Gramain | 45c6aefc35 | |
bert-e | b125bcb0b7 | |
bert-e | dd93e2f0be | |
Taylor McKinnon | d8dc35f1cf | |
Taylor McKinnon | 3df9712648 | |
Taylor McKinnon | d45b543053 | |
Taylor McKinnon | 3910b25f1c | |
Francois Ferrand | 4e935dff1a | |
Francois Ferrand | ecd54df821 | |
bert-e | d523b6f1b6 | |
Artem Bakalov | ab95973786 | |
bert-e | fa99e2f3b2 | |
Artem Bakalov | 49fded7d5f | |
bert-e | de094c53cd | |
bert-e | 0234ec7461 | |
bert-e | 34ece584a2 | |
Naren | 464930ff16 | |
bert-e | 4f1bd8e634 | |
bert-e | bcabab454c | |
Naren | fdcecbf5ef | |
Naren | 9e186f7107 | |
Naren | 82316c7b10 | |
bert-e | 47352b1df1 | |
bert-e | a019e89ebb | |
bert-e | d0eef7bf3f | |
Naren | 1db16d1cda | |
bert-e | 59c6a9fb2a | |
bert-e | 0c27fbebea | |
bert-e | 01afc596e9 | |
bert-e | dff4c42971 | |
bert-e | 55710d6a64 | |
Naren | 6e714cdb84 | |
williamlardier | 3ce13ddde9 | |
williamlardier | a327aa83c1 | |
williamlardier | 667cd471a4 | |
williamlardier | 1b6b2ef4ed | |
williamlardier | 04b1d6c6a4 | |
williamlardier | 887ab2510d | |
williamlardier | 51f7e390e8 | |
williamlardier | 16b5ef230a | |
williamlardier | f543eb30e8 | |
williamlardier | 50efadb55b | |
bert-e | a149336c1a | |
bert-e | d3847224a4 | |
Ronnie Smith | 66848a31e6 | |
Ronnie Smith | 9e76f3b769 | |
bert-e | ee090c4f03 | |
bert-e | 8f2aa95ec8 | |
bert-e | 17595bf0af | |
Ronnie Smith | bbb3e2f7ce | |
bert-e | c8cdd8eacb | |
bert-e | 30455b9d6f | |
bert-e | 773bfe1f14 | |
Ronnie Smith | 2bc80795a9 | |
bert-e | 73474be2fe | |
bert-e | 5cdbe049cf | |
bert-e | 2e40cd1b4c | |
Ronnie Smith | bace3047ec | |
bert-e | d819e9128a | |
bert-e | d5dcd1f2c1 | |
Ronnie Smith | 9079221ba0 | |
Ronnie Smith | 0f06277dce | |
bert-e | bc835899d0 | |
bert-e | 0e741e0b6f | |
bert-e | 33df88ac2d | |
Ronnie Smith | 71143fd0cf | |
bert-e | bfcfb43999 | |
bert-e | 00fc3496ac | |
bert-e | 547a8cc2d0 | |
Ronnie | 2d7990cb59 | |
Ronnie | 3569d816bd | |
bert-e | 1d702112f0 | |
Ronnie Smith | 1b35948ce2 | |
bert-e | c25d4661b4 | |
Ronnie | b554a7e517 | |
Ronnie Smith | 58e7c66eae | |
bert-e | 664ea4a23a | |
Ronnie Smith | 89843bc2ab | |
Alexander Chan | 113c5c166f | |
Alexander Chan | 53a988b167 | |
Alexander Chan | 6bee199b56 | |
Alexander Chan | 2a935b34ed | |
bert-e | d5b31cb669 | |
Ronnie Smith | 9bf176b7fb | |
bert-e | d2d5b1ee0a | |
Ronnie Smith | a916cac32d | |
Ronnie | 58bc54db87 | |
Ronnie Smith | 5d42dad5c3 | |
Ronnie | 09c8cd0fbd | |
bert-e | 7c92f34ee0 | |
bert-e | 81d34525c7 | |
bert-e | a571bfc721 | |
bert-e | 84a7547628 | |
bert-e | 9af76eb0ce | |
bert-e | d3a622ea27 | |
bert-e | a0b1e6b308 | |
Ronnie | fedb0547e1 | |
bert-e | 76301c9ec4 | |
bert-e | 3b6a2c9a55 | |
bert-e | c0b89c650e | |
bert-e | 97feb483c0 | |
bert-e | 5fb3cf0ede | |
Ronnie Smith | 3a7c8f920b | |
bert-e | 0e2b3b0f53 | |
bert-e | 83cf51a3d0 | |
Artem Bakalov | 9544b18f2e | |
bert-e | b73f28964a | |
Ronnie Smith | 3239e8da75 | |
bert-e | 22905c8967 | |
KillianG | d05f027837 | |
KillianG | 0a3986adb7 | |
Killian Gardahaut | 5a97f88b14 | |
Taylor McKinnon | fa43f86ce1 | |
bert-e | f0b34678f7 | |
bert-e | cddaef2bb9 | |
bert-e | c9d5c6cc18 | |
Alexander Chan | 1217a496ff | |
Killian Gardahaut | 3911c37d8e | |
bert-e | fdbfec2bcc | |
KillianG | 3f11dab32b | |
bert-e | e1ae7b84f2 | |
Killian Gardahaut | 2b35351649 | |
Killian Gardahaut | dacbc85e62 | |
Killian Gardahaut | 1ed1513729 | |
Ronnie | ab84030e8e | |
Ronnie Smith | e4c17569ce | |
Jonathan Gramain | 3be5f2633c | |
Jonathan Gramain | d947e4ffb2 | |
Jonathan Gramain | 8a982bbc37 | |
Jonathan Gramain | 2360e410e3 | |
bert-e | 4407b46d06 | |
bert-e | b1d42091b3 | |
Nicolas Humbert | dbc99acd0d | |
bert-e | 8ec404dc7a | |
Jonathan Gramain | a1a7e4d888 | |
bert-e | 93751da82a | |
bert-e | 1b560fa584 | |
Jonathan Gramain | 8a1828ef4c | |
bert-e | f4243e6408 | |
bert-e | 717d9f844e | |
Jonathan Gramain | 04e2396b3b | |
Jonathan Gramain | ffcf5517e8 | |
Jonathan Gramain | e426faa324 | |
bert-e | 443f239b8e | |
Jonathan Gramain | c57b6ff0e4 | |
bert-e | d807379c2d | |
Jonathan Gramain | 5265c1b35d | |
Jonathan Gramain | 43cc84ac9b | |
Jonathan Gramain | 806c79be7c | |
Jonathan Gramain | 0cdaf92b00 | |
Jonathan Gramain | 46454ac80b | |
bert-e | 760ef6e0d8 | |
bert-e | dcc1b32049 | |
bert-e | 7d85a7702e | |
Killian Gardahaut | 54fa51c101 | |
bert-e | 7bd3ec9954 | |
KillianG | 0b7c6a76cc | |
bert-e | 4dbbd31599 | |
KillianG | 4179227367 | |
bert-e | f115aeb7c2 | |
KillianG | add9e37712 | |
bert-e | ce0f7383aa | |
Killian Gardahaut | cc4eac28ac | |
bert-e | c0082d495d | |
Jonathan Gramain | 555b583354 | |
bert-e | 2b21481eb8 | |
Jonathan Gramain | 2225a8ebb4 | |
Kerkesni | e394cc304c | |
bert-e | e70753446f | |
bert-e | 29a3a6d845 | |
bert-e | 5110275a7e | |
Jonathan Gramain | 70d4646af6 | |
Nicolas Humbert | f2e7aec6c8 | |
bert-e | 9e85e8dd9e | |
Nicolas Humbert | f3f4937578 | |
bert-e | bcb879c2ff | |
Taylor McKinnon | 615ace071f | |
Taylor McKinnon | 3cfcc9aa28 | |
Taylor McKinnon | 49a9146550 | |
Taylor McKinnon | 885315bb93 | |
Taylor McKinnon | d4ae083b5a | |
Taylor McKinnon | e97e410ee4 | |
Taylor McKinnon | 91c202edec | |
Taylor McKinnon | b2ad4dfa96 | |
Taylor McKinnon | 11029f2d90 | |
Kerkesni | 3d86abd70a | |
Kerkesni | dc4dd2595e | |
bert-e | b712df6a1f | |
Kerkesni | 356c9f1e9c | |
Nicolas Humbert | 51e28def0e | |
bert-e | 55b0400b25 | |
bert-e | 4fc5ac5e58 | |
bert-e | 95b607d991 | |
bert-e | 09006f9a68 | |
bert-e | c2f86b63eb | |
bert-e | 35ecf8e556 | |
Will Toozs | aa6297a35b | |
bert-e | 4e89f4b025 | |
bert-e | fc5bde533c | |
Rached Ben Mustapha | 4fa5bf7409 | |
Will Toozs | 5effe07ebd | |
Will Toozs | dce1f83322 | |
Will Toozs | 89259c82cb | |
Kerkesni | 7e405ff963 | |
bert-e | ce705c8e78 | |
Will Toozs | 2ca6fb2fe6 | |
bert-e | 7b423666fe | |
Will Toozs | 07fc8c35d1 | |
bert-e | ec07bedd0b | |
bert-e | dc76bbb5c4 | |
bert-e | 75757a541b | |
bert-e | e130629ff9 | |
Nicolas Humbert | 8e60c2d300 | |
Will Toozs | 5c1b237e9e | |
Will Toozs | 8d7bd0809c | |
Will Toozs | 0f4a09fecc | |
bert-e | 31b3469e4a | |
Anurag Mittal | d841a31bf3 | |
Anurag Mittal | ca91cb1b9d | |
Taylor McKinnon | 47c6c7acf3 | |
Nicolas Humbert | 493ba63384 | |
Nicolas Humbert | 6f1c3286a9 | |
bert-e | b8058920d7 | |
bert-e | 1138a5952c | |
Taylor McKinnon | 4069a94f78 | |
Nicolas Humbert | 53708a5197 | |
Nicolas Humbert | 4e0a497367 | |
williamlardier | 7e6d5e5629 | |
williamlardier | 266776650e | |
williamlardier | 36d910fd97 | |
williamlardier | f5781c3609 | |
williamlardier | 3378b6a439 | |
Will Toozs | f47687de10 | |
Will Toozs | 9899e95cab | |
williamlardier | 4c60757086 | |
Will Toozs | 09062e3020 | |
bert-e | 2389f36f34 | |
Ronnie Smith | 0852be8a2b | |
KillianG | 3a5236239e | |
williamlardier | a77e558d53 | |
williamlardier | 888b760834 | |
KillianG | a1c8c9adc1 | |
KillianG | 7ac2adb23f | |
bert-e | 3e42758950 | |
bert-e | aa70b840b7 | |
Will Toozs | 90b7316043 | |
Will Toozs | a2ae5cc5e4 | |
bert-e | 8daac3c50b | |
bert-e | 835cd193d4 | |
bert-e | 75cbd72c52 | |
bert-e | 27777296bf | |
Will Toozs | d8f73ce56c | |
bert-e | 4dbfe27254 | |
bert-e | 8fe33dee76 | |
bert-e | cff15fe737 | |
bert-e | dec7a13106 | |
Will Toozs | 6ce675ce01 | |
bert-e | 32658b7e3b | |
KillianG | fcf617acf0 | |
KillianG | 910e62e0c9 | |
bert-e | 67e1611edb | |
bert-e | 05532878d6 | |
KillianG | 68037356f9 | |
Will Toozs | 3e83c7d836 | |
Will Toozs | 63ae80e76e | |
Will Toozs | 8729f86db0 | |
Killian Gardahaut | 66110ad2ac | |
Ronnie Smith | fdfb76a99a | |
williamlardier | ab59e98977 | |
williamlardier | 505c421014 | |
williamlardier | e046b87eec | |
williamlardier | 1b2f2478c8 | |
bert-e | f476a11faf | |
bert-e | f61c585184 | |
bert-e | 0174252f54 | |
Francois Ferrand | 5a13eecd05 | |
bert-e | bbd2dfefd7 | |
bert-e | b491a3e92c | |
bert-e | 9cb550514e | |
Nicolas Humbert | 74df4fcd65 | |
bert-e | 3a842d3b93 | |
KillianG | 63a790a3d7 | |
KillianG | 518253dc5e | |
Killian Gardahaut | c3d3171906 | |
Francois Ferrand | 717c5c6a7d | |
bert-e | 22fdaad636 | |
bert-e | ca91044fde | |
bert-e | 266e0f9aa3 | |
bert-e | e9d41f8db3 | |
bert-e | b63f210b8f | |
Nicolas Humbert | 61f30e659c | |
bert-e | 2c375ee10d | |
bert-e | 8638f00c95 | |
Ronnie Smith | 9b4b755cf8 | |
Ronnie Smith | 0f5ab42233 | |
Nicolas Humbert | d99397f604 | |
Nicolas Humbert | cb591f06fc | |
Jordi Bertran de Balanda | f26286667f | |
bert-e | abcbb75ad0 | |
Francois Ferrand | 1807abe656 | |
Jordi Bertran de Balanda | 3e74ead98a | |
Ronnie Smith | b8fae2d659 | |
Ronnie Smith | ea328177de | |
Ronnie Smith | fbd68e6839 | |
williamlardier | 8c939b3a05 | |
williamlardier | 5795a44e4c | |
williamlardier | 8c7862b51d | |
williamlardier | bf9d9fe3c5 | |
williamlardier | 127ce9c619 | |
bert-e | a10c705310 | |
Xin LI | cb518a8f08 | |
bert-e | dc408bcc3c | |
Nicolas Humbert | 7afced91ba | |
williamlardier | 91c1eca7d6 | |
williamlardier | 78498030ab | |
williamlardier | 60a917cc08 | |
bert-e | 687b8565f9 | |
bert-e | f6a66881fd | |
bert-e | fda4c099b3 | |
williamlardier | 3d0ee3fc4b | |
williamlardier | 6c2661eea5 | |
williamlardier | 413b52f4d0 | |
williamlardier | bf23f09c41 | |
Ronnie Smith | 89d030bfe9 | |
williamlardier | 2d91180469 | |
bert-e | 64b83e9f2c | |
Nicolas Humbert | 2ab4f3139e | |
Nicolas Humbert | 87b199dc6f | |
Nicolas Humbert | a0c97d2c06 | |
Nicolas Humbert | 8fd7b3ed30 | |
williamlardier | eeb6ff8c3b | |
williamlardier | 9922acf8f9 | |
williamlardier | 9d7c7dedbb | |
williamlardier | 3ff27f488f | |
williamlardier | a5df203ca2 | |
williamlardier | 58e2625e3e | |
williamlardier | c0acd4dc1b | |
williamlardier | 78f131df56 | |
williamlardier | 721f54cb92 | |
Yutaka Oishi | a39c3f475c | |
williamlardier | dd20b82745 | |
williamlardier | 62163eb8aa | |
williamlardier | 7ae162ee0c | |
williamlardier | 54d313fffa | |
williamlardier | d22b6b6aa5 | |
williamlardier | 5acdfccdea | |
Nicolas Humbert | 30f6c803af | |
Francois Ferrand | a235a1a175 | |
bert-e | 2a165dc0da | |
Rahul Padigela | 830ddb6e3c | |
KillianG | 8d00ef947d | |
KillianG | ea107a50e1 | |
KillianG | 4b4f35d36f |
|
@ -1,5 +1,8 @@
|
||||||
{
|
{
|
||||||
"extends": "scality",
|
"extends": "scality",
|
||||||
|
"plugins": [
|
||||||
|
"mocha"
|
||||||
|
],
|
||||||
"rules": {
|
"rules": {
|
||||||
"import/extensions": "off",
|
"import/extensions": "off",
|
||||||
"lines-around-directive": "off",
|
"lines-around-directive": "off",
|
||||||
|
@ -42,7 +45,8 @@
|
||||||
"no-restricted-properties": "off",
|
"no-restricted-properties": "off",
|
||||||
"new-parens": "off",
|
"new-parens": "off",
|
||||||
"no-multi-spaces": "off",
|
"no-multi-spaces": "off",
|
||||||
"quote-props": "off"
|
"quote-props": "off",
|
||||||
|
"mocha/no-exclusive-tests": "error",
|
||||||
},
|
},
|
||||||
"parserOptions": {
|
"parserOptions": {
|
||||||
"ecmaVersion": 2020
|
"ecmaVersion": 2020
|
||||||
|
|
|
@ -0,0 +1,43 @@
|
||||||
|
---
|
||||||
|
name: "Setup CI environment"
|
||||||
|
description: "Setup Cloudserver CI environment"
|
||||||
|
|
||||||
|
runs:
|
||||||
|
using: composite
|
||||||
|
steps:
|
||||||
|
- name: Setup etc/hosts
|
||||||
|
shell: bash
|
||||||
|
run: sudo echo "127.0.0.1 bucketwebsitetester.s3-website-us-east-1.amazonaws.com" | sudo tee -a /etc/hosts
|
||||||
|
- name: Setup Credentials
|
||||||
|
shell: bash
|
||||||
|
run: bash .github/scripts/credentials.bash
|
||||||
|
- name: Setup job artifacts directory
|
||||||
|
shell: bash
|
||||||
|
run: |-
|
||||||
|
set -exu;
|
||||||
|
mkdir -p /tmp/artifacts/${JOB_NAME}/;
|
||||||
|
- uses: actions/setup-node@v4
|
||||||
|
with:
|
||||||
|
node-version: '16'
|
||||||
|
cache: 'yarn'
|
||||||
|
- name: install dependencies
|
||||||
|
shell: bash
|
||||||
|
run: yarn install --ignore-engines --frozen-lockfile --network-concurrency 1
|
||||||
|
- uses: actions/cache@v3
|
||||||
|
with:
|
||||||
|
path: ~/.cache/pip
|
||||||
|
key: ${{ runner.os }}-pip
|
||||||
|
- uses: actions/setup-python@v4
|
||||||
|
with:
|
||||||
|
python-version: 3.9
|
||||||
|
- name: Setup python2 test environment
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
sudo apt-get install -y libdigest-hmac-perl
|
||||||
|
pip install 's3cmd==2.3.0'
|
||||||
|
- name: fix sproxyd.conf permissions
|
||||||
|
shell: bash
|
||||||
|
run: sudo chown root:root .github/docker/sproxyd/conf/sproxyd0.conf
|
||||||
|
- name: ensure fuse kernel module is loaded (for sproxyd)
|
||||||
|
shell: bash
|
||||||
|
run: sudo modprobe fuse
|
|
@ -0,0 +1,36 @@
|
||||||
|
azurebackend_AZURE_STORAGE_ACCESS_KEY
|
||||||
|
azurebackend_AZURE_STORAGE_ACCOUNT_NAME
|
||||||
|
azurebackend_AZURE_STORAGE_ENDPOINT
|
||||||
|
azurebackend2_AZURE_STORAGE_ACCESS_KEY
|
||||||
|
azurebackend2_AZURE_STORAGE_ACCOUNT_NAME
|
||||||
|
azurebackend2_AZURE_STORAGE_ENDPOINT
|
||||||
|
azurebackendmismatch_AZURE_STORAGE_ACCESS_KEY
|
||||||
|
azurebackendmismatch_AZURE_STORAGE_ACCOUNT_NAME
|
||||||
|
azurebackendmismatch_AZURE_STORAGE_ENDPOINT
|
||||||
|
azurenonexistcontainer_AZURE_STORAGE_ACCESS_KEY
|
||||||
|
azurenonexistcontainer_AZURE_STORAGE_ACCOUNT_NAME
|
||||||
|
azurenonexistcontainer_AZURE_STORAGE_ENDPOINT
|
||||||
|
azuretest_AZURE_BLOB_ENDPOINT
|
||||||
|
b2backend_B2_ACCOUNT_ID
|
||||||
|
b2backend_B2_STORAGE_ACCESS_KEY
|
||||||
|
GOOGLE_SERVICE_EMAIL
|
||||||
|
GOOGLE_SERVICE_KEY
|
||||||
|
AWS_S3_BACKEND_ACCESS_KEY
|
||||||
|
AWS_S3_BACKEND_SECRET_KEY
|
||||||
|
AWS_S3_BACKEND_ACCESS_KEY_2
|
||||||
|
AWS_S3_BACKEND_SECRET_KEY_2
|
||||||
|
AWS_GCP_BACKEND_ACCESS_KEY
|
||||||
|
AWS_GCP_BACKEND_SECRET_KEY
|
||||||
|
AWS_GCP_BACKEND_ACCESS_KEY_2
|
||||||
|
AWS_GCP_BACKEND_SECRET_KEY_2
|
||||||
|
b2backend_B2_STORAGE_ENDPOINT
|
||||||
|
gcpbackend2_GCP_SERVICE_EMAIL
|
||||||
|
gcpbackend2_GCP_SERVICE_KEY
|
||||||
|
gcpbackend2_GCP_SERVICE_KEYFILE
|
||||||
|
gcpbackend_GCP_SERVICE_EMAIL
|
||||||
|
gcpbackend_GCP_SERVICE_KEY
|
||||||
|
gcpbackendmismatch_GCP_SERVICE_EMAIL
|
||||||
|
gcpbackendmismatch_GCP_SERVICE_KEY
|
||||||
|
gcpbackend_GCP_SERVICE_KEYFILE
|
||||||
|
gcpbackendmismatch_GCP_SERVICE_KEYFILE
|
||||||
|
gcpbackendnoproxy_GCP_SERVICE_KEYFILE
|
|
@ -0,0 +1,92 @@
|
||||||
|
services:
|
||||||
|
cloudserver:
|
||||||
|
image: ${CLOUDSERVER_IMAGE}
|
||||||
|
command: sh -c "yarn start > /artifacts/s3.log"
|
||||||
|
network_mode: "host"
|
||||||
|
volumes:
|
||||||
|
- /tmp/ssl:/ssl
|
||||||
|
- /tmp/ssl-kmip:/ssl-kmip
|
||||||
|
- ${HOME}/.aws/credentials:/root/.aws/credentials
|
||||||
|
- /tmp/artifacts/${JOB_NAME}:/artifacts
|
||||||
|
environment:
|
||||||
|
- CI=true
|
||||||
|
- ENABLE_LOCAL_CACHE=true
|
||||||
|
- REDIS_HOST=0.0.0.0
|
||||||
|
- REDIS_PORT=6379
|
||||||
|
- REPORT_TOKEN=report-token-1
|
||||||
|
- REMOTE_MANAGEMENT_DISABLE=1
|
||||||
|
- HEALTHCHECKS_ALLOWFROM=0.0.0.0/0
|
||||||
|
- DATA_HOST=0.0.0.0
|
||||||
|
- METADATA_HOST=0.0.0.0
|
||||||
|
- S3BACKEND
|
||||||
|
- S3DATA
|
||||||
|
- S3METADATA
|
||||||
|
- MPU_TESTING
|
||||||
|
- S3VAULT
|
||||||
|
- S3_LOCATION_FILE
|
||||||
|
- ENABLE_UTAPI_V2
|
||||||
|
- BUCKET_DENY_FILTER
|
||||||
|
- S3KMS
|
||||||
|
- S3KMIP_PORT
|
||||||
|
- S3KMIP_HOSTS
|
||||||
|
- S3KMIP-COMPOUND_CREATE
|
||||||
|
- S3KMIP_BUCKET_ATTRIBUTE_NAME
|
||||||
|
- S3KMIP_PIPELINE_DEPTH
|
||||||
|
- S3KMIP_KEY
|
||||||
|
- S3KMIP_CERT
|
||||||
|
- S3KMIP_CA
|
||||||
|
- MONGODB_HOSTS=0.0.0.0:27018
|
||||||
|
- MONGODB_RS=rs0
|
||||||
|
- DEFAULT_BUCKET_KEY_FORMAT
|
||||||
|
- METADATA_MAX_CACHED_BUCKETS
|
||||||
|
- ENABLE_NULL_VERSION_COMPAT_MODE
|
||||||
|
- SCUBA_HOST
|
||||||
|
- SCUBA_PORT
|
||||||
|
- SCUBA_HEALTHCHECK_FREQUENCY
|
||||||
|
- S3QUOTA
|
||||||
|
- QUOTA_ENABLE_INFLIGHTS
|
||||||
|
env_file:
|
||||||
|
- creds.env
|
||||||
|
depends_on:
|
||||||
|
- redis
|
||||||
|
extra_hosts:
|
||||||
|
- "bucketwebsitetester.s3-website-us-east-1.amazonaws.com:127.0.0.1"
|
||||||
|
- "pykmip.local:127.0.0.1"
|
||||||
|
redis:
|
||||||
|
image: redis:alpine
|
||||||
|
network_mode: "host"
|
||||||
|
squid:
|
||||||
|
network_mode: "host"
|
||||||
|
profiles: ['ci-proxy']
|
||||||
|
image: scality/ci-squid
|
||||||
|
command: >-
|
||||||
|
sh -c 'mkdir -p /ssl &&
|
||||||
|
openssl req -new -newkey rsa:2048 -sha256 -days 365 -nodes -x509 \
|
||||||
|
-subj "/C=US/ST=Country/L=City/O=Organization/CN=CN=scality-proxy" \
|
||||||
|
-keyout /ssl/myca.pem -out /ssl/myca.pem &&
|
||||||
|
cp /ssl/myca.pem /ssl/CA.pem &&
|
||||||
|
squid -f /etc/squid/squid.conf -N -z &&
|
||||||
|
squid -f /etc/squid/squid.conf -NYCd 1'
|
||||||
|
volumes:
|
||||||
|
- /tmp/ssl:/ssl
|
||||||
|
pykmip:
|
||||||
|
network_mode: "host"
|
||||||
|
profiles: ['pykmip']
|
||||||
|
image: ${PYKMIP_IMAGE:-ghcr.io/scality/cloudserver/pykmip}
|
||||||
|
volumes:
|
||||||
|
- /tmp/artifacts/${JOB_NAME}:/artifacts
|
||||||
|
mongo:
|
||||||
|
network_mode: "host"
|
||||||
|
profiles: ['mongo', 'ceph']
|
||||||
|
image: ${MONGODB_IMAGE}
|
||||||
|
ceph:
|
||||||
|
network_mode: "host"
|
||||||
|
profiles: ['ceph']
|
||||||
|
image: ghcr.io/scality/cloudserver/ci-ceph
|
||||||
|
sproxyd:
|
||||||
|
network_mode: "host"
|
||||||
|
profiles: ['sproxyd']
|
||||||
|
image: sproxyd-standalone
|
||||||
|
build: ./sproxyd
|
||||||
|
user: 0:0
|
||||||
|
privileged: yes
|
|
@ -0,0 +1,28 @@
|
||||||
|
FROM mongo:5.0.21
|
||||||
|
|
||||||
|
ENV USER=scality \
|
||||||
|
HOME_DIR=/home/scality \
|
||||||
|
CONF_DIR=/conf \
|
||||||
|
DATA_DIR=/data
|
||||||
|
|
||||||
|
# Set up directories and permissions
|
||||||
|
RUN mkdir -p /data/db /data/configdb && chown -R mongodb:mongodb /data/db /data/configdb; \
|
||||||
|
mkdir /logs; \
|
||||||
|
adduser --uid 1000 --disabled-password --gecos --quiet --shell /bin/bash scality
|
||||||
|
|
||||||
|
# Set up environment variables and directories for scality user
|
||||||
|
RUN mkdir ${CONF_DIR} && \
|
||||||
|
chown -R ${USER} ${CONF_DIR} && \
|
||||||
|
chown -R ${USER} ${DATA_DIR}
|
||||||
|
|
||||||
|
# copy the mongo config file
|
||||||
|
COPY /conf/mongod.conf /conf/mongod.conf
|
||||||
|
COPY /conf/mongo-run.sh /conf/mongo-run.sh
|
||||||
|
COPY /conf/initReplicaSet /conf/initReplicaSet.js
|
||||||
|
|
||||||
|
EXPOSE 27017/tcp
|
||||||
|
EXPOSE 27018
|
||||||
|
|
||||||
|
# Set up CMD
|
||||||
|
ENTRYPOINT ["bash", "/conf/mongo-run.sh"]
|
||||||
|
CMD ["bash", "/conf/mongo-run.sh"]
|
|
@ -0,0 +1,4 @@
|
||||||
|
rs.initiate({
|
||||||
|
_id: "rs0",
|
||||||
|
members: [{ _id: 0, host: "127.0.0.1:27018" }]
|
||||||
|
});
|
|
@ -0,0 +1,10 @@
|
||||||
|
#!/bin/bash
|
||||||
|
set -exo pipefail
|
||||||
|
|
||||||
|
init_RS() {
|
||||||
|
sleep 5
|
||||||
|
mongo --port 27018 /conf/initReplicaSet.js
|
||||||
|
}
|
||||||
|
init_RS &
|
||||||
|
|
||||||
|
mongod --bind_ip_all --config=/conf/mongod.conf
|
|
@ -0,0 +1,15 @@
|
||||||
|
storage:
|
||||||
|
journal:
|
||||||
|
enabled: true
|
||||||
|
engine: wiredTiger
|
||||||
|
dbPath: "/data/db"
|
||||||
|
processManagement:
|
||||||
|
fork: false
|
||||||
|
net:
|
||||||
|
port: 27018
|
||||||
|
bindIp: 0.0.0.0
|
||||||
|
replication:
|
||||||
|
replSetName: "rs0"
|
||||||
|
enableMajorityReadConcern: true
|
||||||
|
security:
|
||||||
|
authorization: disabled
|
|
@ -0,0 +1,3 @@
|
||||||
|
FROM ghcr.io/scality/federation/sproxyd:7.10.6.8
|
||||||
|
ADD ./conf/supervisord.conf ./conf/nginx.conf ./conf/fastcgi_params ./conf/sproxyd0.conf /conf/
|
||||||
|
RUN chown root:root /conf/sproxyd0.conf
|
|
@ -0,0 +1,26 @@
|
||||||
|
fastcgi_param QUERY_STRING $query_string;
|
||||||
|
fastcgi_param REQUEST_METHOD $request_method;
|
||||||
|
fastcgi_param CONTENT_TYPE $content_type;
|
||||||
|
fastcgi_param CONTENT_LENGTH $content_length;
|
||||||
|
|
||||||
|
#fastcgi_param SCRIPT_NAME $fastcgi_script_name;
|
||||||
|
fastcgi_param SCRIPT_NAME /var/www;
|
||||||
|
fastcgi_param PATH_INFO $document_uri;
|
||||||
|
|
||||||
|
fastcgi_param REQUEST_URI $request_uri;
|
||||||
|
fastcgi_param DOCUMENT_URI $document_uri;
|
||||||
|
fastcgi_param DOCUMENT_ROOT $document_root;
|
||||||
|
fastcgi_param SERVER_PROTOCOL $server_protocol;
|
||||||
|
fastcgi_param HTTPS $https if_not_empty;
|
||||||
|
|
||||||
|
fastcgi_param GATEWAY_INTERFACE CGI/1.1;
|
||||||
|
fastcgi_param SERVER_SOFTWARE nginx/$nginx_version;
|
||||||
|
|
||||||
|
fastcgi_param REMOTE_ADDR $remote_addr;
|
||||||
|
fastcgi_param REMOTE_PORT $remote_port;
|
||||||
|
fastcgi_param SERVER_ADDR $server_addr;
|
||||||
|
fastcgi_param SERVER_PORT $server_port;
|
||||||
|
fastcgi_param SERVER_NAME $server_name;
|
||||||
|
|
||||||
|
# PHP only, required if PHP was built with --enable-force-cgi-redirect
|
||||||
|
fastcgi_param REDIRECT_STATUS 200;
|
|
@ -0,0 +1,88 @@
|
||||||
|
worker_processes 1;
|
||||||
|
error_log /logs/error.log;
|
||||||
|
user root root;
|
||||||
|
events {
|
||||||
|
worker_connections 1000;
|
||||||
|
reuse_port on;
|
||||||
|
multi_accept on;
|
||||||
|
}
|
||||||
|
worker_rlimit_nofile 20000;
|
||||||
|
http {
|
||||||
|
root /var/www/;
|
||||||
|
upstream sproxyds {
|
||||||
|
least_conn;
|
||||||
|
keepalive 40;
|
||||||
|
server 127.0.0.1:20000;
|
||||||
|
}
|
||||||
|
server {
|
||||||
|
client_max_body_size 0;
|
||||||
|
client_body_timeout 150;
|
||||||
|
client_header_timeout 150;
|
||||||
|
postpone_output 0;
|
||||||
|
client_body_postpone_size 0;
|
||||||
|
keepalive_requests 1100;
|
||||||
|
keepalive_timeout 300s;
|
||||||
|
server_tokens off;
|
||||||
|
default_type application/octet-stream;
|
||||||
|
gzip off;
|
||||||
|
tcp_nodelay on;
|
||||||
|
tcp_nopush on;
|
||||||
|
sendfile on;
|
||||||
|
listen 81;
|
||||||
|
server_name localhost;
|
||||||
|
rewrite ^/arc/(.*)$ /dc1/$1 permanent;
|
||||||
|
location ~* ^/proxy/(.*)$ {
|
||||||
|
rewrite ^/proxy/(.*)$ /$1 last;
|
||||||
|
}
|
||||||
|
allow 127.0.0.1;
|
||||||
|
|
||||||
|
deny all;
|
||||||
|
set $usermd '-';
|
||||||
|
set $sentusermd '-';
|
||||||
|
set $elapsed_ms '-';
|
||||||
|
set $now '-';
|
||||||
|
log_by_lua '
|
||||||
|
if not(ngx.var.http_x_scal_usermd == nil) and string.len(ngx.var.http_x_scal_usermd) > 2 then
|
||||||
|
ngx.var.usermd = string.sub(ngx.decode_base64(ngx.var.http_x_scal_usermd),1,-3)
|
||||||
|
end
|
||||||
|
if not(ngx.var.sent_http_x_scal_usermd == nil) and string.len(ngx.var.sent_http_x_scal_usermd) > 2 then
|
||||||
|
ngx.var.sentusermd = string.sub(ngx.decode_base64(ngx.var.sent_http_x_scal_usermd),1,-3)
|
||||||
|
end
|
||||||
|
local elapsed_ms = tonumber(ngx.var.request_time)
|
||||||
|
if not ( elapsed_ms == nil) then
|
||||||
|
elapsed_ms = elapsed_ms * 1000
|
||||||
|
ngx.var.elapsed_ms = tostring(elapsed_ms)
|
||||||
|
end
|
||||||
|
local time = tonumber(ngx.var.msec) * 1000
|
||||||
|
ngx.var.now = time
|
||||||
|
';
|
||||||
|
log_format irm '{ "time":"$now","connection":"$connection","request":"$connection_requests","hrtime":"$msec",'
|
||||||
|
'"httpMethod":"$request_method","httpURL":"$uri","elapsed_ms":$elapsed_ms,'
|
||||||
|
'"httpCode":$status,"requestLength":$request_length,"bytesSent":$bytes_sent,'
|
||||||
|
'"contentLength":"$content_length","sentContentLength":"$sent_http_content_length",'
|
||||||
|
'"contentType":"$content_type","s3Address":"$remote_addr",'
|
||||||
|
'"requestUserMd":"$usermd","responseUserMd":"$sentusermd",'
|
||||||
|
'"ringKeyVersion":"$sent_http_x_scal_version","ringStatus":"$sent_http_x_scal_ring_status",'
|
||||||
|
'"s3Port":"$remote_port","sproxydStatus":"$upstream_status","req_id":"$http_x_scal_request_uids",'
|
||||||
|
'"ifMatch":"$http_if_match","ifNoneMatch":"$http_if_none_match",'
|
||||||
|
'"range":"$http_range","contentRange":"$sent_http_content_range","nginxPID":$PID,'
|
||||||
|
'"sproxydAddress":"$upstream_addr","sproxydResponseTime_s":"$upstream_response_time" }';
|
||||||
|
access_log /dev/stdout irm;
|
||||||
|
error_log /dev/stdout error;
|
||||||
|
location / {
|
||||||
|
proxy_request_buffering off;
|
||||||
|
fastcgi_request_buffering off;
|
||||||
|
fastcgi_no_cache 1;
|
||||||
|
fastcgi_cache_bypass 1;
|
||||||
|
fastcgi_buffering off;
|
||||||
|
fastcgi_ignore_client_abort on;
|
||||||
|
fastcgi_keep_conn on;
|
||||||
|
include fastcgi_params;
|
||||||
|
fastcgi_pass sproxyds;
|
||||||
|
fastcgi_next_upstream error timeout;
|
||||||
|
fastcgi_send_timeout 285s;
|
||||||
|
fastcgi_read_timeout 285s;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
|
@ -0,0 +1,12 @@
|
||||||
|
{
|
||||||
|
"general": {
|
||||||
|
"ring": "DATA",
|
||||||
|
"port": 20000,
|
||||||
|
"syslog_facility": "local0"
|
||||||
|
},
|
||||||
|
"ring_driver:0": {
|
||||||
|
"alias": "dc1",
|
||||||
|
"type": "local",
|
||||||
|
"queue_path": "/tmp/ring-objs"
|
||||||
|
},
|
||||||
|
}
|
|
@ -0,0 +1,43 @@
|
||||||
|
[supervisord]
|
||||||
|
nodaemon = true
|
||||||
|
loglevel = info
|
||||||
|
logfile = %(ENV_LOG_DIR)s/supervisord.log
|
||||||
|
pidfile = %(ENV_SUP_RUN_DIR)s/supervisord.pid
|
||||||
|
logfile_maxbytes = 20MB
|
||||||
|
logfile_backups = 2
|
||||||
|
|
||||||
|
[unix_http_server]
|
||||||
|
file = %(ENV_SUP_RUN_DIR)s/supervisor.sock
|
||||||
|
|
||||||
|
[rpcinterface:supervisor]
|
||||||
|
supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
|
||||||
|
|
||||||
|
[supervisorctl]
|
||||||
|
serverurl = unix://%(ENV_SUP_RUN_DIR)s/supervisor.sock
|
||||||
|
|
||||||
|
[program:nginx]
|
||||||
|
directory=%(ENV_SUP_RUN_DIR)s
|
||||||
|
command=bash -c "/usr/sbin/nginx -c %(ENV_CONF_DIR)s/nginx.conf -g 'daemon off;'"
|
||||||
|
stdout_logfile = %(ENV_LOG_DIR)s/%(program_name)s-%(process_num)s.log
|
||||||
|
stderr_logfile = %(ENV_LOG_DIR)s/%(program_name)s-%(process_num)s-stderr.log
|
||||||
|
stdout_logfile_maxbytes=100MB
|
||||||
|
stdout_logfile_backups=7
|
||||||
|
stderr_logfile_maxbytes=100MB
|
||||||
|
stderr_logfile_backups=7
|
||||||
|
autorestart=true
|
||||||
|
autostart=true
|
||||||
|
user=root
|
||||||
|
|
||||||
|
[program:sproxyd]
|
||||||
|
directory=%(ENV_SUP_RUN_DIR)s
|
||||||
|
process_name=%(program_name)s-%(process_num)s
|
||||||
|
numprocs=1
|
||||||
|
numprocs_start=0
|
||||||
|
command=/usr/bin/sproxyd -dlw -V127 -c %(ENV_CONF_DIR)s/sproxyd%(process_num)s.conf -P /run%(process_num)s
|
||||||
|
stdout_logfile = %(ENV_LOG_DIR)s/%(program_name)s-%(process_num)s.log
|
||||||
|
stdout_logfile_maxbytes=100MB
|
||||||
|
stdout_logfile_backups=7
|
||||||
|
redirect_stderr=true
|
||||||
|
autorestart=true
|
||||||
|
autostart=true
|
||||||
|
user=root
|
|
@ -2,9 +2,9 @@
|
||||||
set -x #echo on
|
set -x #echo on
|
||||||
set -e #exit at the first error
|
set -e #exit at the first error
|
||||||
|
|
||||||
mkdir -p ~/.aws
|
mkdir -p $HOME/.aws
|
||||||
|
|
||||||
cat >>/root/.aws/credentials <<EOF
|
cat >>$HOME/.aws/credentials <<EOF
|
||||||
[default]
|
[default]
|
||||||
aws_access_key_id = $AWS_S3_BACKEND_ACCESS_KEY
|
aws_access_key_id = $AWS_S3_BACKEND_ACCESS_KEY
|
||||||
aws_secret_access_key = $AWS_S3_BACKEND_SECRET_KEY
|
aws_secret_access_key = $AWS_S3_BACKEND_SECRET_KEY
|
|
@ -1,7 +1,10 @@
|
||||||
name: Test alerts
|
name: Test alerts
|
||||||
|
|
||||||
on:
|
on:
|
||||||
push
|
push:
|
||||||
|
branches-ignore:
|
||||||
|
- 'development/**'
|
||||||
|
- 'q/*/**'
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
run-alert-tests:
|
run-alert-tests:
|
||||||
|
@ -17,13 +20,16 @@ jobs:
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v2
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
- name: Render and test ${{ matrix.tests.name }}
|
- name: Render and test ${{ matrix.tests.name }}
|
||||||
uses: scality/action-prom-render-test@1.0.1
|
uses: scality/action-prom-render-test@1.0.3
|
||||||
with:
|
with:
|
||||||
alert_file_path: monitoring/alerts.yaml
|
alert_file_path: monitoring/alerts.yaml
|
||||||
test_file_path: ${{ matrix.tests.file }}
|
test_file_path: ${{ matrix.tests.file }}
|
||||||
alert_inputs: >-
|
alert_inputs: |
|
||||||
namespace=zenko,service=artesca-data-connector-s3api-metrics,replicas=3
|
namespace=zenko
|
||||||
|
service=artesca-data-connector-s3api-metrics
|
||||||
|
reportJob=artesca-data-ops-report-handler
|
||||||
|
replicas=3
|
||||||
github_token: ${{ secrets.GITHUB_TOKEN }}
|
github_token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|
|
@ -0,0 +1,25 @@
|
||||||
|
---
|
||||||
|
name: codeQL
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches: [w/**, q/*]
|
||||||
|
pull_request:
|
||||||
|
branches: [development/*, stabilization/*, hotfix/*]
|
||||||
|
workflow_dispatch:
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
analyze:
|
||||||
|
name: Static analysis with CodeQL
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: Checkout code
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Initialize CodeQL
|
||||||
|
uses: github/codeql-action/init@v3
|
||||||
|
with:
|
||||||
|
languages: javascript, python, ruby
|
||||||
|
|
||||||
|
- name: Build and analyze
|
||||||
|
uses: github/codeql-action/analyze@v3
|
|
@ -0,0 +1,16 @@
|
||||||
|
---
|
||||||
|
name: dependency review
|
||||||
|
|
||||||
|
on:
|
||||||
|
pull_request:
|
||||||
|
branches: [development/*, stabilization/*, hotfix/*]
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
dependency-review:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: 'Checkout Repository'
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: 'Dependency Review'
|
||||||
|
uses: actions/dependency-review-action@v4
|
|
@ -0,0 +1,80 @@
|
||||||
|
---
|
||||||
|
name: release
|
||||||
|
run-name: release ${{ inputs.tag }}
|
||||||
|
|
||||||
|
on:
|
||||||
|
workflow_dispatch:
|
||||||
|
inputs:
|
||||||
|
tag:
|
||||||
|
description: 'Tag to be released'
|
||||||
|
required: true
|
||||||
|
|
||||||
|
env:
|
||||||
|
PROJECT_NAME: ${{ github.event.repository.name }}
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
build-federation-image:
|
||||||
|
runs-on: ubuntu-20.04
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
- name: Set up Docker Buildx
|
||||||
|
uses: docker/setup-buildx-action@v3
|
||||||
|
- name: Login to GitHub Registry
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
registry: ghcr.io
|
||||||
|
username: ${{ github.repository_owner }}
|
||||||
|
password: ${{ github.token }}
|
||||||
|
- name: Build and push image for federation
|
||||||
|
uses: docker/build-push-action@v5
|
||||||
|
with:
|
||||||
|
push: true
|
||||||
|
context: .
|
||||||
|
file: images/svc-base/Dockerfile
|
||||||
|
tags: |
|
||||||
|
ghcr.io/${{ github.repository }}:${{ github.event.inputs.tag }}-svc-base
|
||||||
|
cache-from: type=gha,scope=federation
|
||||||
|
cache-to: type=gha,mode=max,scope=federation
|
||||||
|
|
||||||
|
release:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Set up Docker Buildk
|
||||||
|
uses: docker/setup-buildx-action@v3
|
||||||
|
|
||||||
|
- name: Login to Registry
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
registry: ghcr.io
|
||||||
|
username: ${{ github.repository_owner }}
|
||||||
|
password: ${{ github.token }}
|
||||||
|
|
||||||
|
- name: Push dashboards into the production namespace
|
||||||
|
run: |
|
||||||
|
oras push ghcr.io/${{ github.repository }}/${{ env.PROJECT_NAME }}-dashboards:${{ github.event.inputs.tag }} \
|
||||||
|
dashboard.json:application/grafana-dashboard+json \
|
||||||
|
alerts.yaml:application/prometheus-alerts+yaml
|
||||||
|
working-directory: monitoring
|
||||||
|
|
||||||
|
- name: Build and push
|
||||||
|
uses: docker/build-push-action@v5
|
||||||
|
with:
|
||||||
|
context: .
|
||||||
|
push: true
|
||||||
|
tags: ghcr.io/${{ github.repository }}:${{ github.event.inputs.tag }}
|
||||||
|
cache-from: type=gha
|
||||||
|
cache-to: type=gha,mode=max
|
||||||
|
|
||||||
|
- name: Create Release
|
||||||
|
uses: softprops/action-gh-release@v2
|
||||||
|
env:
|
||||||
|
GITHUB_TOKEN: ${{ github.token }}
|
||||||
|
with:
|
||||||
|
name: Release ${{ github.event.inputs.tag }}
|
||||||
|
tag_name: ${{ github.event.inputs.tag }}
|
||||||
|
generate_release_notes: true
|
||||||
|
target_commitish: ${{ github.sha }}
|
|
@ -0,0 +1,533 @@
|
||||||
|
---
|
||||||
|
name: tests
|
||||||
|
|
||||||
|
on:
|
||||||
|
workflow_dispatch:
|
||||||
|
|
||||||
|
push:
|
||||||
|
branches-ignore:
|
||||||
|
- 'development/**'
|
||||||
|
- 'q/*/**'
|
||||||
|
|
||||||
|
env:
|
||||||
|
# Secrets
|
||||||
|
azurebackend_AZURE_STORAGE_ACCESS_KEY: >-
|
||||||
|
${{ secrets.AZURE_STORAGE_ACCESS_KEY }}
|
||||||
|
azurebackend_AZURE_STORAGE_ACCOUNT_NAME: >-
|
||||||
|
${{ secrets.AZURE_STORAGE_ACCOUNT_NAME }}
|
||||||
|
azurebackend_AZURE_STORAGE_ENDPOINT: >-
|
||||||
|
${{ secrets.AZURE_STORAGE_ENDPOINT }}
|
||||||
|
azurebackend2_AZURE_STORAGE_ACCESS_KEY: >-
|
||||||
|
${{ secrets.AZURE_STORAGE_ACCESS_KEY_2 }}
|
||||||
|
azurebackend2_AZURE_STORAGE_ACCOUNT_NAME: >-
|
||||||
|
${{ secrets.AZURE_STORAGE_ACCOUNT_NAME_2 }}
|
||||||
|
azurebackend2_AZURE_STORAGE_ENDPOINT: >-
|
||||||
|
${{ secrets.AZURE_STORAGE_ENDPOINT_2 }}
|
||||||
|
azurebackendmismatch_AZURE_STORAGE_ACCESS_KEY: >-
|
||||||
|
${{ secrets.AZURE_STORAGE_ACCESS_KEY }}
|
||||||
|
azurebackendmismatch_AZURE_STORAGE_ACCOUNT_NAME: >-
|
||||||
|
${{ secrets.AZURE_STORAGE_ACCOUNT_NAME }}
|
||||||
|
azurebackendmismatch_AZURE_STORAGE_ENDPOINT: >-
|
||||||
|
${{ secrets.AZURE_STORAGE_ENDPOINT }}
|
||||||
|
azurenonexistcontainer_AZURE_STORAGE_ACCESS_KEY: >-
|
||||||
|
${{ secrets.AZURE_STORAGE_ACCESS_KEY }}
|
||||||
|
azurenonexistcontainer_AZURE_STORAGE_ACCOUNT_NAME: >-
|
||||||
|
${{ secrets.AZURE_STORAGE_ACCOUNT_NAME }}
|
||||||
|
azurenonexistcontainer_AZURE_STORAGE_ENDPOINT: >-
|
||||||
|
${{ secrets.AZURE_STORAGE_ENDPOINT }}
|
||||||
|
azuretest_AZURE_BLOB_ENDPOINT: "${{ secrets.AZURE_STORAGE_ENDPOINT }}"
|
||||||
|
b2backend_B2_ACCOUNT_ID: "${{ secrets.B2BACKEND_B2_ACCOUNT_ID }}"
|
||||||
|
b2backend_B2_STORAGE_ACCESS_KEY: >-
|
||||||
|
${{ secrets.B2BACKEND_B2_STORAGE_ACCESS_KEY }}
|
||||||
|
GOOGLE_SERVICE_EMAIL: "${{ secrets.GCP_SERVICE_EMAIL }}"
|
||||||
|
GOOGLE_SERVICE_KEY: "${{ secrets.GCP_SERVICE_KEY }}"
|
||||||
|
AWS_S3_BACKEND_ACCESS_KEY: "${{ secrets.AWS_S3_BACKEND_ACCESS_KEY }}"
|
||||||
|
AWS_S3_BACKEND_SECRET_KEY: "${{ secrets.AWS_S3_BACKEND_SECRET_KEY }}"
|
||||||
|
AWS_S3_BACKEND_ACCESS_KEY_2: "${{ secrets.AWS_S3_BACKEND_ACCESS_KEY_2 }}"
|
||||||
|
AWS_S3_BACKEND_SECRET_KEY_2: "${{ secrets.AWS_S3_BACKEND_SECRET_KEY_2 }}"
|
||||||
|
AWS_GCP_BACKEND_ACCESS_KEY: "${{ secrets.AWS_GCP_BACKEND_ACCESS_KEY }}"
|
||||||
|
AWS_GCP_BACKEND_SECRET_KEY: "${{ secrets.AWS_GCP_BACKEND_SECRET_KEY }}"
|
||||||
|
AWS_GCP_BACKEND_ACCESS_KEY_2: "${{ secrets.AWS_GCP_BACKEND_ACCESS_KEY_2 }}"
|
||||||
|
AWS_GCP_BACKEND_SECRET_KEY_2: "${{ secrets.AWS_GCP_BACKEND_SECRET_KEY_2 }}"
|
||||||
|
b2backend_B2_STORAGE_ENDPOINT: "${{ secrets.B2BACKEND_B2_STORAGE_ENDPOINT }}"
|
||||||
|
gcpbackend2_GCP_SERVICE_EMAIL: "${{ secrets.GCP2_SERVICE_EMAIL }}"
|
||||||
|
gcpbackend2_GCP_SERVICE_KEY: "${{ secrets.GCP2_SERVICE_KEY }}"
|
||||||
|
gcpbackend2_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
|
||||||
|
gcpbackend_GCP_SERVICE_EMAIL: "${{ secrets.GCP_SERVICE_EMAIL }}"
|
||||||
|
gcpbackend_GCP_SERVICE_KEY: "${{ secrets.GCP_SERVICE_KEY }}"
|
||||||
|
gcpbackendmismatch_GCP_SERVICE_EMAIL: >-
|
||||||
|
${{ secrets.GCPBACKENDMISMATCH_GCP_SERVICE_EMAIL }}
|
||||||
|
gcpbackendmismatch_GCP_SERVICE_KEY: >-
|
||||||
|
${{ secrets.GCPBACKENDMISMATCH_GCP_SERVICE_KEY }}
|
||||||
|
gcpbackend_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
|
||||||
|
gcpbackendmismatch_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
|
||||||
|
gcpbackendnoproxy_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
|
||||||
|
gcpbackendproxy_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
|
||||||
|
# Configs
|
||||||
|
ENABLE_LOCAL_CACHE: "true"
|
||||||
|
REPORT_TOKEN: "report-token-1"
|
||||||
|
REMOTE_MANAGEMENT_DISABLE: "1"
|
||||||
|
# https://github.com/git-lfs/git-lfs/issues/5749
|
||||||
|
GIT_CLONE_PROTECTION_ACTIVE: 'false'
|
||||||
|
jobs:
|
||||||
|
linting-coverage:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
- uses: actions/setup-node@v4
|
||||||
|
with:
|
||||||
|
node-version: '16'
|
||||||
|
cache: yarn
|
||||||
|
- name: install dependencies
|
||||||
|
run: yarn install --frozen-lockfile --network-concurrency 1
|
||||||
|
- uses: actions/setup-python@v5
|
||||||
|
with:
|
||||||
|
python-version: '3.9'
|
||||||
|
- uses: actions/cache@v4
|
||||||
|
with:
|
||||||
|
path: ~/.cache/pip
|
||||||
|
key: ${{ runner.os }}-pip
|
||||||
|
- name: Install python deps
|
||||||
|
run: pip install flake8
|
||||||
|
- name: Lint Javascript
|
||||||
|
run: yarn run --silent lint -- --max-warnings 0
|
||||||
|
- name: Lint Markdown
|
||||||
|
run: yarn run --silent lint_md
|
||||||
|
- name: Lint python
|
||||||
|
run: flake8 $(git ls-files "*.py")
|
||||||
|
- name: Lint Yaml
|
||||||
|
run: yamllint -c yamllint.yml $(git ls-files "*.yml")
|
||||||
|
- name: Unit Coverage
|
||||||
|
run: |
|
||||||
|
set -ex
|
||||||
|
mkdir -p $CIRCLE_TEST_REPORTS/unit
|
||||||
|
yarn test
|
||||||
|
yarn run test_legacy_location
|
||||||
|
env:
|
||||||
|
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
|
||||||
|
CIRCLE_TEST_REPORTS: /tmp
|
||||||
|
CIRCLE_ARTIFACTS: /tmp
|
||||||
|
CI_REPORTS: /tmp
|
||||||
|
- name: Unit Coverage logs
|
||||||
|
run: find /tmp/unit -exec cat {} \;
|
||||||
|
- name: preparing junit files for upload
|
||||||
|
run: |
|
||||||
|
mkdir -p artifacts/junit
|
||||||
|
find . -name "*junit*.xml" -exec cp {} artifacts/junit/ ";"
|
||||||
|
if: always()
|
||||||
|
- name: Upload files to artifacts
|
||||||
|
uses: scality/action-artifacts@v4
|
||||||
|
with:
|
||||||
|
method: upload
|
||||||
|
url: https://artifacts.scality.net
|
||||||
|
user: ${{ secrets.ARTIFACTS_USER }}
|
||||||
|
password: ${{ secrets.ARTIFACTS_PASSWORD }}
|
||||||
|
source: artifacts
|
||||||
|
if: always()
|
||||||
|
|
||||||
|
build:
|
||||||
|
runs-on: ubuntu-20.04
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
|
packages: write
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
- name: Set up Docker Buildx
|
||||||
|
uses: docker/setup-buildx-action@v3
|
||||||
|
- name: Login to GitHub Registry
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
registry: ghcr.io
|
||||||
|
username: ${{ github.repository_owner }}
|
||||||
|
password: ${{ github.token }}
|
||||||
|
- name: Build and push cloudserver image
|
||||||
|
uses: docker/build-push-action@v5
|
||||||
|
with:
|
||||||
|
push: true
|
||||||
|
context: .
|
||||||
|
provenance: false
|
||||||
|
tags: |
|
||||||
|
ghcr.io/${{ github.repository }}:${{ github.sha }}
|
||||||
|
labels: |
|
||||||
|
git.repository=${{ github.repository }}
|
||||||
|
git.commit-sha=${{ github.sha }}
|
||||||
|
cache-from: type=gha,scope=cloudserver
|
||||||
|
cache-to: type=gha,mode=max,scope=cloudserver
|
||||||
|
- name: Build and push pykmip image
|
||||||
|
uses: docker/build-push-action@v5
|
||||||
|
with:
|
||||||
|
push: true
|
||||||
|
context: .github/pykmip
|
||||||
|
tags: |
|
||||||
|
ghcr.io/${{ github.repository }}/pykmip:${{ github.sha }}
|
||||||
|
labels: |
|
||||||
|
git.repository=${{ github.repository }}
|
||||||
|
git.commit-sha=${{ github.sha }}
|
||||||
|
cache-from: type=gha,scope=pykmip
|
||||||
|
cache-to: type=gha,mode=max,scope=pykmip
|
||||||
|
- name: Build and push MongoDB
|
||||||
|
uses: docker/build-push-action@v5
|
||||||
|
with:
|
||||||
|
push: true
|
||||||
|
context: .github/docker/mongodb
|
||||||
|
tags: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
|
||||||
|
cache-from: type=gha,scope=mongodb
|
||||||
|
cache-to: type=gha,mode=max,scope=mongodb
|
||||||
|
|
||||||
|
multiple-backend:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
needs: build
|
||||||
|
env:
|
||||||
|
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
|
||||||
|
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
|
||||||
|
S3BACKEND: mem
|
||||||
|
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
|
||||||
|
S3DATA: multiple
|
||||||
|
JOB_NAME: ${{ github.job }}
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
- name: Login to Registry
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
registry: ghcr.io
|
||||||
|
username: ${{ github.repository_owner }}
|
||||||
|
password: ${{ github.token }}
|
||||||
|
- name: Setup CI environment
|
||||||
|
uses: ./.github/actions/setup-ci
|
||||||
|
- name: Setup CI services
|
||||||
|
run: docker compose --profile sproxyd up -d
|
||||||
|
working-directory: .github/docker
|
||||||
|
- name: Run multiple backend test
|
||||||
|
run: |-
|
||||||
|
set -o pipefail;
|
||||||
|
bash wait_for_local_port.bash 8000 40
|
||||||
|
bash wait_for_local_port.bash 81 40
|
||||||
|
yarn run multiple_backend_test | tee /tmp/artifacts/${{ github.job }}/tests.log
|
||||||
|
env:
|
||||||
|
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
|
||||||
|
- name: Upload logs to artifacts
|
||||||
|
uses: scality/action-artifacts@v4
|
||||||
|
with:
|
||||||
|
method: upload
|
||||||
|
url: https://artifacts.scality.net
|
||||||
|
user: ${{ secrets.ARTIFACTS_USER }}
|
||||||
|
password: ${{ secrets.ARTIFACTS_PASSWORD }}
|
||||||
|
source: /tmp/artifacts
|
||||||
|
if: always()
|
||||||
|
|
||||||
|
mongo-v0-ft-tests:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
needs: build
|
||||||
|
env:
|
||||||
|
S3BACKEND: mem
|
||||||
|
MPU_TESTING: "yes"
|
||||||
|
S3METADATA: mongodb
|
||||||
|
S3KMS: file
|
||||||
|
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
|
||||||
|
DEFAULT_BUCKET_KEY_FORMAT: v0
|
||||||
|
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
|
||||||
|
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
|
||||||
|
JOB_NAME: ${{ github.job }}
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
- name: Setup CI environment
|
||||||
|
uses: ./.github/actions/setup-ci
|
||||||
|
- name: Setup CI services
|
||||||
|
run: docker compose --profile mongo up -d
|
||||||
|
working-directory: .github/docker
|
||||||
|
- name: Run functional tests
|
||||||
|
run: |-
|
||||||
|
set -o pipefail;
|
||||||
|
bash wait_for_local_port.bash 8000 40
|
||||||
|
yarn run ft_test | tee /tmp/artifacts/${{ github.job }}/tests.log
|
||||||
|
env:
|
||||||
|
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
|
||||||
|
- name: Upload logs to artifacts
|
||||||
|
uses: scality/action-artifacts@v4
|
||||||
|
with:
|
||||||
|
method: upload
|
||||||
|
url: https://artifacts.scality.net
|
||||||
|
user: ${{ secrets.ARTIFACTS_USER }}
|
||||||
|
password: ${{ secrets.ARTIFACTS_PASSWORD }}
|
||||||
|
source: /tmp/artifacts
|
||||||
|
if: always()
|
||||||
|
|
||||||
|
mongo-v1-ft-tests:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
needs: build
|
||||||
|
env:
|
||||||
|
S3BACKEND: mem
|
||||||
|
MPU_TESTING: "yes"
|
||||||
|
S3METADATA: mongodb
|
||||||
|
S3KMS: file
|
||||||
|
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
|
||||||
|
DEFAULT_BUCKET_KEY_FORMAT: v1
|
||||||
|
METADATA_MAX_CACHED_BUCKETS: 1
|
||||||
|
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
|
||||||
|
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
|
||||||
|
JOB_NAME: ${{ github.job }}
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
- name: Setup CI environment
|
||||||
|
uses: ./.github/actions/setup-ci
|
||||||
|
- name: Setup CI services
|
||||||
|
run: docker compose --profile mongo up -d
|
||||||
|
working-directory: .github/docker
|
||||||
|
- name: Run functional tests
|
||||||
|
run: |-
|
||||||
|
set -o pipefail;
|
||||||
|
bash wait_for_local_port.bash 8000 40
|
||||||
|
yarn run ft_test | tee /tmp/artifacts/${{ github.job }}/tests.log
|
||||||
|
yarn run ft_mixed_bucket_format_version | tee /tmp/artifacts/${{ github.job }}/mixed-tests.log
|
||||||
|
env:
|
||||||
|
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
|
||||||
|
- name: Upload logs to artifacts
|
||||||
|
uses: scality/action-artifacts@v4
|
||||||
|
with:
|
||||||
|
method: upload
|
||||||
|
url: https://artifacts.scality.net
|
||||||
|
user: ${{ secrets.ARTIFACTS_USER }}
|
||||||
|
password: ${{ secrets.ARTIFACTS_PASSWORD }}
|
||||||
|
source: /tmp/artifacts
|
||||||
|
if: always()
|
||||||
|
|
||||||
|
file-ft-tests:
|
||||||
|
strategy:
|
||||||
|
matrix:
|
||||||
|
include:
|
||||||
|
- job-name: file-ft-tests
|
||||||
|
name: ${{ matrix.job-name }}
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
needs: build
|
||||||
|
env:
|
||||||
|
S3BACKEND: file
|
||||||
|
S3VAULT: mem
|
||||||
|
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
|
||||||
|
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
|
||||||
|
MPU_TESTING: "yes"
|
||||||
|
JOB_NAME: ${{ matrix.job-name }}
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
- name: Setup CI environment
|
||||||
|
uses: ./.github/actions/setup-ci
|
||||||
|
- name: Setup matrix job artifacts directory
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
set -exu
|
||||||
|
mkdir -p /tmp/artifacts/${{ matrix.job-name }}/
|
||||||
|
- name: Setup CI services
|
||||||
|
run: docker compose up -d
|
||||||
|
working-directory: .github/docker
|
||||||
|
- name: Run file ft tests
|
||||||
|
run: |-
|
||||||
|
set -o pipefail;
|
||||||
|
bash wait_for_local_port.bash 8000 40
|
||||||
|
yarn run ft_test | tee /tmp/artifacts/${{ matrix.job-name }}/tests.log
|
||||||
|
- name: Upload logs to artifacts
|
||||||
|
uses: scality/action-artifacts@v4
|
||||||
|
with:
|
||||||
|
method: upload
|
||||||
|
url: https://artifacts.scality.net
|
||||||
|
user: ${{ secrets.ARTIFACTS_USER }}
|
||||||
|
password: ${{ secrets.ARTIFACTS_PASSWORD }}
|
||||||
|
source: /tmp/artifacts
|
||||||
|
if: always()
|
||||||
|
|
||||||
|
utapi-v2-tests:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
needs: build
|
||||||
|
env:
|
||||||
|
ENABLE_UTAPI_V2: t
|
||||||
|
S3BACKEND: mem
|
||||||
|
BUCKET_DENY_FILTER: utapi-event-filter-deny-bucket
|
||||||
|
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
|
||||||
|
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
|
||||||
|
JOB_NAME: ${{ github.job }}
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
- name: Setup CI environment
|
||||||
|
uses: ./.github/actions/setup-ci
|
||||||
|
- name: Setup CI services
|
||||||
|
run: docker compose up -d
|
||||||
|
working-directory: .github/docker
|
||||||
|
- name: Run file utapi v2 tests
|
||||||
|
run: |-
|
||||||
|
set -ex -o pipefail;
|
||||||
|
bash wait_for_local_port.bash 8000 40
|
||||||
|
yarn run test_utapi_v2 | tee /tmp/artifacts/${{ github.job }}/tests.log
|
||||||
|
- name: Upload logs to artifacts
|
||||||
|
uses: scality/action-artifacts@v4
|
||||||
|
with:
|
||||||
|
method: upload
|
||||||
|
url: https://artifacts.scality.net
|
||||||
|
user: ${{ secrets.ARTIFACTS_USER }}
|
||||||
|
password: ${{ secrets.ARTIFACTS_PASSWORD }}
|
||||||
|
source: /tmp/artifacts
|
||||||
|
if: always()
|
||||||
|
|
||||||
|
quota-tests:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
needs: build
|
||||||
|
strategy:
|
||||||
|
matrix:
|
||||||
|
inflights:
|
||||||
|
- name: "With Inflights"
|
||||||
|
value: "true"
|
||||||
|
- name: "Without Inflights"
|
||||||
|
value: "false"
|
||||||
|
env:
|
||||||
|
S3METADATA: mongodb
|
||||||
|
S3BACKEND: mem
|
||||||
|
S3QUOTA: scuba
|
||||||
|
QUOTA_ENABLE_INFLIGHTS: ${{ matrix.inflights.value }}
|
||||||
|
SCUBA_HOST: localhost
|
||||||
|
SCUBA_PORT: 8100
|
||||||
|
SCUBA_HEALTHCHECK_FREQUENCY: 100
|
||||||
|
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
|
||||||
|
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
|
||||||
|
JOB_NAME: ${{ github.job }}
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
- name: Setup CI environment
|
||||||
|
uses: ./.github/actions/setup-ci
|
||||||
|
- name: Setup CI services
|
||||||
|
run: docker compose --profile mongo up -d
|
||||||
|
working-directory: .github/docker
|
||||||
|
- name: Run quota tests
|
||||||
|
run: |-
|
||||||
|
set -ex -o pipefail;
|
||||||
|
bash wait_for_local_port.bash 8000 40
|
||||||
|
yarn run test_quota | tee /tmp/artifacts/${{ github.job }}/tests.log
|
||||||
|
- name: Upload logs to artifacts
|
||||||
|
uses: scality/action-artifacts@v4
|
||||||
|
with:
|
||||||
|
method: upload
|
||||||
|
url: https://artifacts.scality.net
|
||||||
|
user: ${{ secrets.ARTIFACTS_USER }}
|
||||||
|
password: ${{ secrets.ARTIFACTS_PASSWORD }}
|
||||||
|
source: /tmp/artifacts
|
||||||
|
if: always()
|
||||||
|
|
||||||
|
kmip-ft-tests:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
needs: build
|
||||||
|
env:
|
||||||
|
S3BACKEND: file
|
||||||
|
S3VAULT: mem
|
||||||
|
MPU_TESTING: "yes"
|
||||||
|
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
|
||||||
|
PYKMIP_IMAGE: ghcr.io/${{ github.repository }}/pykmip:${{ github.sha }}
|
||||||
|
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
|
||||||
|
JOB_NAME: ${{ github.job }}
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
- name: Setup CI environment
|
||||||
|
uses: ./.github/actions/setup-ci
|
||||||
|
- name: Copy KMIP certs
|
||||||
|
run: cp -r ./certs /tmp/ssl-kmip
|
||||||
|
working-directory: .github/pykmip
|
||||||
|
- name: Setup CI services
|
||||||
|
run: docker compose --profile pykmip up -d
|
||||||
|
working-directory: .github/docker
|
||||||
|
- name: Run file KMIP tests
|
||||||
|
run: |-
|
||||||
|
set -ex -o pipefail;
|
||||||
|
bash wait_for_local_port.bash 8000 40
|
||||||
|
bash wait_for_local_port.bash 5696 40
|
||||||
|
yarn run ft_kmip | tee /tmp/artifacts/${{ github.job }}/tests.log
|
||||||
|
- name: Upload logs to artifacts
|
||||||
|
uses: scality/action-artifacts@v4
|
||||||
|
with:
|
||||||
|
method: upload
|
||||||
|
url: https://artifacts.scality.net
|
||||||
|
user: ${{ secrets.ARTIFACTS_USER }}
|
||||||
|
password: ${{ secrets.ARTIFACTS_PASSWORD }}
|
||||||
|
source: /tmp/artifacts
|
||||||
|
if: always()
|
||||||
|
|
||||||
|
ceph-backend-test:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
needs: build
|
||||||
|
env:
|
||||||
|
S3BACKEND: mem
|
||||||
|
S3DATA: multiple
|
||||||
|
S3KMS: file
|
||||||
|
CI_CEPH: 'true'
|
||||||
|
MPU_TESTING: "yes"
|
||||||
|
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigCeph.json
|
||||||
|
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
|
||||||
|
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
|
||||||
|
JOB_NAME: ${{ github.job }}
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
- name: Login to GitHub Registry
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
registry: ghcr.io
|
||||||
|
username: ${{ github.repository_owner }}
|
||||||
|
password: ${{ github.token }}
|
||||||
|
- name: Setup CI environment
|
||||||
|
uses: ./.github/actions/setup-ci
|
||||||
|
- uses: ruby/setup-ruby@v1
|
||||||
|
with:
|
||||||
|
ruby-version: '2.5.9'
|
||||||
|
- name: Install Ruby dependencies
|
||||||
|
run: |
|
||||||
|
gem install nokogiri:1.12.5 excon:0.109.0 fog-aws:1.3.0 json mime-types:3.1 rspec:3.5
|
||||||
|
- name: Install Java dependencies
|
||||||
|
run: |
|
||||||
|
sudo apt-get update && sudo apt-get install -y --fix-missing default-jdk maven
|
||||||
|
- name: Setup CI services
|
||||||
|
run: docker compose --profile ceph up -d
|
||||||
|
working-directory: .github/docker
|
||||||
|
env:
|
||||||
|
S3METADATA: mongodb
|
||||||
|
- name: Run Ceph multiple backend tests
|
||||||
|
run: |-
|
||||||
|
set -ex -o pipefail;
|
||||||
|
bash .github/ceph/wait_for_ceph.sh
|
||||||
|
bash wait_for_local_port.bash 27018 40
|
||||||
|
bash wait_for_local_port.bash 8000 40
|
||||||
|
yarn run multiple_backend_test | tee /tmp/artifacts/${{ github.job }}/multibackend-tests.log
|
||||||
|
env:
|
||||||
|
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
|
||||||
|
S3METADATA: mem
|
||||||
|
- name: Run Java tests
|
||||||
|
run: |-
|
||||||
|
set -ex -o pipefail;
|
||||||
|
mvn test | tee /tmp/artifacts/${{ github.job }}/java-tests.log
|
||||||
|
working-directory: tests/functional/jaws
|
||||||
|
- name: Run Ruby tests
|
||||||
|
run: |-
|
||||||
|
set -ex -o pipefail;
|
||||||
|
rspec -fd --backtrace tests.rb | tee /tmp/artifacts/${{ github.job }}/ruby-tests.log
|
||||||
|
working-directory: tests/functional/fog
|
||||||
|
- name: Run Javascript AWS SDK tests
|
||||||
|
run: |-
|
||||||
|
set -ex -o pipefail;
|
||||||
|
yarn run ft_awssdk | tee /tmp/artifacts/${{ github.job }}/js-awssdk-tests.log;
|
||||||
|
yarn run ft_s3cmd | tee /tmp/artifacts/${{ github.job }}/js-s3cmd-tests.log;
|
||||||
|
env:
|
||||||
|
S3_LOCATION_FILE: tests/locationConfig/locationConfigCeph.json
|
||||||
|
S3BACKEND: file
|
||||||
|
S3VAULT: mem
|
||||||
|
S3METADATA: mongodb
|
||||||
|
- name: Upload logs to artifacts
|
||||||
|
uses: scality/action-artifacts@v4
|
||||||
|
with:
|
||||||
|
method: upload
|
||||||
|
url: https://artifacts.scality.net
|
||||||
|
user: ${{ secrets.ARTIFACTS_USER }}
|
||||||
|
password: ${{ secrets.ARTIFACTS_PASSWORD }}
|
||||||
|
source: /tmp/artifacts
|
||||||
|
if: always()
|
81
Dockerfile
81
Dockerfile
|
@ -1,5 +1,38 @@
|
||||||
FROM node:16.13.2-slim
|
ARG NODE_VERSION=16.20-bullseye-slim
|
||||||
MAINTAINER Giorgio Regni <gr@scality.com>
|
|
||||||
|
FROM node:${NODE_VERSION} as builder
|
||||||
|
|
||||||
|
WORKDIR /usr/src/app
|
||||||
|
|
||||||
|
RUN apt-get update \
|
||||||
|
&& apt-get install -y --no-install-recommends \
|
||||||
|
build-essential \
|
||||||
|
ca-certificates \
|
||||||
|
curl \
|
||||||
|
git \
|
||||||
|
gnupg2 \
|
||||||
|
jq \
|
||||||
|
python3 \
|
||||||
|
ssh \
|
||||||
|
wget \
|
||||||
|
libffi-dev \
|
||||||
|
zlib1g-dev \
|
||||||
|
&& apt-get clean \
|
||||||
|
&& mkdir -p /root/ssh \
|
||||||
|
&& ssh-keyscan -H github.com > /root/ssh/known_hosts
|
||||||
|
|
||||||
|
ENV PYTHON=python3
|
||||||
|
COPY package.json yarn.lock /usr/src/app/
|
||||||
|
RUN npm install typescript -g
|
||||||
|
RUN yarn install --production --ignore-optional --frozen-lockfile --ignore-engines --network-concurrency 1
|
||||||
|
|
||||||
|
################################################################################
|
||||||
|
FROM node:${NODE_VERSION}
|
||||||
|
|
||||||
|
RUN apt-get update && \
|
||||||
|
apt-get install -y --no-install-recommends \
|
||||||
|
jq \
|
||||||
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
ENV NO_PROXY localhost,127.0.0.1
|
ENV NO_PROXY localhost,127.0.0.1
|
||||||
ENV no_proxy localhost,127.0.0.1
|
ENV no_proxy localhost,127.0.0.1
|
||||||
|
@ -7,47 +40,21 @@ ENV no_proxy localhost,127.0.0.1
|
||||||
EXPOSE 8000
|
EXPOSE 8000
|
||||||
EXPOSE 8002
|
EXPOSE 8002
|
||||||
|
|
||||||
COPY ./package.json /usr/src/app/
|
RUN apt-get update && \
|
||||||
COPY ./yarn.lock /usr/src/app/
|
apt-get install -y --no-install-recommends \
|
||||||
|
jq \
|
||||||
|
tini \
|
||||||
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
WORKDIR /usr/src/app
|
WORKDIR /usr/src/app
|
||||||
|
|
||||||
RUN apt-get update \
|
# Keep the .git directory in order to properly report version
|
||||||
&& apt-get install -y \
|
|
||||||
curl \
|
|
||||||
gnupg2
|
|
||||||
|
|
||||||
RUN curl -sS http://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - \
|
|
||||||
&& echo "deb http://dl.yarnpkg.com/debian/ stable main" | tee /etc/apt/sources.list.d/yarn.list
|
|
||||||
|
|
||||||
RUN apt-get update \
|
|
||||||
&& apt-get install -y --no-install-recommends \
|
|
||||||
build-essential \
|
|
||||||
ca-certificates \
|
|
||||||
git \
|
|
||||||
jq \
|
|
||||||
python3 \
|
|
||||||
ssh \
|
|
||||||
yarn \
|
|
||||||
wget \
|
|
||||||
libffi-dev \
|
|
||||||
zlib1g-dev \
|
|
||||||
&& mkdir -p /root/ssh \
|
|
||||||
&& ssh-keyscan -H github.com > /root/ssh/known_hosts
|
|
||||||
|
|
||||||
ENV PYTHON=python3
|
|
||||||
RUN yarn cache clean \
|
|
||||||
&& yarn install --production --ignore-optional --ignore-engines --network-concurrency 1 \
|
|
||||||
&& apt-get autoremove --purge -y python git build-essential \
|
|
||||||
&& rm -rf /var/lib/apt/lists/* \
|
|
||||||
&& yarn cache clean \
|
|
||||||
&& rm -rf ~/.node-gyp \
|
|
||||||
&& rm -rf /tmp/yarn-*
|
|
||||||
|
|
||||||
COPY . /usr/src/app
|
COPY . /usr/src/app
|
||||||
|
COPY --from=builder /usr/src/app/node_modules ./node_modules/
|
||||||
|
|
||||||
|
|
||||||
VOLUME ["/usr/src/app/localData","/usr/src/app/localMetadata"]
|
VOLUME ["/usr/src/app/localData","/usr/src/app/localMetadata"]
|
||||||
|
|
||||||
ENTRYPOINT ["/usr/src/app/docker-entrypoint.sh"]
|
ENTRYPOINT ["tini", "--", "/usr/src/app/docker-entrypoint.sh"]
|
||||||
|
|
||||||
CMD [ "yarn", "start" ]
|
CMD [ "yarn", "start" ]
|
||||||
|
|
|
@ -7,7 +7,7 @@ COPY . /usr/src/app
|
||||||
|
|
||||||
RUN apt-get update \
|
RUN apt-get update \
|
||||||
&& apt-get install -y jq python git build-essential --no-install-recommends \
|
&& apt-get install -y jq python git build-essential --no-install-recommends \
|
||||||
&& yarn install --production --network-concurrency 1 \
|
&& yarn install --production \
|
||||||
&& apt-get autoremove --purge -y python git build-essential \
|
&& apt-get autoremove --purge -y python git build-essential \
|
||||||
&& rm -rf /var/lib/apt/lists/* \
|
&& rm -rf /var/lib/apt/lists/* \
|
||||||
&& yarn cache clean \
|
&& yarn cache clean \
|
||||||
|
|
175
README.md
175
README.md
|
@ -1,10 +1,7 @@
|
||||||
# Zenko CloudServer
|
# Zenko CloudServer with Vitastor Backend
|
||||||
|
|
||||||
![Zenko CloudServer logo](res/scality-cloudserver-logo.png)
|
![Zenko CloudServer logo](res/scality-cloudserver-logo.png)
|
||||||
|
|
||||||
[![Docker Pulls][badgedocker]](https://hub.docker.com/r/zenko/cloudserver)
|
|
||||||
[![Docker Pulls][badgetwitter]](https://twitter.com/zenko)
|
|
||||||
|
|
||||||
## Overview
|
## Overview
|
||||||
|
|
||||||
CloudServer (formerly S3 Server) is an open-source Amazon S3-compatible
|
CloudServer (formerly S3 Server) is an open-source Amazon S3-compatible
|
||||||
|
@ -14,137 +11,71 @@ Scality’s Open Source Multi-Cloud Data Controller.
|
||||||
CloudServer provides a single AWS S3 API interface to access multiple
|
CloudServer provides a single AWS S3 API interface to access multiple
|
||||||
backend data storage both on-premise or public in the cloud.
|
backend data storage both on-premise or public in the cloud.
|
||||||
|
|
||||||
CloudServer is useful for Developers, either to run as part of a
|
This repository contains a fork of CloudServer with [Vitastor](https://git.yourcmc.ru/vitalif/vitastor)
|
||||||
continous integration test environment to emulate the AWS S3 service locally
|
backend support.
|
||||||
or as an abstraction layer to develop object storage enabled
|
|
||||||
application on the go.
|
|
||||||
|
|
||||||
## Learn more at [www.zenko.io/cloudserver](https://www.zenko.io/cloudserver/)
|
## Quick Start with Vitastor
|
||||||
|
|
||||||
## [May I offer you some lovely documentation?](http://s3-server.readthedocs.io/en/latest/)
|
Vitastor Backend is in experimental status, however you can already try to
|
||||||
|
run it and write or read something, or even mount it with [GeeseFS](https://github.com/yandex-cloud/geesefs),
|
||||||
|
it works too 😊.
|
||||||
|
|
||||||
## Docker
|
Installation instructions:
|
||||||
|
|
||||||
[Run your Zenko CloudServer with Docker](https://hub.docker.com/r/zenko/cloudserver/)
|
### Install Vitastor
|
||||||
|
|
||||||
## Contributing
|
Refer to [Vitastor Quick Start Manual](https://git.yourcmc.ru/vitalif/vitastor/src/branch/master/docs/intro/quickstart.en.md).
|
||||||
|
|
||||||
In order to contribute, please follow the
|
### Install Zenko with Vitastor Backend
|
||||||
[Contributing Guidelines](
|
|
||||||
https://github.com/scality/Guidelines/blob/master/CONTRIBUTING.md).
|
|
||||||
|
|
||||||
## Installation
|
- Clone this repository: `git clone https://git.yourcmc.ru/vitalif/zenko-cloudserver-vitastor`
|
||||||
|
- Install dependencies: `npm install --omit dev` or just `npm install`
|
||||||
|
- Clone Vitastor repository: `git clone https://git.yourcmc.ru/vitalif/vitastor`
|
||||||
|
- Build Vitastor node.js binding by running `npm install` in `node-binding` subdirectory of Vitastor repository.
|
||||||
|
You need `node-gyp` and `vitastor-client-dev` (Vitastor client library) for it to succeed.
|
||||||
|
- Symlink Vitastor module to Zenko: `ln -s /path/to/vitastor/node-binding /path/to/zenko/node_modules/vitastor`
|
||||||
|
|
||||||
### Dependencies
|
### Install and Configure MongoDB
|
||||||
|
|
||||||
Building and running the Zenko CloudServer requires node.js 10.x and yarn v1.17.x
|
Refer to [MongoDB Manual](https://www.mongodb.com/docs/manual/installation/).
|
||||||
. Up-to-date versions can be found at
|
|
||||||
[Nodesource](https://github.com/nodesource/distributions).
|
|
||||||
|
|
||||||
### Clone source code
|
### Setup Zenko
|
||||||
|
|
||||||
```shell
|
- Create a separate pool for S3 object data in your Vitastor cluster: `vitastor-cli create-pool s3-data`
|
||||||
git clone https://github.com/scality/S3.git
|
- Retrieve ID of the new pool from `vitastor-cli ls-pools --detail s3-data`
|
||||||
|
- In another pool, create an image for storing Vitastor volume metadata: `vitastor-cli create -s 10G s3-volume-meta`
|
||||||
|
- Copy `config.json.vitastor` to `config.json`, adjust it to match your domain
|
||||||
|
- Copy `authdata.json.example` to `authdata.json` - this is where you set S3 access & secret keys,
|
||||||
|
and also adjust them if you want to. Scality seems to use a separate auth service "Scality Vault" for
|
||||||
|
access keys, but it's not published, so let's use a file for now.
|
||||||
|
- Copy `locationConfig.json.vitastor` to `locationConfig.json` - this is where you set Vitastor cluster access data.
|
||||||
|
You should put correct values for `pool_id` (pool ID from the second step) and `metadata_image` (from the third step)
|
||||||
|
in this file.
|
||||||
|
|
||||||
|
Note: `locationConfig.json` in this version corresponds to storage classes (like STANDARD, COLD, etc)
|
||||||
|
instead of "locations" (zones like us-east-1) as it was in original Zenko CloudServer.
|
||||||
|
|
||||||
|
### Start Zenko
|
||||||
|
|
||||||
|
Start the S3 server with: `node index.js`
|
||||||
|
|
||||||
|
If you use default settings, Zenko CloudServer starts on port 8000.
|
||||||
|
The default access key is `accessKey1` with a secret key of `verySecretKey1`.
|
||||||
|
|
||||||
|
Now you can access your S3 with `s3cmd` or `geesefs`:
|
||||||
|
|
||||||
|
```
|
||||||
|
s3cmd --access_key=accessKey1 --secret_key=verySecretKey1 --host=http://localhost:8000 mb s3://testbucket
|
||||||
```
|
```
|
||||||
|
|
||||||
### Install js dependencies
|
```
|
||||||
|
AWS_ACCESS_KEY_ID=accessKey1 \
|
||||||
Go to the ./S3 folder,
|
AWS_SECRET_ACCESS_KEY=verySecretKey1 \
|
||||||
|
geesefs --endpoint http://localhost:8000 testbucket mountdir
|
||||||
```shell
|
|
||||||
yarn install --frozen-lockfile
|
|
||||||
```
|
```
|
||||||
|
|
||||||
If you get an error regarding installation of the diskUsage module,
|
# Author & License
|
||||||
please install g++.
|
|
||||||
|
|
||||||
If you get an error regarding level-down bindings, try clearing your yarn cache:
|
- [Zenko CloudServer](https://s3-server.readthedocs.io/en/latest/) author is Scality, licensed under [Apache License, version 2.0](https://www.apache.org/licenses/LICENSE-2.0)
|
||||||
|
- [Vitastor](https://git.yourcmc.ru/vitalif/vitastor/) and Zenko Vitastor backend author is Vitaliy Filippov, licensed under [VNPL-1.1](https://git.yourcmc.ru/vitalif/vitastor/src/branch/master/VNPL-1.1.txt)
|
||||||
```shell
|
(a "network copyleft" license based on AGPL/SSPL, but worded in a better way)
|
||||||
yarn cache clean
|
|
||||||
```
|
|
||||||
|
|
||||||
## Run it with a file backend
|
|
||||||
|
|
||||||
```shell
|
|
||||||
yarn start
|
|
||||||
```
|
|
||||||
|
|
||||||
This starts a Zenko CloudServer on port 8000. Two additional ports 9990 and
|
|
||||||
9991 are also open locally for internal transfer of metadata and data,
|
|
||||||
respectively.
|
|
||||||
|
|
||||||
The default access key is accessKey1 with
|
|
||||||
a secret key of verySecretKey1.
|
|
||||||
|
|
||||||
By default the metadata files will be saved in the
|
|
||||||
localMetadata directory and the data files will be saved
|
|
||||||
in the localData directory within the ./S3 directory on your
|
|
||||||
machine. These directories have been pre-created within the
|
|
||||||
repository. If you would like to save the data or metadata in
|
|
||||||
different locations of your choice, you must specify them with absolute paths.
|
|
||||||
So, when starting the server:
|
|
||||||
|
|
||||||
```shell
|
|
||||||
mkdir -m 700 $(pwd)/myFavoriteDataPath
|
|
||||||
mkdir -m 700 $(pwd)/myFavoriteMetadataPath
|
|
||||||
export S3DATAPATH="$(pwd)/myFavoriteDataPath"
|
|
||||||
export S3METADATAPATH="$(pwd)/myFavoriteMetadataPath"
|
|
||||||
yarn start
|
|
||||||
```
|
|
||||||
|
|
||||||
## Run it with multiple data backends
|
|
||||||
|
|
||||||
```shell
|
|
||||||
export S3DATA='multiple'
|
|
||||||
yarn start
|
|
||||||
```
|
|
||||||
|
|
||||||
This starts a Zenko CloudServer on port 8000.
|
|
||||||
The default access key is accessKey1 with
|
|
||||||
a secret key of verySecretKey1.
|
|
||||||
|
|
||||||
With multiple backends, you have the ability to
|
|
||||||
choose where each object will be saved by setting
|
|
||||||
the following header with a locationConstraint on
|
|
||||||
a PUT request:
|
|
||||||
|
|
||||||
```shell
|
|
||||||
'x-amz-meta-scal-location-constraint':'myLocationConstraint'
|
|
||||||
```
|
|
||||||
|
|
||||||
If no header is sent with a PUT object request, the
|
|
||||||
location constraint of the bucket will determine
|
|
||||||
where the data is saved. If the bucket has no location
|
|
||||||
constraint, the endpoint of the PUT request will be
|
|
||||||
used to determine location.
|
|
||||||
|
|
||||||
See the Configuration section in our documentation
|
|
||||||
[here](http://s3-server.readthedocs.io/en/latest/GETTING_STARTED/#configuration)
|
|
||||||
to learn how to set location constraints.
|
|
||||||
|
|
||||||
## Run it with an in-memory backend
|
|
||||||
|
|
||||||
```shell
|
|
||||||
yarn run mem_backend
|
|
||||||
```
|
|
||||||
|
|
||||||
This starts a Zenko CloudServer on port 8000.
|
|
||||||
The default access key is accessKey1 with
|
|
||||||
a secret key of verySecretKey1.
|
|
||||||
|
|
||||||
## Run it with Vault user management
|
|
||||||
|
|
||||||
Note: Vault is proprietary and must be accessed separately.
|
|
||||||
|
|
||||||
```shell
|
|
||||||
export S3VAULT=vault
|
|
||||||
yarn start
|
|
||||||
```
|
|
||||||
|
|
||||||
This starts a Zenko CloudServer using Vault for user management.
|
|
||||||
|
|
||||||
[badgetwitter]: https://img.shields.io/twitter/follow/zenko.svg?style=social&label=Follow
|
|
||||||
[badgedocker]: https://img.shields.io/docker/pulls/scality/s3server.svg
|
|
||||||
[badgepub]: https://circleci.com/gh/scality/S3.svg?style=svg
|
|
||||||
[badgepriv]: http://ci.ironmann.io/gh/scality/S3.svg?style=svg&circle-token=1f105b7518b53853b5b7cf72302a3f75d8c598ae
|
|
||||||
|
|
|
@ -1,46 +0,0 @@
|
||||||
#!/usr/bin/env node
|
|
||||||
'use strict'; // eslint-disable-line strict
|
|
||||||
|
|
||||||
const {
|
|
||||||
startWSManagementClient,
|
|
||||||
startPushConnectionHealthCheckServer,
|
|
||||||
} = require('../lib/management/push');
|
|
||||||
|
|
||||||
const logger = require('../lib/utilities/logger');
|
|
||||||
|
|
||||||
const {
|
|
||||||
PUSH_ENDPOINT: pushEndpoint,
|
|
||||||
INSTANCE_ID: instanceId,
|
|
||||||
MANAGEMENT_TOKEN: managementToken,
|
|
||||||
} = process.env;
|
|
||||||
|
|
||||||
if (!pushEndpoint) {
|
|
||||||
logger.error('missing push endpoint env var');
|
|
||||||
process.exit(1);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!instanceId) {
|
|
||||||
logger.error('missing instance id env var');
|
|
||||||
process.exit(1);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!managementToken) {
|
|
||||||
logger.error('missing management token env var');
|
|
||||||
process.exit(1);
|
|
||||||
}
|
|
||||||
|
|
||||||
startPushConnectionHealthCheckServer(err => {
|
|
||||||
if (err) {
|
|
||||||
logger.error('could not start healthcheck server', { error: err });
|
|
||||||
process.exit(1);
|
|
||||||
}
|
|
||||||
const url = `${pushEndpoint}/${instanceId}/ws?metrics=1`;
|
|
||||||
startWSManagementClient(url, managementToken, err => {
|
|
||||||
if (err) {
|
|
||||||
logger.error('connection failed, exiting', { error: err });
|
|
||||||
process.exit(1);
|
|
||||||
}
|
|
||||||
logger.info('no more connection, exiting');
|
|
||||||
process.exit(0);
|
|
||||||
});
|
|
||||||
});
|
|
|
@ -1,46 +0,0 @@
|
||||||
#!/usr/bin/env node
|
|
||||||
'use strict'; // eslint-disable-line strict
|
|
||||||
|
|
||||||
const {
|
|
||||||
startWSManagementClient,
|
|
||||||
startPushConnectionHealthCheckServer,
|
|
||||||
} = require('../lib/management/push');
|
|
||||||
|
|
||||||
const logger = require('../lib/utilities/logger');
|
|
||||||
|
|
||||||
const {
|
|
||||||
PUSH_ENDPOINT: pushEndpoint,
|
|
||||||
INSTANCE_ID: instanceId,
|
|
||||||
MANAGEMENT_TOKEN: managementToken,
|
|
||||||
} = process.env;
|
|
||||||
|
|
||||||
if (!pushEndpoint) {
|
|
||||||
logger.error('missing push endpoint env var');
|
|
||||||
process.exit(1);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!instanceId) {
|
|
||||||
logger.error('missing instance id env var');
|
|
||||||
process.exit(1);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!managementToken) {
|
|
||||||
logger.error('missing management token env var');
|
|
||||||
process.exit(1);
|
|
||||||
}
|
|
||||||
|
|
||||||
startPushConnectionHealthCheckServer(err => {
|
|
||||||
if (err) {
|
|
||||||
logger.error('could not start healthcheck server', { error: err });
|
|
||||||
process.exit(1);
|
|
||||||
}
|
|
||||||
const url = `${pushEndpoint}/${instanceId}/ws?proxy=1`;
|
|
||||||
startWSManagementClient(url, managementToken, err => {
|
|
||||||
if (err) {
|
|
||||||
logger.error('connection failed, exiting', { error: err });
|
|
||||||
process.exit(1);
|
|
||||||
}
|
|
||||||
logger.info('no more connection, exiting');
|
|
||||||
process.exit(0);
|
|
||||||
});
|
|
||||||
});
|
|
|
@ -4,6 +4,7 @@
|
||||||
"metricsPort": 8002,
|
"metricsPort": 8002,
|
||||||
"metricsListenOn": [],
|
"metricsListenOn": [],
|
||||||
"replicationGroupId": "RG001",
|
"replicationGroupId": "RG001",
|
||||||
|
"workers": 4,
|
||||||
"restEndpoints": {
|
"restEndpoints": {
|
||||||
"localhost": "us-east-1",
|
"localhost": "us-east-1",
|
||||||
"127.0.0.1": "us-east-1",
|
"127.0.0.1": "us-east-1",
|
||||||
|
@ -101,6 +102,14 @@
|
||||||
"readPreference": "primary",
|
"readPreference": "primary",
|
||||||
"database": "metadata"
|
"database": "metadata"
|
||||||
},
|
},
|
||||||
|
"authdata": "authdata.json",
|
||||||
|
"backends": {
|
||||||
|
"auth": "file",
|
||||||
|
"data": "file",
|
||||||
|
"metadata": "mongodb",
|
||||||
|
"kms": "file",
|
||||||
|
"quota": "none"
|
||||||
|
},
|
||||||
"externalBackends": {
|
"externalBackends": {
|
||||||
"aws_s3": {
|
"aws_s3": {
|
||||||
"httpAgent": {
|
"httpAgent": {
|
|
@ -0,0 +1,71 @@
|
||||||
|
{
|
||||||
|
"port": 8000,
|
||||||
|
"listenOn": [],
|
||||||
|
"metricsPort": 8002,
|
||||||
|
"metricsListenOn": [],
|
||||||
|
"replicationGroupId": "RG001",
|
||||||
|
"restEndpoints": {
|
||||||
|
"localhost": "STANDARD",
|
||||||
|
"127.0.0.1": "STANDARD",
|
||||||
|
"yourhostname.ru": "STANDARD"
|
||||||
|
},
|
||||||
|
"websiteEndpoints": [
|
||||||
|
"static.yourhostname.ru"
|
||||||
|
],
|
||||||
|
"replicationEndpoints": [ {
|
||||||
|
"site": "zenko",
|
||||||
|
"servers": ["127.0.0.1:8000"],
|
||||||
|
"default": true
|
||||||
|
} ],
|
||||||
|
"log": {
|
||||||
|
"logLevel": "info",
|
||||||
|
"dumpLevel": "error"
|
||||||
|
},
|
||||||
|
"healthChecks": {
|
||||||
|
"allowFrom": ["127.0.0.1/8", "::1"]
|
||||||
|
},
|
||||||
|
"backends": {
|
||||||
|
"metadata": "mongodb"
|
||||||
|
},
|
||||||
|
"mongodb": {
|
||||||
|
"replicaSetHosts": "127.0.0.1:27017",
|
||||||
|
"writeConcern": "majority",
|
||||||
|
"replicaSet": "rs0",
|
||||||
|
"readPreference": "primary",
|
||||||
|
"database": "s3",
|
||||||
|
"authCredentials": {
|
||||||
|
"username": "s3",
|
||||||
|
"password": ""
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"externalBackends": {
|
||||||
|
"aws_s3": {
|
||||||
|
"httpAgent": {
|
||||||
|
"keepAlive": false,
|
||||||
|
"keepAliveMsecs": 1000,
|
||||||
|
"maxFreeSockets": 256,
|
||||||
|
"maxSockets": null
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"gcp": {
|
||||||
|
"httpAgent": {
|
||||||
|
"keepAlive": true,
|
||||||
|
"keepAliveMsecs": 1000,
|
||||||
|
"maxFreeSockets": 256,
|
||||||
|
"maxSockets": null
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"requests": {
|
||||||
|
"viaProxy": false,
|
||||||
|
"trustedProxyCIDRs": [],
|
||||||
|
"extractClientIPFromHeader": ""
|
||||||
|
},
|
||||||
|
"bucketNotificationDestinations": [
|
||||||
|
{
|
||||||
|
"resource": "target1",
|
||||||
|
"type": "dummy",
|
||||||
|
"host": "localhost:6000"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
53
constants.js
53
constants.js
|
@ -104,7 +104,6 @@ const constants = {
|
||||||
'policyStatus',
|
'policyStatus',
|
||||||
'publicAccessBlock',
|
'publicAccessBlock',
|
||||||
'requestPayment',
|
'requestPayment',
|
||||||
'restore',
|
|
||||||
'torrent',
|
'torrent',
|
||||||
],
|
],
|
||||||
|
|
||||||
|
@ -117,7 +116,8 @@ const constants = {
|
||||||
],
|
],
|
||||||
|
|
||||||
// user metadata header to set object locationConstraint
|
// user metadata header to set object locationConstraint
|
||||||
objectLocationConstraintHeader: 'x-amz-meta-scal-location-constraint',
|
objectLocationConstraintHeader: 'x-amz-storage-class',
|
||||||
|
lastModifiedHeader: 'x-amz-meta-x-scal-last-modified',
|
||||||
legacyLocations: ['sproxyd', 'legacy'],
|
legacyLocations: ['sproxyd', 'legacy'],
|
||||||
// declare here all existing service accounts and their properties
|
// declare here all existing service accounts and their properties
|
||||||
// (if any, otherwise an empty object)
|
// (if any, otherwise an empty object)
|
||||||
|
@ -130,7 +130,7 @@ const constants = {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
/* eslint-disable camelcase */
|
/* eslint-disable camelcase */
|
||||||
externalBackends: { aws_s3: true, azure: true, gcp: true, pfs: true },
|
externalBackends: { aws_s3: true, azure: true, gcp: true, pfs: true, dmf: true, azure_archive: true },
|
||||||
// some of the available data backends (if called directly rather
|
// some of the available data backends (if called directly rather
|
||||||
// than through the multiple backend gateway) need a key provided
|
// than through the multiple backend gateway) need a key provided
|
||||||
// as a string as first parameter of the get/delete methods.
|
// as a string as first parameter of the get/delete methods.
|
||||||
|
@ -176,6 +176,8 @@ const constants = {
|
||||||
'objectDeleteTagging',
|
'objectDeleteTagging',
|
||||||
'objectGetTagging',
|
'objectGetTagging',
|
||||||
'objectPutTagging',
|
'objectPutTagging',
|
||||||
|
'objectPutLegalHold',
|
||||||
|
'objectPutRetention',
|
||||||
],
|
],
|
||||||
// response header to be sent when there are invalid
|
// response header to be sent when there are invalid
|
||||||
// user metadata in the object's metadata
|
// user metadata in the object's metadata
|
||||||
|
@ -183,7 +185,6 @@ const constants = {
|
||||||
// Bucket specific queries supported by AWS that we do not currently support
|
// Bucket specific queries supported by AWS that we do not currently support
|
||||||
// these queries may or may not be supported at object level
|
// these queries may or may not be supported at object level
|
||||||
unsupportedBucketQueries: [
|
unsupportedBucketQueries: [
|
||||||
'tagging',
|
|
||||||
],
|
],
|
||||||
suppressedUtapiEventFields: [
|
suppressedUtapiEventFields: [
|
||||||
'object',
|
'object',
|
||||||
|
@ -197,7 +198,51 @@ const constants = {
|
||||||
'user',
|
'user',
|
||||||
'bucket',
|
'bucket',
|
||||||
],
|
],
|
||||||
|
arrayOfAllowed: [
|
||||||
|
'objectPutTagging',
|
||||||
|
'objectPutLegalHold',
|
||||||
|
'objectPutRetention',
|
||||||
|
],
|
||||||
allowedUtapiEventFilterStates: ['allow', 'deny'],
|
allowedUtapiEventFilterStates: ['allow', 'deny'],
|
||||||
|
allowedRestoreObjectRequestTierValues: ['Standard'],
|
||||||
|
lifecycleListing: {
|
||||||
|
CURRENT_TYPE: 'current',
|
||||||
|
NON_CURRENT_TYPE: 'noncurrent',
|
||||||
|
ORPHAN_DM_TYPE: 'orphan',
|
||||||
|
},
|
||||||
|
multiObjectDeleteConcurrency: 50,
|
||||||
|
maxScannedLifecycleListingEntries: 10000,
|
||||||
|
overheadField: [
|
||||||
|
'content-length',
|
||||||
|
'owner-id',
|
||||||
|
'versionId',
|
||||||
|
'isNull',
|
||||||
|
'isDeleteMarker',
|
||||||
|
],
|
||||||
|
unsupportedSignatureChecksums: new Set([
|
||||||
|
'STREAMING-UNSIGNED-PAYLOAD-TRAILER',
|
||||||
|
'STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER',
|
||||||
|
'STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD',
|
||||||
|
'STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD-TRAILER',
|
||||||
|
]),
|
||||||
|
supportedSignatureChecksums: new Set([
|
||||||
|
'UNSIGNED-PAYLOAD',
|
||||||
|
'STREAMING-AWS4-HMAC-SHA256-PAYLOAD',
|
||||||
|
]),
|
||||||
|
ipv4Regex: /^(\d{1,3}\.){3}\d{1,3}(\/(3[0-2]|[12]?\d))?$/,
|
||||||
|
ipv6Regex: /^([\da-f]{1,4}:){7}[\da-f]{1,4}$/i,
|
||||||
|
// The AWS assumed Role resource type
|
||||||
|
assumedRoleArnResourceType: 'assumed-role',
|
||||||
|
// Session name of the backbeat lifecycle assumed role session.
|
||||||
|
backbeatLifecycleSessionName: 'backbeat-lifecycle',
|
||||||
|
actionsToConsiderAsObjectPut: [
|
||||||
|
'initiateMultipartUpload',
|
||||||
|
'objectPutPart',
|
||||||
|
'completeMultipartUpload',
|
||||||
|
],
|
||||||
|
// if requester is not bucket owner, bucket policy actions should be denied with
|
||||||
|
// MethodNotAllowed error
|
||||||
|
onlyOwnerAllowed: ['bucketDeletePolicy', 'bucketGetPolicy', 'bucketPutPolicy'],
|
||||||
};
|
};
|
||||||
|
|
||||||
module.exports = constants;
|
module.exports = constants;
|
||||||
|
|
|
@ -199,6 +199,10 @@ if [[ -n "$BUCKET_DENY_FILTER" ]]; then
|
||||||
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .utapi.filter.deny.bucket=[\"$BUCKET_DENY_FILTER\"]"
|
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .utapi.filter.deny.bucket=[\"$BUCKET_DENY_FILTER\"]"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
if [[ "$TESTING_MODE" ]]; then
|
||||||
|
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .testingMode=true"
|
||||||
|
fi
|
||||||
|
|
||||||
if [[ $JQ_FILTERS_CONFIG != "." ]]; then
|
if [[ $JQ_FILTERS_CONFIG != "." ]]; then
|
||||||
jq "$JQ_FILTERS_CONFIG" config.json > config.json.tmp
|
jq "$JQ_FILTERS_CONFIG" config.json > config.json.tmp
|
||||||
mv config.json.tmp config.json
|
mv config.json.tmp config.json
|
||||||
|
|
|
@ -2,11 +2,12 @@
|
||||||
|
|
||||||
## Docker Image Generation
|
## Docker Image Generation
|
||||||
|
|
||||||
Docker images are hosted on [registry.scality.com](registry.scality.com).
|
Docker images are hosted on [ghcri.io](https://github.com/orgs/scality/packages).
|
||||||
CloudServer has two namespaces there:
|
CloudServer has a few images there:
|
||||||
|
|
||||||
* Production Namespace: registry.scality.com/cloudserver
|
* Cloudserver container image: ghcr.io/scality/cloudserver
|
||||||
* Dev Namespace: registry.scality.com/cloudserver-dev
|
* Dashboard oras image: ghcr.io/scality/cloudserver/cloudser-dashboard
|
||||||
|
* Policies oras image: ghcr.io/scality/cloudserver/cloudser-dashboard
|
||||||
|
|
||||||
With every CI build, the CI will push images, tagging the
|
With every CI build, the CI will push images, tagging the
|
||||||
content with the developer branch's short SHA-1 commit hash.
|
content with the developer branch's short SHA-1 commit hash.
|
||||||
|
@ -18,63 +19,55 @@ Tagged versions of cloudserver will be stored in the production namespace.
|
||||||
## How to Pull Docker Images
|
## How to Pull Docker Images
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
docker pull registry.scality.com/cloudserver-dev/cloudserver:<commit hash>
|
docker pull ghcr.io/scality/cloudserver:<commit hash>
|
||||||
docker pull registry.scality.com/cloudserver/cloudserver:<tag>
|
docker pull ghcr.io/scality/cloudserver:<tag>
|
||||||
```
|
```
|
||||||
|
|
||||||
## Release Process
|
## Release Process
|
||||||
|
|
||||||
To release a production image:
|
To release a production image:
|
||||||
|
|
||||||
* Checkout the relevant branch. In this example,
|
* Create a PR to bump the package version
|
||||||
we are working on development/8.3, and we want to release version `8.3.0`.
|
Update Cloudserver's `package.json` by bumping it to the relevant next
|
||||||
|
version in a new PR. Per example if the last released version was
|
||||||
```sh
|
`8.4.7`, the next version would be `8.4.8`.
|
||||||
git checkout development/8.3
|
|
||||||
```
|
|
||||||
|
|
||||||
* Tag the branch with the release version. In this example, `8.3.0`
|
|
||||||
|
|
||||||
```sh
|
|
||||||
git tag -a 8.3.0
|
|
||||||
# The message should be 'v<version>'
|
|
||||||
v8.3.0
|
|
||||||
```
|
|
||||||
|
|
||||||
* Push the tags to GitHub.
|
|
||||||
|
|
||||||
```sh
|
|
||||||
git push --tags
|
|
||||||
```
|
|
||||||
|
|
||||||
* With the following parameters, [force a build here](https://eve.devsca.com/github/scality/cloudserver/#/builders/3/force/force)
|
|
||||||
|
|
||||||
* Branch Name: The one used for the tag earlier. In this example 'development/8.3'
|
|
||||||
* Override Stage: 'release'
|
|
||||||
* Extra properties:
|
|
||||||
* name: `'tag'`, value: `[release version]`, in this example`'8.3.0'`
|
|
||||||
|
|
||||||
* Once the docker image is present on [registry.scality.com](registry.scality.com),
|
|
||||||
update CloudServers' `package.json`
|
|
||||||
by bumping it to the relevant next version in a new PR.
|
|
||||||
In this case, `8.3.1` .
|
|
||||||
|
|
||||||
```js
|
```js
|
||||||
{
|
{
|
||||||
"name": "@zenko/cloudserver",
|
"name": "cloudserver",
|
||||||
"version": "8.3.1", <--- Here
|
"version": "8.4.8", <--- Here
|
||||||
[...]
|
[...]
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
* Finally, once your PR has been reviewed, release the release version on Jira,
|
* Review & merge the PR
|
||||||
set up the next version, and approve your PR.
|
|
||||||
|
* Create the release on GitHub
|
||||||
|
|
||||||
|
* Go the Release tab (https://github.com/scality/cloudserver/releases);
|
||||||
|
* Click on the `Draft new release button`;
|
||||||
|
* In the `tag` field, type the name of the release (`8.4.8`), and confirm
|
||||||
|
to create the tag on publish;
|
||||||
|
* Click on `Generate release notes` button to fill the fields;
|
||||||
|
* Rename the release to `Release x.y.z` (e.g. `Release 8.4.8` in this case);
|
||||||
|
* Click to `Publish the release` to create the GitHub release and git tag
|
||||||
|
|
||||||
|
Notes:
|
||||||
|
* the Git tag will be created automatically.
|
||||||
|
* this should be done as soon as the PR is merged, so that the tag
|
||||||
|
is put on the "version bump" commit.
|
||||||
|
|
||||||
|
* With the following parameters, [force a build here](https://eve.devsca.com/github/scality/cloudserver/#/builders/3/force/force)
|
||||||
|
|
||||||
|
* Branch Name: The one used for the tag earlier. In this example `development/8.4`
|
||||||
|
* Override Stage: 'release'
|
||||||
|
* Extra properties:
|
||||||
|
* name: `'tag'`, value: `[release version]`, in this example`'8.4.8'`
|
||||||
|
|
||||||
|
* Release the release version on Jira
|
||||||
|
|
||||||
* Go to the [CloudServer release page](https://scality.atlassian.net/projects/CLDSRV?selectedItem=com.atlassian.jira.jira-projects-plugin:release-page)
|
* Go to the [CloudServer release page](https://scality.atlassian.net/projects/CLDSRV?selectedItem=com.atlassian.jira.jira-projects-plugin:release-page)
|
||||||
* Create a new version if necessary
|
* Create a next version
|
||||||
* Name: `[next version]`, in this example `8.3.1`
|
* Name: `[next version]`, in this example `8.4.9`
|
||||||
* Start Date: `[date of the release]`
|
* Click `...` and select `Release` on the recently released version (`8.4.8`)
|
||||||
* Click `...` and select `Release` on the release version
|
* Fill in the field to move incomplete version to the next one
|
||||||
* Return to the release ticket,
|
|
||||||
change the fix version of the ticket to the new version
|
|
||||||
* Return to your PR and type `/approve`
|
|
||||||
|
|
|
@ -1,13 +0,0 @@
|
||||||
#!/bin/bash
|
|
||||||
|
|
||||||
script_full_path=$(readlink -f "$0")
|
|
||||||
file_dir=$(dirname "$script_full_path")/..
|
|
||||||
|
|
||||||
PACKAGE_VERSION=$(cat $file_dir/package.json \
|
|
||||||
| grep version \
|
|
||||||
| head -1 \
|
|
||||||
| awk -F: '{ print $2 }' \
|
|
||||||
| sed 's/[",]//g' \
|
|
||||||
| tr -d '[[:space:]]')
|
|
||||||
|
|
||||||
echo $PACKAGE_VERSION
|
|
605
eve/main.yml
605
eve/main.yml
|
@ -1,605 +0,0 @@
|
||||||
---
|
|
||||||
version: 0.2
|
|
||||||
|
|
||||||
branches:
|
|
||||||
feature/*, documentation/*, improvement/*, bugfix/*, w/*, q/*, hotfix/*, dependabot/*, user/*:
|
|
||||||
stage: pre-merge
|
|
||||||
|
|
||||||
models:
|
|
||||||
- env: &global-env
|
|
||||||
azurebackend_AZURE_STORAGE_ACCESS_KEY: >-
|
|
||||||
%(secret:azure_storage_access_key)s
|
|
||||||
azurebackend_AZURE_STORAGE_ACCOUNT_NAME: >-
|
|
||||||
%(secret:azure_storage_account_name)s
|
|
||||||
azurebackend_AZURE_STORAGE_ENDPOINT: >-
|
|
||||||
%(secret:azure_storage_endpoint)s
|
|
||||||
azurebackend2_AZURE_STORAGE_ACCESS_KEY: >-
|
|
||||||
%(secret:azure_storage_access_key_2)s
|
|
||||||
azurebackend2_AZURE_STORAGE_ACCOUNT_NAME: >-
|
|
||||||
%(secret:azure_storage_account_name_2)s
|
|
||||||
azurebackend2_AZURE_STORAGE_ENDPOINT: >-
|
|
||||||
%(secret:azure_storage_endpoint_2)s
|
|
||||||
azurebackendmismatch_AZURE_STORAGE_ACCESS_KEY: >-
|
|
||||||
%(secret:azure_storage_access_key)s
|
|
||||||
azurebackendmismatch_AZURE_STORAGE_ACCOUNT_NAME: >-
|
|
||||||
%(secret:azure_storage_account_name)s
|
|
||||||
azurebackendmismatch_AZURE_STORAGE_ENDPOINT: >-
|
|
||||||
%(secret:azure_storage_endpoint)s
|
|
||||||
azurenonexistcontainer_AZURE_STORAGE_ACCESS_KEY: >-
|
|
||||||
%(secret:azure_storage_access_key)s
|
|
||||||
azurenonexistcontainer_AZURE_STORAGE_ACCOUNT_NAME: >-
|
|
||||||
%(secret:azure_storage_account_name)s
|
|
||||||
azurenonexistcontainer_AZURE_STORAGE_ENDPOINT: >-
|
|
||||||
%(secret:azure_storage_endpoint)s
|
|
||||||
azuretest_AZURE_BLOB_ENDPOINT: "%(secret:azure_storage_endpoint)s"
|
|
||||||
b2backend_B2_ACCOUNT_ID: "%(secret:b2backend_b2_account_id)s"
|
|
||||||
b2backend_B2_STORAGE_ACCESS_KEY: >-
|
|
||||||
%(secret:b2backend_b2_storage_access_key)s
|
|
||||||
GOOGLE_SERVICE_EMAIL: "%(secret:gcp_service_email)s"
|
|
||||||
GOOGLE_SERVICE_KEY: "%(secret:gcp_service_key)s"
|
|
||||||
AWS_S3_BACKEND_ACCESS_KEY: "%(secret:aws_s3_backend_access_key)s"
|
|
||||||
AWS_S3_BACKEND_SECRET_KEY: "%(secret:aws_s3_backend_secret_key)s"
|
|
||||||
AWS_S3_BACKEND_ACCESS_KEY_2: "%(secret:aws_s3_backend_access_key_2)s"
|
|
||||||
AWS_S3_BACKEND_SECRET_KEY_2: "%(secret:aws_s3_backend_secret_key_2)s"
|
|
||||||
AWS_GCP_BACKEND_ACCESS_KEY: "%(secret:aws_gcp_backend_access_key)s"
|
|
||||||
AWS_GCP_BACKEND_SECRET_KEY: "%(secret:aws_gcp_backend_secret_key)s"
|
|
||||||
AWS_GCP_BACKEND_ACCESS_KEY_2: "%(secret:aws_gcp_backend_access_key_2)s"
|
|
||||||
AWS_GCP_BACKEND_SECRET_KEY_2: "%(secret:aws_gcp_backend_secret_key_2)s"
|
|
||||||
b2backend_B2_STORAGE_ENDPOINT: "%(secret:b2backend_b2_storage_endpoint)s"
|
|
||||||
gcpbackend2_GCP_SERVICE_EMAIL: "%(secret:gcp2_service_email)s"
|
|
||||||
gcpbackend2_GCP_SERVICE_KEY: "%(secret:gcp2_service_key)s"
|
|
||||||
gcpbackend2_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
|
|
||||||
gcpbackend_GCP_SERVICE_EMAIL: "%(secret:gcp_service_email)s"
|
|
||||||
gcpbackend_GCP_SERVICE_KEY: "%(secret:gcp_service_key)s"
|
|
||||||
gcpbackendmismatch_GCP_SERVICE_EMAIL: >-
|
|
||||||
%(secret:gcpbackendmismatch_gcp_service_email)s
|
|
||||||
gcpbackendmismatch_GCP_SERVICE_KEY: >-
|
|
||||||
%(secret:gcpbackendmismatch_gcp_service_key)s
|
|
||||||
gcpbackend_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
|
|
||||||
gcpbackendmismatch_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
|
|
||||||
gcpbackendnoproxy_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
|
|
||||||
gcpbackendproxy_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
|
|
||||||
- env: &mongo-vars
|
|
||||||
S3BACKEND: "mem"
|
|
||||||
MPU_TESTING: "yes"
|
|
||||||
S3METADATA: mongodb
|
|
||||||
S3KMS: "file"
|
|
||||||
- env: &multiple-backend-vars
|
|
||||||
S3BACKEND: "mem"
|
|
||||||
S3DATA: "multiple"
|
|
||||||
MPU_TESTING: "yes"
|
|
||||||
S3KMS: "file"
|
|
||||||
- env: &file-mem-mpu
|
|
||||||
S3BACKEND: "file"
|
|
||||||
S3VAULT: "mem"
|
|
||||||
MPU_TESTING: "yes"
|
|
||||||
- env: &oras
|
|
||||||
REGISTRY: 'registry.scality.com'
|
|
||||||
PROJECT: '%(prop:git_slug)s'
|
|
||||||
LAYERS: >-
|
|
||||||
dashboard.json:application/grafana-dashboard+json
|
|
||||||
alerts.yaml:application/prometheus-alerts+yaml
|
|
||||||
- Git: &clone
|
|
||||||
name: Pull repo
|
|
||||||
repourl: '%(prop:git_reference)s'
|
|
||||||
shallow: true
|
|
||||||
retryFetch: true
|
|
||||||
haltOnFailure: true
|
|
||||||
- ShellCommand: &credentials
|
|
||||||
name: Setup Credentials
|
|
||||||
command: bash eve/workers/build/credentials.bash
|
|
||||||
haltOnFailure: true
|
|
||||||
env: *global-env
|
|
||||||
- ShellCommand: &yarn-install
|
|
||||||
name: install modules
|
|
||||||
command: yarn install --ignore-engines --frozen-lockfile --network-concurrency 1
|
|
||||||
haltOnFailure: true
|
|
||||||
- ShellCommand: &check-s3-action-logs
|
|
||||||
name: Check s3 action logs
|
|
||||||
command: |
|
|
||||||
LOGS=`cat /artifacts/s3.log | grep 'No actionLog'`
|
|
||||||
test `echo -n ${LOGS} | wc -l` -eq 0 || (echo $LOGS && false)
|
|
||||||
- Upload: &upload-artifacts
|
|
||||||
source: /artifacts
|
|
||||||
urls:
|
|
||||||
- "*"
|
|
||||||
- ShellCommand: &follow-s3-log
|
|
||||||
logfiles:
|
|
||||||
s3:
|
|
||||||
filename: /artifacts/s3.log
|
|
||||||
follow: true
|
|
||||||
- ShellCommand: &follow-s3-ceph-logs
|
|
||||||
logfiles:
|
|
||||||
ceph:
|
|
||||||
filename: /artifacts/ceph.log
|
|
||||||
follow: true
|
|
||||||
s3:
|
|
||||||
filename: /artifacts/s3.log
|
|
||||||
follow: true
|
|
||||||
- ShellCommand: &add-hostname
|
|
||||||
name: add hostname
|
|
||||||
command: |
|
|
||||||
echo "127.0.0.1 testrequestbucket.localhost" >> /etc/hosts
|
|
||||||
echo \
|
|
||||||
"127.0.0.1 bucketwebsitetester.s3-website-us-east-1.amazonaws.com" \
|
|
||||||
>> /etc/hosts
|
|
||||||
haltOnFailure: true
|
|
||||||
- ShellCommand: &setup-junit-upload
|
|
||||||
name: preparing junit files for upload
|
|
||||||
command: |
|
|
||||||
mkdir -p artifacts/junit
|
|
||||||
find . -name "*junit*.xml" -exec cp {} artifacts/junit/ ";"
|
|
||||||
alwaysRun: true
|
|
||||||
- Upload: &upload-junits
|
|
||||||
source: artifacts
|
|
||||||
urls:
|
|
||||||
- "*"
|
|
||||||
alwaysRun: true
|
|
||||||
- env: &docker_env
|
|
||||||
DEVELOPMENT_DOCKER_IMAGE_NAME: >-
|
|
||||||
registry.scality.com/%(prop:git_slug)s-dev/%(prop:git_slug)s
|
|
||||||
PRODUCTION_DOCKER_IMAGE_NAME: >-
|
|
||||||
registry.scality.com/%(prop:git_slug)s/%(prop:git_slug)s
|
|
||||||
- ShellCommand: &docker_login
|
|
||||||
name: Login to docker registry
|
|
||||||
command: >
|
|
||||||
docker login
|
|
||||||
-u "${HARBOR_LOGIN}"
|
|
||||||
-p "${HARBOR_PASSWORD}"
|
|
||||||
registry.scality.com
|
|
||||||
usePTY: true
|
|
||||||
env:
|
|
||||||
HARBOR_LOGIN: '%(secret:harbor_login)s'
|
|
||||||
HARBOR_PASSWORD: '%(secret:harbor_password)s'
|
|
||||||
- ShellCommand: &wait_docker_daemon
|
|
||||||
name: Wait for Docker daemon to be ready
|
|
||||||
command: |
|
|
||||||
bash -c '
|
|
||||||
for i in {1..150}
|
|
||||||
do
|
|
||||||
docker info &> /dev/null && exit
|
|
||||||
sleep 2
|
|
||||||
done
|
|
||||||
echo "Could not reach Docker daemon from buildbot worker" >&2
|
|
||||||
exit 1'
|
|
||||||
haltOnFailure: true
|
|
||||||
|
|
||||||
|
|
||||||
stages:
|
|
||||||
pre-merge:
|
|
||||||
worker:
|
|
||||||
type: local
|
|
||||||
steps:
|
|
||||||
- TriggerStages:
|
|
||||||
name: Launch all workers
|
|
||||||
stage_names:
|
|
||||||
- docker-build
|
|
||||||
- linting-coverage
|
|
||||||
- file-ft-tests
|
|
||||||
- multiple-backend-test
|
|
||||||
- mongo-v0-ft-tests
|
|
||||||
- mongo-v1-ft-tests
|
|
||||||
- ceph-backend-tests
|
|
||||||
- kmip-ft-tests
|
|
||||||
- utapi-v2-tests
|
|
||||||
waitForFinish: true
|
|
||||||
haltOnFailure: true
|
|
||||||
|
|
||||||
linting-coverage:
|
|
||||||
worker:
|
|
||||||
type: docker
|
|
||||||
path: eve/workers/build
|
|
||||||
volumes: &default_volumes
|
|
||||||
- '/home/eve/workspace'
|
|
||||||
steps:
|
|
||||||
- Git: *clone
|
|
||||||
- ShellCommand: *yarn-install
|
|
||||||
- ShellCommand: *add-hostname
|
|
||||||
- ShellCommand: *credentials
|
|
||||||
- ShellCommand:
|
|
||||||
name: Unit Coverage mandatory file
|
|
||||||
command: |
|
|
||||||
set -ex
|
|
||||||
test -f .git/HEAD
|
|
||||||
- ShellCommand:
|
|
||||||
name: Linting
|
|
||||||
command: |
|
|
||||||
set -ex
|
|
||||||
yarn run --silent lint -- --max-warnings 0
|
|
||||||
yarn run --silent lint_md
|
|
||||||
flake8 $(git ls-files "*.py")
|
|
||||||
yamllint -c yamllint.yml $(git ls-files "*.yml")
|
|
||||||
- ShellCommand:
|
|
||||||
name: Unit Coverage
|
|
||||||
command: |
|
|
||||||
set -ex
|
|
||||||
unset HTTP_PROXY HTTPS_PROXY NO_PROXY
|
|
||||||
unset http_proxy https_proxy no_proxy
|
|
||||||
mkdir -p $CIRCLE_TEST_REPORTS/unit
|
|
||||||
yarn test
|
|
||||||
yarn run test_versionid_base62
|
|
||||||
yarn run test_legacy_location
|
|
||||||
env: &shared-vars
|
|
||||||
<<: *global-env
|
|
||||||
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
|
|
||||||
CIRCLE_TEST_REPORTS: /tmp
|
|
||||||
CIRCLE_ARTIFACTS: /tmp
|
|
||||||
CI_REPORTS: /tmp
|
|
||||||
- ShellCommand:
|
|
||||||
name: Unit Coverage logs
|
|
||||||
command: find /tmp/unit -exec cat {} \;
|
|
||||||
- ShellCommand: *setup-junit-upload
|
|
||||||
- Upload: *upload-junits
|
|
||||||
|
|
||||||
multiple-backend-test:
|
|
||||||
worker:
|
|
||||||
type: kube_pod
|
|
||||||
path: eve/workers/pod.yaml
|
|
||||||
images:
|
|
||||||
aggressor: eve/workers/build
|
|
||||||
s3: "."
|
|
||||||
vars:
|
|
||||||
aggressorMem: "2560Mi"
|
|
||||||
s3Mem: "2560Mi"
|
|
||||||
env:
|
|
||||||
<<: *multiple-backend-vars
|
|
||||||
<<: *global-env
|
|
||||||
steps:
|
|
||||||
- Git: *clone
|
|
||||||
- ShellCommand: *credentials
|
|
||||||
- ShellCommand: *yarn-install
|
|
||||||
- ShellCommand:
|
|
||||||
command: |
|
|
||||||
bash -c "
|
|
||||||
source /root/.aws/exports &> /dev/null
|
|
||||||
set -ex
|
|
||||||
bash wait_for_local_port.bash 8000 40
|
|
||||||
yarn run multiple_backend_test
|
|
||||||
yarn run ft_awssdk_external_backends"
|
|
||||||
<<: *follow-s3-log
|
|
||||||
env:
|
|
||||||
<<: *multiple-backend-vars
|
|
||||||
<<: *global-env
|
|
||||||
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
|
|
||||||
- ShellCommand:
|
|
||||||
command: mvn test
|
|
||||||
workdir: build/tests/functional/jaws
|
|
||||||
<<: *follow-s3-log
|
|
||||||
env:
|
|
||||||
<<: *multiple-backend-vars
|
|
||||||
- ShellCommand:
|
|
||||||
command: rspec tests.rb
|
|
||||||
workdir: build/tests/functional/fog
|
|
||||||
<<: *follow-s3-log
|
|
||||||
env:
|
|
||||||
<<: *multiple-backend-vars
|
|
||||||
- ShellCommand: *check-s3-action-logs
|
|
||||||
- ShellCommand: *setup-junit-upload
|
|
||||||
- Upload: *upload-artifacts
|
|
||||||
- Upload: *upload-junits
|
|
||||||
|
|
||||||
ceph-backend-tests:
|
|
||||||
worker:
|
|
||||||
type: kube_pod
|
|
||||||
path: eve/workers/pod.yaml
|
|
||||||
images:
|
|
||||||
aggressor: eve/workers/build
|
|
||||||
s3: "."
|
|
||||||
ceph: eve/workers/ceph
|
|
||||||
vars:
|
|
||||||
aggressorMem: "2500Mi"
|
|
||||||
s3Mem: "2560Mi"
|
|
||||||
redis: enabled
|
|
||||||
env:
|
|
||||||
<<: *multiple-backend-vars
|
|
||||||
<<: *global-env
|
|
||||||
S3METADATA: mongodb
|
|
||||||
CI_CEPH: "true"
|
|
||||||
MPU_TESTING: "yes"
|
|
||||||
S3_LOCATION_FILE: tests/locationConfig/locationConfigCeph.json
|
|
||||||
steps:
|
|
||||||
- Git: *clone
|
|
||||||
- ShellCommand: *credentials
|
|
||||||
- ShellCommand: *yarn-install
|
|
||||||
- ShellCommand:
|
|
||||||
command: |
|
|
||||||
bash -c "
|
|
||||||
source /root/.aws/exports &> /dev/null
|
|
||||||
set -ex
|
|
||||||
bash eve/workers/ceph/wait_for_ceph.sh
|
|
||||||
bash wait_for_local_port.bash 27018 40
|
|
||||||
bash wait_for_local_port.bash 8000 40
|
|
||||||
yarn run multiple_backend_test"
|
|
||||||
env:
|
|
||||||
<<: *multiple-backend-vars
|
|
||||||
<<: *global-env
|
|
||||||
S3METADATA: mem
|
|
||||||
<<: *follow-s3-ceph-logs
|
|
||||||
- ShellCommand:
|
|
||||||
command: mvn test
|
|
||||||
workdir: build/tests/functional/jaws
|
|
||||||
<<: *follow-s3-ceph-logs
|
|
||||||
env:
|
|
||||||
<<: *multiple-backend-vars
|
|
||||||
- ShellCommand:
|
|
||||||
command: rspec tests.rb
|
|
||||||
workdir: build/tests/functional/fog
|
|
||||||
<<: *follow-s3-ceph-logs
|
|
||||||
env:
|
|
||||||
<<: *multiple-backend-vars
|
|
||||||
- ShellCommand:
|
|
||||||
command: |
|
|
||||||
yarn run ft_awssdk &&
|
|
||||||
yarn run ft_s3cmd
|
|
||||||
env:
|
|
||||||
<<: *file-mem-mpu
|
|
||||||
<<: *global-env
|
|
||||||
S3METADATA: mongodb
|
|
||||||
S3_LOCATION_FILE: "/kube_pod-prod-cloudserver-backend-0/\
|
|
||||||
build/tests/locationConfig/locationConfigCeph.json"
|
|
||||||
<<: *follow-s3-ceph-logs
|
|
||||||
- ShellCommand: *setup-junit-upload
|
|
||||||
- Upload: *upload-artifacts
|
|
||||||
- Upload: *upload-junits
|
|
||||||
|
|
||||||
mongo-v0-ft-tests:
|
|
||||||
worker:
|
|
||||||
type: kube_pod
|
|
||||||
path: eve/workers/pod.yaml
|
|
||||||
images:
|
|
||||||
aggressor: eve/workers/build
|
|
||||||
s3: "."
|
|
||||||
vars:
|
|
||||||
aggressorMem: "2Gi"
|
|
||||||
s3Mem: "1664Mi"
|
|
||||||
redis: enabled
|
|
||||||
env:
|
|
||||||
<<: *mongo-vars
|
|
||||||
<<: *global-env
|
|
||||||
DEFAULT_BUCKET_KEY_FORMAT: "v0"
|
|
||||||
steps:
|
|
||||||
- Git: *clone
|
|
||||||
- ShellCommand: *credentials
|
|
||||||
- ShellCommand: *yarn-install
|
|
||||||
- ShellCommand:
|
|
||||||
command: |
|
|
||||||
set -ex
|
|
||||||
bash wait_for_local_port.bash 8000 40
|
|
||||||
yarn run ft_test
|
|
||||||
<<: *follow-s3-log
|
|
||||||
env:
|
|
||||||
<<: *mongo-vars
|
|
||||||
<<: *global-env
|
|
||||||
DEFAULT_BUCKET_KEY_FORMAT: "v0"
|
|
||||||
- ShellCommand: *setup-junit-upload
|
|
||||||
- Upload: *upload-artifacts
|
|
||||||
- Upload: *upload-junits
|
|
||||||
|
|
||||||
mongo-v1-ft-tests:
|
|
||||||
worker:
|
|
||||||
type: kube_pod
|
|
||||||
path: eve/workers/pod.yaml
|
|
||||||
images:
|
|
||||||
aggressor: eve/workers/build
|
|
||||||
s3: "."
|
|
||||||
vars:
|
|
||||||
aggressorMem: "2Gi"
|
|
||||||
s3Mem: "1664Mi"
|
|
||||||
redis: enabled
|
|
||||||
env:
|
|
||||||
<<: *mongo-vars
|
|
||||||
<<: *global-env
|
|
||||||
DEFAULT_BUCKET_KEY_FORMAT: "v1"
|
|
||||||
METADATA_MAX_CACHED_BUCKETS: "1"
|
|
||||||
steps:
|
|
||||||
- Git: *clone
|
|
||||||
- ShellCommand: *credentials
|
|
||||||
- ShellCommand: *yarn-install
|
|
||||||
- ShellCommand:
|
|
||||||
command: |
|
|
||||||
set -ex
|
|
||||||
bash wait_for_local_port.bash 8000 40
|
|
||||||
yarn run ft_test
|
|
||||||
yarn run ft_mixed_bucket_format_version
|
|
||||||
<<: *follow-s3-log
|
|
||||||
env:
|
|
||||||
<<: *mongo-vars
|
|
||||||
<<: *global-env
|
|
||||||
DEFAULT_BUCKET_KEY_FORMAT: "v1"
|
|
||||||
METADATA_MAX_CACHED_BUCKETS: "1"
|
|
||||||
- ShellCommand: *setup-junit-upload
|
|
||||||
- Upload: *upload-artifacts
|
|
||||||
- Upload: *upload-junits
|
|
||||||
|
|
||||||
file-ft-tests:
|
|
||||||
worker:
|
|
||||||
type: kube_pod
|
|
||||||
path: eve/workers/pod.yaml
|
|
||||||
images:
|
|
||||||
aggressor: eve/workers/build
|
|
||||||
s3: "."
|
|
||||||
vars:
|
|
||||||
aggressorMem: "3Gi"
|
|
||||||
s3Mem: "2560Mi"
|
|
||||||
redis: enabled
|
|
||||||
env:
|
|
||||||
<<: *file-mem-mpu
|
|
||||||
<<: *global-env
|
|
||||||
steps:
|
|
||||||
- Git: *clone
|
|
||||||
- ShellCommand: *credentials
|
|
||||||
- ShellCommand: *yarn-install
|
|
||||||
- ShellCommand:
|
|
||||||
command: |
|
|
||||||
set -ex
|
|
||||||
bash wait_for_local_port.bash 8000 40
|
|
||||||
yarn run ft_test
|
|
||||||
<<: *follow-s3-log
|
|
||||||
env:
|
|
||||||
<<: *file-mem-mpu
|
|
||||||
<<: *global-env
|
|
||||||
- ShellCommand: *check-s3-action-logs
|
|
||||||
- ShellCommand: *setup-junit-upload
|
|
||||||
- Upload: *upload-artifacts
|
|
||||||
- Upload: *upload-junits
|
|
||||||
|
|
||||||
kmip-ft-tests:
|
|
||||||
worker:
|
|
||||||
type: kube_pod
|
|
||||||
path: eve/workers/pod.yaml
|
|
||||||
images:
|
|
||||||
aggressor: eve/workers/build
|
|
||||||
s3: "."
|
|
||||||
pykmip: eve/workers/pykmip
|
|
||||||
vars:
|
|
||||||
aggressorMem: "2Gi"
|
|
||||||
s3Mem: "1664Mi"
|
|
||||||
redis: enabled
|
|
||||||
pykmip: enabled
|
|
||||||
env:
|
|
||||||
<<: *file-mem-mpu
|
|
||||||
<<: *global-env
|
|
||||||
steps:
|
|
||||||
- Git: *clone
|
|
||||||
- ShellCommand: *credentials
|
|
||||||
- ShellCommand: *yarn-install
|
|
||||||
- ShellCommand:
|
|
||||||
command: |
|
|
||||||
set -ex
|
|
||||||
bash wait_for_local_port.bash 8000 40
|
|
||||||
bash wait_for_local_port.bash 5696 40
|
|
||||||
yarn run ft_kmip
|
|
||||||
logfiles:
|
|
||||||
pykmip:
|
|
||||||
filename: /artifacts/pykmip.log
|
|
||||||
follow: true
|
|
||||||
s3:
|
|
||||||
filename: /artifacts/s3.log
|
|
||||||
follow: true
|
|
||||||
env:
|
|
||||||
<<: *file-mem-mpu
|
|
||||||
<<: *global-env
|
|
||||||
- ShellCommand: *setup-junit-upload
|
|
||||||
- Upload: *upload-artifacts
|
|
||||||
- Upload: *upload-junits
|
|
||||||
|
|
||||||
utapi-v2-tests:
|
|
||||||
worker:
|
|
||||||
type: kube_pod
|
|
||||||
path: eve/workers/pod.yaml
|
|
||||||
images:
|
|
||||||
aggressor: eve/workers/build
|
|
||||||
s3: "."
|
|
||||||
vars:
|
|
||||||
aggressorMem: "2Gi"
|
|
||||||
s3Mem: "2Gi"
|
|
||||||
env:
|
|
||||||
ENABLE_UTAPI_V2: t
|
|
||||||
S3BACKEND: mem
|
|
||||||
BUCKET_DENY_FILTER: utapi-event-filter-deny-bucket
|
|
||||||
steps:
|
|
||||||
- Git: *clone
|
|
||||||
- ShellCommand: *credentials
|
|
||||||
- ShellCommand: *yarn-install
|
|
||||||
- ShellCommand:
|
|
||||||
command: |
|
|
||||||
bash -c "
|
|
||||||
source /root/.aws/exports &> /dev/null
|
|
||||||
set -ex
|
|
||||||
bash wait_for_local_port.bash 8000 40
|
|
||||||
yarn run test_utapi_v2"
|
|
||||||
<<: *follow-s3-log
|
|
||||||
env:
|
|
||||||
ENABLE_UTAPI_V2: t
|
|
||||||
S3BACKEND: mem
|
|
||||||
- ShellCommand: *check-s3-action-logs
|
|
||||||
- ShellCommand: *setup-junit-upload
|
|
||||||
- Upload: *upload-artifacts
|
|
||||||
- Upload: *upload-junits
|
|
||||||
|
|
||||||
# The docker-build stage ensures that your images are built on every commit
|
|
||||||
# and also hosted on the registry to help you pull it up and
|
|
||||||
# test it in a real environment if needed.
|
|
||||||
# It also allows us to pull and rename it when performing a release.
|
|
||||||
docker-build:
|
|
||||||
worker: &docker_worker
|
|
||||||
type: kube_pod
|
|
||||||
path: eve/workers/docker/pod.yaml
|
|
||||||
images:
|
|
||||||
worker: eve/workers/docker
|
|
||||||
steps:
|
|
||||||
- Git: *clone
|
|
||||||
- ShellCommand: *wait_docker_daemon
|
|
||||||
- ShellCommand: *docker_login
|
|
||||||
- ShellCommand:
|
|
||||||
name: docker build
|
|
||||||
command: >-
|
|
||||||
docker build .
|
|
||||||
--tag=${DEVELOPMENT_DOCKER_IMAGE_NAME}:%(prop:commit_short_revision)s
|
|
||||||
env: *docker_env
|
|
||||||
haltOnFailure: true
|
|
||||||
- ShellCommand:
|
|
||||||
name: push docker image into the development namespace
|
|
||||||
command: docker push ${DEVELOPMENT_DOCKER_IMAGE_NAME}
|
|
||||||
haltOnFailure: true
|
|
||||||
env: *docker_env
|
|
||||||
- ShellCommand: &oras_login
|
|
||||||
name: Oras login
|
|
||||||
command:
|
|
||||||
oras login --username "${HARBOR_LOGIN}" --password "${HARBOR_PASSWORD}" ${REGISTRY}
|
|
||||||
env:
|
|
||||||
<<: *oras
|
|
||||||
HARBOR_LOGIN: '%(secret:harbor_login)s'
|
|
||||||
HARBOR_PASSWORD: '%(secret:harbor_password)s'
|
|
||||||
- ShellCommand:
|
|
||||||
name: push dashboards to the development namespace
|
|
||||||
command: |
|
|
||||||
for revision in %(prop:commit_short_revision)s latest ; do
|
|
||||||
oras push ${REGISTRY}/${PROJECT}-dev/${PROJECT}-dashboards:$revision ${LAYERS}
|
|
||||||
done
|
|
||||||
env: *oras
|
|
||||||
workdir: build/monitoring/
|
|
||||||
|
|
||||||
|
|
||||||
# This stage can be used to release your Docker image.
|
|
||||||
# To use this stage:
|
|
||||||
# 1. Tag the repository
|
|
||||||
# 2. Force a build using:
|
|
||||||
# * A branch that ideally matches the tag
|
|
||||||
# * The release stage
|
|
||||||
# * An extra property with the name tag and its value being the actual tag
|
|
||||||
release:
|
|
||||||
worker:
|
|
||||||
type: local
|
|
||||||
steps:
|
|
||||||
- TriggerStages:
|
|
||||||
stage_names:
|
|
||||||
- docker-release
|
|
||||||
haltOnFailure: true
|
|
||||||
docker-release:
|
|
||||||
worker: *docker_worker
|
|
||||||
steps:
|
|
||||||
- Git: *clone
|
|
||||||
- ShellCommand: *wait_docker_daemon
|
|
||||||
- ShellCommand: *docker_login
|
|
||||||
- ShellCommand:
|
|
||||||
name: Checkout tag
|
|
||||||
command: git checkout refs/tags/%(prop:tag)s
|
|
||||||
haltOnFailure: true
|
|
||||||
- ShellCommand:
|
|
||||||
name: docker build
|
|
||||||
command: >-
|
|
||||||
docker build .
|
|
||||||
--tag=${PRODUCTION_DOCKER_IMAGE_NAME}:%(prop:tag)s
|
|
||||||
env: *docker_env
|
|
||||||
- ShellCommand:
|
|
||||||
name: publish docker image to Scality Production OCI registry
|
|
||||||
command: docker push ${PRODUCTION_DOCKER_IMAGE_NAME}:%(prop:tag)s
|
|
||||||
env: *docker_env
|
|
||||||
- ShellCommand: *oras_login
|
|
||||||
- ShellCommand:
|
|
||||||
name: push dashboards to the production namespace
|
|
||||||
command: |
|
|
||||||
oras push ${REGISTRY}/${PROJECT}/${PROJECT}-dashboards:%(prop:tag)s ${LAYERS}
|
|
||||||
env: *oras
|
|
||||||
workdir: build/monitoring/
|
|
|
@ -1,62 +0,0 @@
|
||||||
FROM buildpack-deps:bionic-curl
|
|
||||||
|
|
||||||
#
|
|
||||||
# Install packages needed by the buildchain
|
|
||||||
#
|
|
||||||
ENV LANG C.UTF-8
|
|
||||||
COPY ./s3_packages.list ./buildbot_worker_packages.list /tmp/
|
|
||||||
RUN curl -sS http://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - \
|
|
||||||
&& echo "deb http://dl.yarnpkg.com/debian/ stable main" | tee /etc/apt/sources.list.d/yarn.list \
|
|
||||||
&& apt-get update \
|
|
||||||
&& apt-get install -y yarn \
|
|
||||||
&& cat /tmp/*packages.list | xargs apt-get install -y \
|
|
||||||
&& update-ca-certificates \
|
|
||||||
&& git clone https://github.com/tj/n.git \
|
|
||||||
&& make -C ./n \
|
|
||||||
&& n 16.13.2 \
|
|
||||||
&& pip install pip==9.0.1 \
|
|
||||||
&& rm -rf ./n \
|
|
||||||
&& rm -rf /var/lib/apt/lists/* \
|
|
||||||
&& rm -f /tmp/packages.list
|
|
||||||
|
|
||||||
#
|
|
||||||
# Add user eve
|
|
||||||
#
|
|
||||||
|
|
||||||
RUN adduser -u 1042 --home /home/eve --disabled-password --gecos "" eve \
|
|
||||||
&& adduser eve sudo \
|
|
||||||
&& sed -ri 's/(%sudo.*)ALL$/\1NOPASSWD:ALL/' /etc/sudoers
|
|
||||||
#
|
|
||||||
# Install Dependencies
|
|
||||||
#
|
|
||||||
|
|
||||||
# Install RVM and gems
|
|
||||||
ENV RUBY_VERSION="2.5.0"
|
|
||||||
RUN gem update --system
|
|
||||||
|
|
||||||
RUN gpg2 --keyserver hkp://pgp.mit.edu --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3 7D2BAF1CF37B13E2069D6956105BD0E739499BDB \
|
|
||||||
&& curl -sSL https://get.rvm.io | bash -s stable --ruby=$RUBY_VERSION \
|
|
||||||
&& usermod -a -G rvm eve
|
|
||||||
|
|
||||||
COPY ./gems.list /tmp/
|
|
||||||
|
|
||||||
RUN /bin/bash -l -c "\
|
|
||||||
source /usr/local/rvm/scripts/rvm \
|
|
||||||
&& cat /tmp/gems.list | xargs gem install \
|
|
||||||
&& rm /tmp/gems.list"
|
|
||||||
|
|
||||||
# Install Pip packages
|
|
||||||
COPY ./pip_packages.list /tmp/
|
|
||||||
RUN cat /tmp/pip_packages.list | xargs pip install \
|
|
||||||
&& rm -f /tmp/pip_packages.list \
|
|
||||||
&& mkdir /home/eve/.aws \
|
|
||||||
&& chown eve /home/eve/.aws
|
|
||||||
|
|
||||||
#
|
|
||||||
# Run buildbot-worker on startup
|
|
||||||
#
|
|
||||||
|
|
||||||
ARG BUILDBOT_VERSION
|
|
||||||
RUN pip install buildbot-worker==$BUILDBOT_VERSION
|
|
||||||
|
|
||||||
CMD ["/bin/bash", "-l", "-c", "buildbot-worker create-worker . $BUILDMASTER:$BUILDMASTER_PORT $WORKERNAME $WORKERPASS && buildbot-worker start --nodaemon"]
|
|
|
@ -1,13 +0,0 @@
|
||||||
ca-certificates
|
|
||||||
git
|
|
||||||
gnupg
|
|
||||||
libffi-dev
|
|
||||||
libssl-dev
|
|
||||||
python-pip
|
|
||||||
python2.7
|
|
||||||
python2.7-dev
|
|
||||||
software-properties-common
|
|
||||||
sudo
|
|
||||||
tcl
|
|
||||||
wget
|
|
||||||
procps
|
|
|
@ -1,5 +0,0 @@
|
||||||
nokogiri:1.12.5
|
|
||||||
fog-aws:1.3.0
|
|
||||||
json
|
|
||||||
mime-types:3.1
|
|
||||||
rspec:3.5
|
|
|
@ -1,3 +0,0 @@
|
||||||
flake8
|
|
||||||
s3cmd==1.6.1
|
|
||||||
yamllint
|
|
|
@ -1,15 +0,0 @@
|
||||||
build-essential
|
|
||||||
ca-certificates
|
|
||||||
curl
|
|
||||||
default-jdk
|
|
||||||
gnupg2
|
|
||||||
libdigest-hmac-perl
|
|
||||||
lsof
|
|
||||||
maven
|
|
||||||
netcat
|
|
||||||
redis-server
|
|
||||||
yarn
|
|
||||||
zlib1g-dev
|
|
||||||
jq
|
|
||||||
openssl
|
|
||||||
ruby-full
|
|
|
@ -1,33 +0,0 @@
|
||||||
FROM centos:7
|
|
||||||
|
|
||||||
ARG BUILDBOT_VERSION=0.9.12
|
|
||||||
|
|
||||||
VOLUME /home/eve/workspace
|
|
||||||
|
|
||||||
WORKDIR /home/eve/workspace
|
|
||||||
|
|
||||||
RUN yum install -y epel-release \
|
|
||||||
&& yum-config-manager \
|
|
||||||
--add-repo \
|
|
||||||
https://download.docker.com/linux/centos/docker-ce.repo \
|
|
||||||
&& yum install -y \
|
|
||||||
python-devel \
|
|
||||||
python-pip \
|
|
||||||
python36 \
|
|
||||||
python36-devel \
|
|
||||||
python36-pip \
|
|
||||||
git \
|
|
||||||
docker-ce-cli-18.09.6 \
|
|
||||||
which \
|
|
||||||
&& adduser -u 1042 --home /home/eve eve --groups docker \
|
|
||||||
&& chown -R eve:eve /home/eve \
|
|
||||||
&& pip3 install buildbot-worker==${BUILDBOT_VERSION}
|
|
||||||
|
|
||||||
|
|
||||||
ARG ORAS_VERSION=0.12.0
|
|
||||||
RUN curl -LO https://github.com/oras-project/oras/releases/download/v${ORAS_VERSION}/oras_${ORAS_VERSION}_linux_amd64.tar.gz && \
|
|
||||||
mkdir -p oras-install/ && \
|
|
||||||
tar -zxf oras_${ORAS_VERSION}_*.tar.gz -C /usr/local/bin oras && \
|
|
||||||
rm -rf oras_${ORAS_VERSION}_*.tar.gz oras-install/
|
|
||||||
|
|
||||||
CMD buildbot-worker create-worker . ${BUILDMASTER}:${BUILDMASTER_PORT} ${WORKERNAME} ${WORKERPASS} && buildbot-worker start --nodaemon
|
|
|
@ -1,43 +0,0 @@
|
||||||
---
|
|
||||||
apiVersion: v1
|
|
||||||
kind: Pod
|
|
||||||
metadata:
|
|
||||||
name: worker
|
|
||||||
spec:
|
|
||||||
containers:
|
|
||||||
- name: build-worker
|
|
||||||
image: "{{ images.worker }}"
|
|
||||||
resources:
|
|
||||||
requests:
|
|
||||||
cpu: "250m"
|
|
||||||
memory: 2Gi
|
|
||||||
limits:
|
|
||||||
cpu: "1"
|
|
||||||
memory: 2Gi
|
|
||||||
env:
|
|
||||||
- name: DOCKER_HOST
|
|
||||||
value: localhost:2375
|
|
||||||
volumeMounts:
|
|
||||||
- name: worker-workspace
|
|
||||||
mountPath: /home/eve/workspace
|
|
||||||
- name: dind-daemon
|
|
||||||
image: docker:18.09.6-dind
|
|
||||||
resources:
|
|
||||||
requests:
|
|
||||||
cpu: "500m"
|
|
||||||
memory: 2Gi
|
|
||||||
limits:
|
|
||||||
cpu: "1"
|
|
||||||
memory: 2Gi
|
|
||||||
securityContext:
|
|
||||||
privileged: true
|
|
||||||
volumeMounts:
|
|
||||||
- name: docker-storage
|
|
||||||
mountPath: /var/lib/docker
|
|
||||||
- name: worker-workspace
|
|
||||||
mountPath: /home/eve/workspace
|
|
||||||
volumes:
|
|
||||||
- name: docker-storage
|
|
||||||
emptyDir: {}
|
|
||||||
- name: worker-workspace
|
|
||||||
emptyDir: {}
|
|
|
@ -1,233 +0,0 @@
|
||||||
---
|
|
||||||
apiVersion: v1
|
|
||||||
kind: Pod
|
|
||||||
metadata:
|
|
||||||
name: "proxy-ci-test-pod"
|
|
||||||
spec:
|
|
||||||
restartPolicy: Never
|
|
||||||
terminationGracePeriodSeconds: 10
|
|
||||||
hostAliases:
|
|
||||||
- ip: "127.0.0.1"
|
|
||||||
hostnames:
|
|
||||||
- "bucketwebsitetester.s3-website-us-east-1.amazonaws.com"
|
|
||||||
- "testrequestbucket.localhost"
|
|
||||||
- "pykmip.local"
|
|
||||||
{% if vars.pykmip is defined and vars.pykmip == 'enabled' -%}
|
|
||||||
initContainers:
|
|
||||||
- name: kmip-certs-installer
|
|
||||||
image: {{ images.pykmip }}
|
|
||||||
command: [ 'sh', '-c', 'cp /ssl/* /ssl-kmip/']
|
|
||||||
volumeMounts:
|
|
||||||
- name: kmip-certs
|
|
||||||
readOnly: false
|
|
||||||
mountPath: /ssl-kmip
|
|
||||||
{%- endif %}
|
|
||||||
containers:
|
|
||||||
{% if vars.env.S3METADATA is defined and vars.env.S3METADATA == "mongodb" -%}
|
|
||||||
- name: mongo
|
|
||||||
image: scality/ci-mongo:3.6.8
|
|
||||||
imagePullPolicy: IfNotPresent
|
|
||||||
resources:
|
|
||||||
requests:
|
|
||||||
cpu: 500m
|
|
||||||
memory: 1Gi
|
|
||||||
limits:
|
|
||||||
cpu: 500m
|
|
||||||
memory: 1Gi
|
|
||||||
{%- endif %}
|
|
||||||
- name: aggressor
|
|
||||||
image: {{ images.aggressor }}
|
|
||||||
imagePullPolicy: IfNotPresent
|
|
||||||
resources:
|
|
||||||
requests:
|
|
||||||
cpu: "1"
|
|
||||||
memory: {{ vars.aggressorMem }}
|
|
||||||
limits:
|
|
||||||
cpu: "1"
|
|
||||||
memory: {{ vars.aggressorMem }}
|
|
||||||
volumeMounts:
|
|
||||||
- name: creds
|
|
||||||
readOnly: false
|
|
||||||
mountPath: /root/.aws
|
|
||||||
- name: artifacts
|
|
||||||
readOnly: true
|
|
||||||
mountPath: /artifacts
|
|
||||||
command:
|
|
||||||
- bash
|
|
||||||
- -lc
|
|
||||||
- |
|
|
||||||
buildbot-worker create-worker . $BUILDMASTER:$BUILDMASTER_PORT $WORKERNAME $WORKERPASS
|
|
||||||
buildbot-worker start --nodaemon
|
|
||||||
env:
|
|
||||||
- name: CI
|
|
||||||
value: "true"
|
|
||||||
- name: ENABLE_LOCAL_CACHE
|
|
||||||
value: "true"
|
|
||||||
- name: REPORT_TOKEN
|
|
||||||
value: "report-token-1"
|
|
||||||
- name: REMOTE_MANAGEMENT_DISABLE
|
|
||||||
value: "1"
|
|
||||||
{% for key, value in vars.env.items() %}
|
|
||||||
- name: {{ key }}
|
|
||||||
value: "{{ value }}"
|
|
||||||
{% endfor %}
|
|
||||||
- name: s3
|
|
||||||
image: {{ images.s3 }}
|
|
||||||
imagePullPolicy: IfNotPresent
|
|
||||||
resources:
|
|
||||||
requests:
|
|
||||||
cpu: "1750m"
|
|
||||||
memory: {{ vars.s3Mem }}
|
|
||||||
limits:
|
|
||||||
cpu: "1750m"
|
|
||||||
memory: {{ vars.s3Mem }}
|
|
||||||
volumeMounts:
|
|
||||||
- name: creds
|
|
||||||
readOnly: false
|
|
||||||
mountPath: /root/.aws
|
|
||||||
- name: certs
|
|
||||||
readOnly: false
|
|
||||||
mountPath: /tmp
|
|
||||||
- name: artifacts
|
|
||||||
readOnly: false
|
|
||||||
mountPath: /artifacts
|
|
||||||
- name: kmip-certs
|
|
||||||
readOnly: false
|
|
||||||
mountPath: /ssl-kmip
|
|
||||||
command:
|
|
||||||
- bash
|
|
||||||
- -ec
|
|
||||||
- |
|
|
||||||
sleep 10 # wait for mongo
|
|
||||||
/usr/src/app/docker-entrypoint.sh yarn start | tee -a /artifacts/s3.log
|
|
||||||
env:
|
|
||||||
{% if vars.env.S3DATA is defined and vars.env.S3DATA == "multiple" and vars.env.CI_CEPH is not defined -%}
|
|
||||||
- name: S3_LOCATION_FILE
|
|
||||||
value: "/usr/src/app/tests/locationConfig/locationConfigTests.json"
|
|
||||||
{%- endif %}
|
|
||||||
{% if vars.env.S3DATA is defined and vars.env.S3DATA == "multiple" and vars.env.CI_CEPH is defined and vars.env.CI_CEPH == "true" -%}
|
|
||||||
- name: S3_LOCATION_FILE
|
|
||||||
value: "/usr/src/app/tests/locationConfig/locationConfigCeph.json"
|
|
||||||
{%- endif %}
|
|
||||||
{% if vars.pykmip is defined and vars.pykmip == 'enabled' -%}
|
|
||||||
- name: S3KMS
|
|
||||||
value: kmip
|
|
||||||
- name: S3KMIP_PORT
|
|
||||||
value: "5696"
|
|
||||||
- name: S3KMIP_HOSTS
|
|
||||||
value: "pykmip.local"
|
|
||||||
- name: S3KMIP_COMPOUND_CREATE
|
|
||||||
value: "false"
|
|
||||||
- name: S3KMIP_BUCKET_ATTRIBUTE_NAME
|
|
||||||
value: ''
|
|
||||||
- name: S3KMIP_PIPELINE_DEPTH
|
|
||||||
value: "8"
|
|
||||||
- name: S3KMIP_KEY
|
|
||||||
value: /ssl-kmip/kmip-client-key.pem
|
|
||||||
- name: S3KMIP_CERT
|
|
||||||
value: /ssl-kmip/kmip-client-cert.pem
|
|
||||||
- name: S3KMIP_CA
|
|
||||||
value: /ssl-kmip/kmip-ca.pem
|
|
||||||
{%- endif %}
|
|
||||||
- name: CI
|
|
||||||
value: "true"
|
|
||||||
- name: ENABLE_LOCAL_CACHE
|
|
||||||
value: "true"
|
|
||||||
- name: MONGODB_HOSTS
|
|
||||||
value: "localhost:27018"
|
|
||||||
- name: MONGODB_RS
|
|
||||||
value: "rs0"
|
|
||||||
- name: REDIS_HOST
|
|
||||||
value: "localhost"
|
|
||||||
- name: REDIS_PORT
|
|
||||||
value: "6379"
|
|
||||||
- name: REPORT_TOKEN
|
|
||||||
value: "report-token-1"
|
|
||||||
- name: REMOTE_MANAGEMENT_DISABLE
|
|
||||||
value: "1"
|
|
||||||
- name: HEALTHCHECKS_ALLOWFROM
|
|
||||||
value: "0.0.0.0/0"
|
|
||||||
{% for key, value in vars.env.items() %}
|
|
||||||
- name: {{ key }}
|
|
||||||
value: "{{ value }}"
|
|
||||||
{% endfor %}
|
|
||||||
{% if vars.redis is defined and vars.redis == "enabled" -%}
|
|
||||||
- name: redis
|
|
||||||
image: redis:alpine
|
|
||||||
imagePullPolicy: IfNotPresent
|
|
||||||
resources:
|
|
||||||
requests:
|
|
||||||
cpu: 200m
|
|
||||||
memory: 128Mi
|
|
||||||
limits:
|
|
||||||
cpu: 200m
|
|
||||||
memory: 128Mi
|
|
||||||
{%- endif %}
|
|
||||||
{% if vars.env.CI_PROXY is defined and vars.env.CI_PROXY == "true" -%}
|
|
||||||
- name: squid
|
|
||||||
image: scality/ci-squid
|
|
||||||
imagePullPolicy: IfNotPresent
|
|
||||||
resources:
|
|
||||||
requests:
|
|
||||||
cpu: 250m
|
|
||||||
memory: 128Mi
|
|
||||||
limits:
|
|
||||||
cpu: 250m
|
|
||||||
memory: 128Mi
|
|
||||||
volumeMounts:
|
|
||||||
- name: certs
|
|
||||||
readOnly: false
|
|
||||||
mountPath: /ssl
|
|
||||||
command:
|
|
||||||
- sh
|
|
||||||
- -exc
|
|
||||||
- |
|
|
||||||
mkdir -p /ssl
|
|
||||||
openssl req -new -newkey rsa:2048 -sha256 -days 365 -nodes -x509 \
|
|
||||||
-subj "/C=US/ST=Country/L=City/O=Organization/CN=CN=scality-proxy" \
|
|
||||||
-keyout /ssl/myca.pem -out /ssl/myca.pem
|
|
||||||
cp /ssl/myca.pem /ssl/CA.pem
|
|
||||||
squid -f /etc/squid/squid.conf -N -z
|
|
||||||
squid -f /etc/squid/squid.conf -NYCd 1
|
|
||||||
{%- endif %}
|
|
||||||
{% if vars.env.CI_CEPH is defined and vars.env.CI_CEPH == "true" -%}
|
|
||||||
- name: ceph
|
|
||||||
image: {{ images.ceph }}
|
|
||||||
imagePullPolicy: IfNotPresent
|
|
||||||
resources:
|
|
||||||
requests:
|
|
||||||
cpu: 500m
|
|
||||||
memory: 1536Mi
|
|
||||||
limits:
|
|
||||||
cpu: 500m
|
|
||||||
memory: 1536Mi
|
|
||||||
volumeMounts:
|
|
||||||
- name: artifacts
|
|
||||||
readOnly: false
|
|
||||||
mountPath: /artifacts
|
|
||||||
{%- endif %}
|
|
||||||
{% if vars.pykmip is defined and vars.pykmip == 'enabled' -%}
|
|
||||||
- name: pykmip
|
|
||||||
image: {{ images.pykmip }}
|
|
||||||
imagePullPolicy: IfNotPresent
|
|
||||||
volumeMounts:
|
|
||||||
- name: artifacts
|
|
||||||
readOnly: false
|
|
||||||
mountPath: /artifacts
|
|
||||||
resources:
|
|
||||||
requests:
|
|
||||||
cpu: 100m
|
|
||||||
memory: 128Mi
|
|
||||||
limits:
|
|
||||||
cpu: 100m
|
|
||||||
memory: 128Mi
|
|
||||||
{%- endif %}
|
|
||||||
volumes:
|
|
||||||
- name: creds
|
|
||||||
emptyDir: {}
|
|
||||||
- name: certs
|
|
||||||
emptyDir: {}
|
|
||||||
- name: artifacts
|
|
||||||
emptyDir: {}
|
|
||||||
- name: kmip-certs
|
|
||||||
emptyDir: {}
|
|
|
@ -48,7 +48,7 @@ signed_headers = 'host;x-amz-content-sha256;x-amz-date'
|
||||||
canonical_request = '{0}\n{1}\n{2}\n{3}\n{4}\n{5}' \
|
canonical_request = '{0}\n{1}\n{2}\n{3}\n{4}\n{5}' \
|
||||||
.format(method, canonical_uri, canonical_querystring, canonical_headers,
|
.format(method, canonical_uri, canonical_querystring, canonical_headers,
|
||||||
signed_headers, payload_hash)
|
signed_headers, payload_hash)
|
||||||
print canonical_request
|
print(canonical_request)
|
||||||
|
|
||||||
credential_scope = '{0}/{1}/{2}/aws4_request' \
|
credential_scope = '{0}/{1}/{2}/aws4_request' \
|
||||||
.format(date_stamp, region, service)
|
.format(date_stamp, region, service)
|
||||||
|
|
|
@ -0,0 +1,28 @@
|
||||||
|
FROM ghcr.io/scality/federation/nodesvc-base:7.10.6.0
|
||||||
|
|
||||||
|
ENV S3_CONFIG_FILE=${CONF_DIR}/config.json
|
||||||
|
ENV S3_LOCATION_FILE=${CONF_DIR}/locationConfig.json
|
||||||
|
|
||||||
|
COPY . ${HOME_DIR}/s3
|
||||||
|
RUN chown -R ${USER} ${HOME_DIR}
|
||||||
|
RUN pip3 install redis===3.5.3 requests==2.27.1 && \
|
||||||
|
apt-get install -y git-lfs
|
||||||
|
|
||||||
|
USER ${USER}
|
||||||
|
WORKDIR ${HOME_DIR}/s3
|
||||||
|
RUN rm -f ~/.gitconfig && \
|
||||||
|
git config --global --add safe.directory . && \
|
||||||
|
git lfs install && \
|
||||||
|
GIT_LFS_SKIP_SMUDGE=1 && \
|
||||||
|
yarn global add typescript && \
|
||||||
|
yarn install --frozen-lockfile --production --network-concurrency 1 && \
|
||||||
|
yarn cache clean --all && \
|
||||||
|
yarn global remove typescript
|
||||||
|
|
||||||
|
# run symlinking separately to avoid yarn installation errors
|
||||||
|
# we might have to check if the symlinking is really needed!
|
||||||
|
RUN ln -sf /scality-kms node_modules
|
||||||
|
|
||||||
|
EXPOSE 8000
|
||||||
|
|
||||||
|
CMD bash -c "source ${CONF_DIR}/env && export && supervisord -c ${CONF_DIR}/supervisord.conf"
|
7
index.js
7
index.js
|
@ -1,3 +1,10 @@
|
||||||
'use strict'; // eslint-disable-line strict
|
'use strict'; // eslint-disable-line strict
|
||||||
|
|
||||||
|
require('werelogs').stderrUtils.catchAndTimestampStderr(
|
||||||
|
undefined,
|
||||||
|
// Do not exit as workers have their own listener that will exit
|
||||||
|
// But primary don't have another listener
|
||||||
|
require('cluster').isPrimary ? 1 : null,
|
||||||
|
);
|
||||||
|
|
||||||
require('./lib/server.js')();
|
require('./lib/server.js')();
|
||||||
|
|
637
lib/Config.js
637
lib/Config.js
|
@ -8,18 +8,18 @@ const crypto = require('crypto');
|
||||||
const { v4: uuidv4 } = require('uuid');
|
const { v4: uuidv4 } = require('uuid');
|
||||||
const cronParser = require('cron-parser');
|
const cronParser = require('cron-parser');
|
||||||
const joi = require('@hapi/joi');
|
const joi = require('@hapi/joi');
|
||||||
|
const { s3routes, auth: arsenalAuth, s3middleware } = require('arsenal');
|
||||||
const { isValidBucketName } = require('arsenal').s3routes.routesUtils;
|
const { isValidBucketName } = s3routes.routesUtils;
|
||||||
const validateAuthConfig = require('arsenal').auth.inMemory.validateAuthConfig;
|
const validateAuthConfig = arsenalAuth.inMemory.validateAuthConfig;
|
||||||
const { buildAuthDataAccount } = require('./auth/in_memory/builder');
|
const { buildAuthDataAccount } = require('./auth/in_memory/builder');
|
||||||
const validExternalBackends = require('../constants').externalBackends;
|
const validExternalBackends = require('../constants').externalBackends;
|
||||||
const { azureAccountNameRegex, base64Regex,
|
const { azureAccountNameRegex, base64Regex,
|
||||||
allowedUtapiEventFilterFields, allowedUtapiEventFilterStates,
|
allowedUtapiEventFilterFields, allowedUtapiEventFilterStates,
|
||||||
} = require('../constants');
|
} = require('../constants');
|
||||||
const { utapiVersion } = require('utapi');
|
const { utapiVersion } = require('utapi');
|
||||||
const { versioning } = require('arsenal');
|
const { scaleMsPerDay } = s3middleware.objectUtils;
|
||||||
|
|
||||||
const versionIdUtils = versioning.VersionID;
|
const constants = require('../constants');
|
||||||
|
|
||||||
// config paths
|
// config paths
|
||||||
const configSearchPaths = [
|
const configSearchPaths = [
|
||||||
|
@ -107,6 +107,47 @@ function parseSproxydConfig(configSproxyd) {
|
||||||
return joi.attempt(configSproxyd, joiSchema, 'bad config');
|
return joi.attempt(configSproxyd, joiSchema, 'bad config');
|
||||||
}
|
}
|
||||||
|
|
||||||
|
function parseRedisConfig(redisConfig) {
|
||||||
|
const joiSchema = joi.object({
|
||||||
|
password: joi.string().allow(''),
|
||||||
|
host: joi.string(),
|
||||||
|
port: joi.number(),
|
||||||
|
retry: joi.object({
|
||||||
|
connectBackoff: joi.object({
|
||||||
|
min: joi.number().required(),
|
||||||
|
max: joi.number().required(),
|
||||||
|
jitter: joi.number().required(),
|
||||||
|
factor: joi.number().required(),
|
||||||
|
deadline: joi.number().required(),
|
||||||
|
}),
|
||||||
|
}),
|
||||||
|
// sentinel config
|
||||||
|
sentinels: joi.alternatives().try(
|
||||||
|
joi.string()
|
||||||
|
.pattern(/^[a-zA-Z0-9.-]+:[0-9]+(,[a-zA-Z0-9.-]+:[0-9]+)*$/)
|
||||||
|
.custom(hosts => hosts.split(',').map(item => {
|
||||||
|
const [host, port] = item.split(':');
|
||||||
|
return { host, port: Number.parseInt(port, 10) };
|
||||||
|
})),
|
||||||
|
joi.array().items(
|
||||||
|
joi.object({
|
||||||
|
host: joi.string().required(),
|
||||||
|
port: joi.number().required(),
|
||||||
|
})
|
||||||
|
).min(1),
|
||||||
|
),
|
||||||
|
name: joi.string(),
|
||||||
|
sentinelPassword: joi.string().allow(''),
|
||||||
|
})
|
||||||
|
.and('host', 'port')
|
||||||
|
.and('sentinels', 'name')
|
||||||
|
.xor('host', 'sentinels')
|
||||||
|
.without('sentinels', ['host', 'port'])
|
||||||
|
.without('host', ['sentinels', 'sentinelPassword']);
|
||||||
|
|
||||||
|
return joi.attempt(redisConfig, joiSchema, 'bad config');
|
||||||
|
}
|
||||||
|
|
||||||
function restEndpointsAssert(restEndpoints, locationConstraints) {
|
function restEndpointsAssert(restEndpoints, locationConstraints) {
|
||||||
assert(typeof restEndpoints === 'object',
|
assert(typeof restEndpoints === 'object',
|
||||||
'bad config: restEndpoints must be an object of endpoints');
|
'bad config: restEndpoints must be an object of endpoints');
|
||||||
|
@ -137,26 +178,71 @@ function gcpLocationConstraintAssert(location, locationObj) {
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
function azureLocationConstraintAssert(location, locationObj) {
|
function azureGetStorageAccountName(location, locationDetails) {
|
||||||
const {
|
const { azureStorageAccountName } = locationDetails;
|
||||||
azureStorageEndpoint,
|
|
||||||
azureStorageAccountName,
|
|
||||||
azureStorageAccessKey,
|
|
||||||
azureContainerName,
|
|
||||||
} = locationObj.details;
|
|
||||||
const storageEndpointFromEnv =
|
|
||||||
process.env[`${location}_AZURE_STORAGE_ENDPOINT`];
|
|
||||||
const storageAccountNameFromEnv =
|
const storageAccountNameFromEnv =
|
||||||
process.env[`${location}_AZURE_STORAGE_ACCOUNT_NAME`];
|
process.env[`${location}_AZURE_STORAGE_ACCOUNT_NAME`];
|
||||||
const storageAccessKeyFromEnv =
|
return storageAccountNameFromEnv || azureStorageAccountName;
|
||||||
process.env[`${location}_AZURE_STORAGE_ACCESS_KEY`];
|
}
|
||||||
const locationParams = {
|
|
||||||
azureStorageEndpoint: storageEndpointFromEnv || azureStorageEndpoint,
|
function azureGetLocationCredentials(location, locationDetails) {
|
||||||
azureStorageAccountName:
|
const storageAccessKey =
|
||||||
storageAccountNameFromEnv || azureStorageAccountName,
|
process.env[`${location}_AZURE_STORAGE_ACCESS_KEY`] ||
|
||||||
azureStorageAccessKey: storageAccessKeyFromEnv || azureStorageAccessKey,
|
locationDetails.azureStorageAccessKey;
|
||||||
azureContainerName,
|
const sasToken =
|
||||||
|
process.env[`${location}_AZURE_SAS_TOKEN`] ||
|
||||||
|
locationDetails.sasToken;
|
||||||
|
const clientKey =
|
||||||
|
process.env[`${location}_AZURE_CLIENT_KEY`] ||
|
||||||
|
locationDetails.clientKey;
|
||||||
|
|
||||||
|
const authMethod =
|
||||||
|
process.env[`${location}_AZURE_AUTH_METHOD`] ||
|
||||||
|
locationDetails.authMethod ||
|
||||||
|
(storageAccessKey && 'shared-key') ||
|
||||||
|
(sasToken && 'shared-access-signature') ||
|
||||||
|
(clientKey && 'client-secret') ||
|
||||||
|
'shared-key';
|
||||||
|
|
||||||
|
switch (authMethod) {
|
||||||
|
case 'shared-key':
|
||||||
|
default:
|
||||||
|
return {
|
||||||
|
authMethod,
|
||||||
|
storageAccountName:
|
||||||
|
azureGetStorageAccountName(location, locationDetails),
|
||||||
|
storageAccessKey,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
case 'shared-access-signature':
|
||||||
|
return {
|
||||||
|
authMethod,
|
||||||
|
sasToken,
|
||||||
|
};
|
||||||
|
|
||||||
|
case 'client-secret':
|
||||||
|
return {
|
||||||
|
authMethod,
|
||||||
|
tenantId:
|
||||||
|
process.env[`${location}_AZURE_TENANT_ID`] ||
|
||||||
|
locationDetails.tenantId,
|
||||||
|
clientId:
|
||||||
|
process.env[`${location}_AZURE_CLIENT_ID`] ||
|
||||||
|
locationDetails.clientId,
|
||||||
|
clientKey,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function azureLocationConstraintAssert(location, locationObj) {
|
||||||
|
const locationParams = {
|
||||||
|
...azureGetLocationCredentials(location, locationObj.details),
|
||||||
|
azureStorageEndpoint:
|
||||||
|
process.env[`${location}_AZURE_STORAGE_ENDPOINT`] ||
|
||||||
|
locationObj.details.azureStorageEndpoint,
|
||||||
|
azureContainerName: locationObj.details.azureContainerName,
|
||||||
|
};
|
||||||
|
|
||||||
Object.keys(locationParams).forEach(param => {
|
Object.keys(locationParams).forEach(param => {
|
||||||
const value = locationParams[param];
|
const value = locationParams[param];
|
||||||
assert.notEqual(value, undefined,
|
assert.notEqual(value, undefined,
|
||||||
|
@ -166,13 +252,16 @@ function azureLocationConstraintAssert(location, locationObj) {
|
||||||
`bad location constraint: "${location}" ${param} ` +
|
`bad location constraint: "${location}" ${param} ` +
|
||||||
`"${value}" must be a string`);
|
`"${value}" must be a string`);
|
||||||
});
|
});
|
||||||
assert(azureAccountNameRegex.test(locationParams.azureStorageAccountName),
|
|
||||||
|
if (locationParams.authMethod === 'shared-key') {
|
||||||
|
assert(azureAccountNameRegex.test(locationParams.storageAccountName),
|
||||||
`bad location constraint: "${location}" azureStorageAccountName ` +
|
`bad location constraint: "${location}" azureStorageAccountName ` +
|
||||||
`"${locationParams.storageAccountName}" is an invalid value`);
|
`"${locationParams.storageAccountName}" is an invalid value`);
|
||||||
assert(base64Regex.test(locationParams.azureStorageAccessKey),
|
assert(base64Regex.test(locationParams.storageAccessKey),
|
||||||
`bad location constraint: "${location}" ` +
|
`bad location constraint: "${location}" ` +
|
||||||
'azureStorageAccessKey is not a valid base64 string');
|
'azureStorageAccessKey is not a valid base64 string');
|
||||||
assert(isValidBucketName(azureContainerName, []),
|
}
|
||||||
|
assert(isValidBucketName(locationParams.azureContainerName, []),
|
||||||
`bad location constraint: "${location}" ` +
|
`bad location constraint: "${location}" ` +
|
||||||
'azureContainerName is an invalid container name');
|
'azureContainerName is an invalid container name');
|
||||||
}
|
}
|
||||||
|
@ -191,10 +280,104 @@ function hdClientLocationConstraintAssert(configHd) {
|
||||||
return hdclientFields;
|
return hdclientFields;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
function azureArchiveLocationConstraintAssert(locationObj) {
|
||||||
|
const checkedFields = [
|
||||||
|
'azureContainerName',
|
||||||
|
'azureStorageEndpoint',
|
||||||
|
];
|
||||||
|
if (Object.keys(locationObj.details).length === 0 ||
|
||||||
|
!checkedFields.every(field => field in locationObj.details)) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
const {
|
||||||
|
azureContainerName,
|
||||||
|
azureStorageEndpoint,
|
||||||
|
} = locationObj.details;
|
||||||
|
const stringFields = [
|
||||||
|
azureContainerName,
|
||||||
|
azureStorageEndpoint,
|
||||||
|
];
|
||||||
|
stringFields.forEach(field => {
|
||||||
|
assert(typeof field === 'string',
|
||||||
|
`bad config: ${field} must be a string`);
|
||||||
|
});
|
||||||
|
|
||||||
|
let hasAuthMethod = false;
|
||||||
|
if (locationObj.details.sasToken !== undefined) {
|
||||||
|
assert(typeof locationObj.details.sasToken === 'string',
|
||||||
|
`bad config: ${locationObj.details.sasToken} must be a string`);
|
||||||
|
hasAuthMethod = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (locationObj.details.azureStorageAccountName !== undefined &&
|
||||||
|
locationObj.details.azureStorageAccessKey !== undefined) {
|
||||||
|
assert(typeof locationObj.details.azureStorageAccountName === 'string',
|
||||||
|
`bad config: ${locationObj.details.azureStorageAccountName} must be a string`);
|
||||||
|
assert(typeof locationObj.details.azureStorageAccessKey === 'string',
|
||||||
|
`bad config: ${locationObj.details.azureStorageAccessKey} must be a string`);
|
||||||
|
assert(!hasAuthMethod, 'Multiple authentication methods are not allowed');
|
||||||
|
hasAuthMethod = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (locationObj.details.tenantId !== undefined &&
|
||||||
|
locationObj.details.clientId !== undefined &&
|
||||||
|
locationObj.details.clientKey !== undefined) {
|
||||||
|
assert(typeof locationObj.details.tenantId === 'string',
|
||||||
|
`bad config: ${locationObj.details.tenantId} must be a string`);
|
||||||
|
assert(typeof locationObj.details.clientId === 'string',
|
||||||
|
`bad config: ${locationObj.details.clientId} must be a string`);
|
||||||
|
assert(typeof locationObj.details.clientKey === 'string',
|
||||||
|
`bad config: ${locationObj.details.clientKey} must be a string`);
|
||||||
|
assert(!hasAuthMethod, 'Multiple authentication methods are not allowed');
|
||||||
|
hasAuthMethod = true;
|
||||||
|
}
|
||||||
|
assert(hasAuthMethod, 'Missing authentication method');
|
||||||
|
}
|
||||||
|
|
||||||
|
function dmfLocationConstraintAssert(locationObj) {
|
||||||
|
const checkedFields = [
|
||||||
|
'endpoint',
|
||||||
|
'username',
|
||||||
|
'password',
|
||||||
|
'repoId',
|
||||||
|
'nsId',
|
||||||
|
];
|
||||||
|
if (Object.keys(locationObj.details).length === 0 ||
|
||||||
|
!checkedFields.every(field => field in locationObj.details)) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
const {
|
||||||
|
endpoint,
|
||||||
|
username,
|
||||||
|
password,
|
||||||
|
repoId,
|
||||||
|
nsId,
|
||||||
|
} = locationObj.details;
|
||||||
|
const stringFields = [
|
||||||
|
endpoint,
|
||||||
|
username,
|
||||||
|
password,
|
||||||
|
nsId,
|
||||||
|
];
|
||||||
|
stringFields.forEach(field => {
|
||||||
|
assert(typeof field === 'string',
|
||||||
|
`bad config: ${field} must be a string`);
|
||||||
|
});
|
||||||
|
assert.strictEqual(Array.isArray(repoId), true);
|
||||||
|
repoId.forEach(rId => {
|
||||||
|
assert(typeof rId === 'string',
|
||||||
|
`bad config: ${rId} must be a string`);
|
||||||
|
});
|
||||||
|
assert(repoId.every(
|
||||||
|
r => typeof r === 'string'),
|
||||||
|
'bad config: each repoId must be a string',
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
function locationConstraintAssert(locationConstraints) {
|
function locationConstraintAssert(locationConstraints) {
|
||||||
const supportedBackends =
|
const supportedBackends =
|
||||||
['mem', 'file', 'scality',
|
['mem', 'file', 'scality',
|
||||||
'mongodb'].concat(Object.keys(validExternalBackends));
|
'mongodb', 'dmf', 'azure_archive', 'vitastor'].concat(Object.keys(validExternalBackends));
|
||||||
assert(typeof locationConstraints === 'object',
|
assert(typeof locationConstraints === 'object',
|
||||||
'bad config: locationConstraints must be an object');
|
'bad config: locationConstraints must be an object');
|
||||||
Object.keys(locationConstraints).forEach(l => {
|
Object.keys(locationConstraints).forEach(l => {
|
||||||
|
@ -302,6 +485,12 @@ function locationConstraintAssert(locationConstraints) {
|
||||||
if (locationConstraints[l].type === 'gcp') {
|
if (locationConstraints[l].type === 'gcp') {
|
||||||
gcpLocationConstraintAssert(l, locationConstraints[l]);
|
gcpLocationConstraintAssert(l, locationConstraints[l]);
|
||||||
}
|
}
|
||||||
|
if (locationConstraints[l].type === 'dmf') {
|
||||||
|
dmfLocationConstraintAssert(locationConstraints[l]);
|
||||||
|
}
|
||||||
|
if (locationConstraints[l].type === 'azure_archive') {
|
||||||
|
azureArchiveLocationConstraintAssert(locationConstraints[l]);
|
||||||
|
}
|
||||||
if (locationConstraints[l].type === 'pfs') {
|
if (locationConstraints[l].type === 'pfs') {
|
||||||
assert(typeof details.pfsDaemonEndpoint === 'object',
|
assert(typeof details.pfsDaemonEndpoint === 'object',
|
||||||
'bad config: pfsDaemonEndpoint is mandatory and must be an object');
|
'bad config: pfsDaemonEndpoint is mandatory and must be an object');
|
||||||
|
@ -313,26 +502,33 @@ function locationConstraintAssert(locationConstraints) {
|
||||||
locationConstraints[l].details.connector.hdclient);
|
locationConstraints[l].details.connector.hdclient);
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
assert(Object.keys(locationConstraints)
|
|
||||||
.includes('us-east-1'), 'bad locationConfig: must ' +
|
|
||||||
'include us-east-1 as a locationConstraint');
|
|
||||||
}
|
}
|
||||||
|
|
||||||
function parseUtapiReindex({ enabled, schedule, sentinel, bucketd }) {
|
function parseUtapiReindex(config) {
|
||||||
|
const {
|
||||||
|
enabled,
|
||||||
|
schedule,
|
||||||
|
redis,
|
||||||
|
bucketd,
|
||||||
|
onlyCountLatestWhenObjectLocked,
|
||||||
|
} = config;
|
||||||
assert(typeof enabled === 'boolean',
|
assert(typeof enabled === 'boolean',
|
||||||
'bad config: utapi.reindex.enabled must be a boolean');
|
'bad config: utapi.reindex.enabled must be a boolean');
|
||||||
assert(typeof sentinel === 'object',
|
|
||||||
'bad config: utapi.reindex.sentinel must be an object');
|
const parsedRedis = parseRedisConfig(redis);
|
||||||
assert(typeof sentinel.port === 'number',
|
assert(Array.isArray(parsedRedis.sentinels),
|
||||||
'bad config: utapi.reindex.sentinel.port must be a number');
|
'bad config: utapi reindex redis config requires a list of sentinels');
|
||||||
assert(typeof sentinel.name === 'string',
|
|
||||||
'bad config: utapi.reindex.sentinel.name must be a string');
|
|
||||||
assert(typeof bucketd === 'object',
|
assert(typeof bucketd === 'object',
|
||||||
'bad config: utapi.reindex.bucketd must be an object');
|
'bad config: utapi.reindex.bucketd must be an object');
|
||||||
assert(typeof bucketd.port === 'number',
|
assert(typeof bucketd.port === 'number',
|
||||||
'bad config: utapi.reindex.bucketd.port must be a number');
|
'bad config: utapi.reindex.bucketd.port must be a number');
|
||||||
assert(typeof schedule === 'string',
|
assert(typeof schedule === 'string',
|
||||||
'bad config: utapi.reindex.schedule must be a string');
|
'bad config: utapi.reindex.schedule must be a string');
|
||||||
|
if (onlyCountLatestWhenObjectLocked !== undefined) {
|
||||||
|
assert(typeof onlyCountLatestWhenObjectLocked === 'boolean',
|
||||||
|
'bad config: utapi.reindex.onlyCountLatestWhenObjectLocked must be a boolean');
|
||||||
|
}
|
||||||
try {
|
try {
|
||||||
cronParser.parseExpression(schedule);
|
cronParser.parseExpression(schedule);
|
||||||
} catch (e) {
|
} catch (e) {
|
||||||
|
@ -340,6 +536,13 @@ function parseUtapiReindex({ enabled, schedule, sentinel, bucketd }) {
|
||||||
'bad config: utapi.reindex.schedule must be a valid ' +
|
'bad config: utapi.reindex.schedule must be a valid ' +
|
||||||
`cron schedule. ${e.message}.`);
|
`cron schedule. ${e.message}.`);
|
||||||
}
|
}
|
||||||
|
return {
|
||||||
|
enabled,
|
||||||
|
schedule,
|
||||||
|
redis: parsedRedis,
|
||||||
|
bucketd,
|
||||||
|
onlyCountLatestWhenObjectLocked,
|
||||||
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
function requestsConfigAssert(requestsConfig) {
|
function requestsConfigAssert(requestsConfig) {
|
||||||
|
@ -427,7 +630,6 @@ class Config extends EventEmitter {
|
||||||
// Read config automatically
|
// Read config automatically
|
||||||
this._getLocationConfig();
|
this._getLocationConfig();
|
||||||
this._getConfig();
|
this._getConfig();
|
||||||
this._configureBackends();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
_getLocationConfig() {
|
_getLocationConfig() {
|
||||||
|
@ -560,12 +762,6 @@ class Config extends EventEmitter {
|
||||||
const { replicationEndpoints } = config;
|
const { replicationEndpoints } = config;
|
||||||
assert(replicationEndpoints instanceof Array, 'bad config: ' +
|
assert(replicationEndpoints instanceof Array, 'bad config: ' +
|
||||||
'`replicationEndpoints` property must be an array');
|
'`replicationEndpoints` property must be an array');
|
||||||
if (replicationEndpoints.length > 1) {
|
|
||||||
const hasDefault = replicationEndpoints.some(
|
|
||||||
replicationEndpoint => replicationEndpoint.default);
|
|
||||||
assert(hasDefault, 'bad config: `replicationEndpoints` must ' +
|
|
||||||
'contain a default endpoint');
|
|
||||||
}
|
|
||||||
replicationEndpoints.forEach(replicationEndpoint => {
|
replicationEndpoints.forEach(replicationEndpoint => {
|
||||||
assert.strictEqual(typeof replicationEndpoint, 'object',
|
assert.strictEqual(typeof replicationEndpoint, 'object',
|
||||||
'bad config: `replicationEndpoints` property must be an ' +
|
'bad config: `replicationEndpoints` property must be an ' +
|
||||||
|
@ -583,7 +779,7 @@ class Config extends EventEmitter {
|
||||||
assert(validExternalBackends[type], 'bad config: `type` ' +
|
assert(validExternalBackends[type], 'bad config: `type` ' +
|
||||||
'property of `replicationEndpoints` object must be ' +
|
'property of `replicationEndpoints` object must be ' +
|
||||||
'a valid external backend (one of: "' +
|
'a valid external backend (one of: "' +
|
||||||
`${Object.keys(validExternalBackends).join('", "')})`);
|
`${Object.keys(validExternalBackends).join('", "')}")`);
|
||||||
} else {
|
} else {
|
||||||
assert.notStrictEqual(servers, undefined, 'bad config: ' +
|
assert.notStrictEqual(servers, undefined, 'bad config: ' +
|
||||||
'each object of `replicationEndpoints` array that is ' +
|
'each object of `replicationEndpoints` array that is ' +
|
||||||
|
@ -645,11 +841,11 @@ class Config extends EventEmitter {
|
||||||
this.websiteEndpoints = config.websiteEndpoints;
|
this.websiteEndpoints = config.websiteEndpoints;
|
||||||
}
|
}
|
||||||
|
|
||||||
this.clusters = false;
|
this.workers = false;
|
||||||
if (config.clusters !== undefined) {
|
if (config.workers !== undefined) {
|
||||||
assert(Number.isInteger(config.clusters) && config.clusters > 0,
|
assert(Number.isInteger(config.workers) && config.workers > 0,
|
||||||
'bad config: clusters must be a positive integer');
|
'bad config: workers must be a positive integer');
|
||||||
this.clusters = config.clusters;
|
this.workers = config.workers;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (config.usEastBehavior !== undefined) {
|
if (config.usEastBehavior !== undefined) {
|
||||||
|
@ -887,8 +1083,7 @@ class Config extends EventEmitter {
|
||||||
assert(typeof config.localCache.port === 'number',
|
assert(typeof config.localCache.port === 'number',
|
||||||
'config: bad port for localCache. port must be a number');
|
'config: bad port for localCache. port must be a number');
|
||||||
if (config.localCache.password !== undefined) {
|
if (config.localCache.password !== undefined) {
|
||||||
assert(
|
assert(typeof config.localCache.password === 'string',
|
||||||
this._verifyRedisPassword(config.localCache.password),
|
|
||||||
'config: vad password for localCache. password must' +
|
'config: vad password for localCache. password must' +
|
||||||
' be a string');
|
' be a string');
|
||||||
}
|
}
|
||||||
|
@ -914,56 +1109,46 @@ class Config extends EventEmitter {
|
||||||
}
|
}
|
||||||
|
|
||||||
if (config.redis) {
|
if (config.redis) {
|
||||||
if (config.redis.sentinels) {
|
this.redis = parseRedisConfig(config.redis);
|
||||||
this.redis = { sentinels: [], name: null };
|
|
||||||
|
|
||||||
assert(typeof config.redis.name === 'string',
|
|
||||||
'bad config: redis sentinel name must be a string');
|
|
||||||
this.redis.name = config.redis.name;
|
|
||||||
assert(Array.isArray(config.redis.sentinels) ||
|
|
||||||
typeof config.redis.sentinels === 'string',
|
|
||||||
'bad config: redis sentinels must be an array or string');
|
|
||||||
|
|
||||||
if (typeof config.redis.sentinels === 'string') {
|
|
||||||
config.redis.sentinels.split(',').forEach(item => {
|
|
||||||
const [host, port] = item.split(':');
|
|
||||||
this.redis.sentinels.push({ host,
|
|
||||||
port: Number.parseInt(port, 10) });
|
|
||||||
});
|
|
||||||
} else if (Array.isArray(config.redis.sentinels)) {
|
|
||||||
config.redis.sentinels.forEach(item => {
|
|
||||||
const { host, port } = item;
|
|
||||||
assert(typeof host === 'string',
|
|
||||||
'bad config: redis sentinel host must be a string');
|
|
||||||
assert(typeof port === 'number',
|
|
||||||
'bad config: redis sentinel port must be a number');
|
|
||||||
this.redis.sentinels.push({ host, port });
|
|
||||||
});
|
|
||||||
}
|
}
|
||||||
|
if (config.scuba) {
|
||||||
if (config.redis.sentinelPassword !== undefined) {
|
this.scuba = {};
|
||||||
assert(
|
if (config.scuba.host) {
|
||||||
this._verifyRedisPassword(config.redis.sentinelPassword));
|
assert(typeof config.scuba.host === 'string',
|
||||||
this.redis.sentinelPassword = config.redis.sentinelPassword;
|
'bad config: scuba host must be a string');
|
||||||
|
this.scuba.host = config.scuba.host;
|
||||||
}
|
}
|
||||||
} else {
|
if (config.scuba.port) {
|
||||||
// check for standalone configuration
|
assert(Number.isInteger(config.scuba.port)
|
||||||
this.redis = {};
|
&& config.scuba.port > 0,
|
||||||
assert(typeof config.redis.host === 'string',
|
'bad config: scuba port must be a positive integer');
|
||||||
'bad config: redis.host must be a string');
|
this.scuba.port = config.scuba.port;
|
||||||
assert(typeof config.redis.port === 'number',
|
|
||||||
'bad config: redis.port must be a number');
|
|
||||||
this.redis.host = config.redis.host;
|
|
||||||
this.redis.port = config.redis.port;
|
|
||||||
}
|
|
||||||
if (config.redis.password !== undefined) {
|
|
||||||
assert(
|
|
||||||
this._verifyRedisPassword(config.redis.password),
|
|
||||||
'bad config: invalid password for redis. password must ' +
|
|
||||||
'be a string');
|
|
||||||
this.redis.password = config.redis.password;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if (process.env.SCUBA_HOST && process.env.SCUBA_PORT) {
|
||||||
|
assert(typeof process.env.SCUBA_HOST === 'string',
|
||||||
|
'bad config: scuba host must be a string');
|
||||||
|
assert(Number.isInteger(Number(process.env.SCUBA_PORT))
|
||||||
|
&& Number(process.env.SCUBA_PORT) > 0,
|
||||||
|
'bad config: scuba port must be a positive integer');
|
||||||
|
this.scuba = {
|
||||||
|
host: process.env.SCUBA_HOST,
|
||||||
|
port: Number(process.env.SCUBA_PORT),
|
||||||
|
};
|
||||||
|
}
|
||||||
|
if (this.scuba) {
|
||||||
|
this.quotaEnabled = true;
|
||||||
|
}
|
||||||
|
const maxStaleness = Number(process.env.QUOTA_MAX_STALENESS_MS) ||
|
||||||
|
config.quota?.maxStatenessMS ||
|
||||||
|
24 * 60 * 60 * 1000;
|
||||||
|
assert(Number.isInteger(maxStaleness), 'bad config: maxStalenessMS must be an integer');
|
||||||
|
const enableInflights = process.env.QUOTA_ENABLE_INFLIGHTS === 'true' ||
|
||||||
|
config.quota?.enableInflights || false;
|
||||||
|
this.quota = {
|
||||||
|
maxStaleness,
|
||||||
|
enableInflights,
|
||||||
|
};
|
||||||
if (config.utapi) {
|
if (config.utapi) {
|
||||||
this.utapi = { component: 's3' };
|
this.utapi = { component: 's3' };
|
||||||
if (config.utapi.host) {
|
if (config.utapi.host) {
|
||||||
|
@ -992,50 +1177,8 @@ class Config extends EventEmitter {
|
||||||
assert(config.redis, 'missing required property of utapi ' +
|
assert(config.redis, 'missing required property of utapi ' +
|
||||||
'configuration: redis');
|
'configuration: redis');
|
||||||
if (config.utapi.redis) {
|
if (config.utapi.redis) {
|
||||||
if (config.utapi.redis.sentinels) {
|
this.utapi.redis = parseRedisConfig(config.utapi.redis);
|
||||||
this.utapi.redis = { sentinels: [], name: null };
|
if (this.utapi.redis.retry === undefined) {
|
||||||
|
|
||||||
assert(typeof config.utapi.redis.name === 'string',
|
|
||||||
'bad config: redis sentinel name must be a string');
|
|
||||||
this.utapi.redis.name = config.utapi.redis.name;
|
|
||||||
|
|
||||||
assert(Array.isArray(config.utapi.redis.sentinels),
|
|
||||||
'bad config: redis sentinels must be an array');
|
|
||||||
config.utapi.redis.sentinels.forEach(item => {
|
|
||||||
const { host, port } = item;
|
|
||||||
assert(typeof host === 'string',
|
|
||||||
'bad config: redis sentinel host must be a string');
|
|
||||||
assert(typeof port === 'number',
|
|
||||||
'bad config: redis sentinel port must be a number');
|
|
||||||
this.utapi.redis.sentinels.push({ host, port });
|
|
||||||
});
|
|
||||||
} else {
|
|
||||||
// check for standalone configuration
|
|
||||||
this.utapi.redis = {};
|
|
||||||
assert(typeof config.utapi.redis.host === 'string',
|
|
||||||
'bad config: redis.host must be a string');
|
|
||||||
assert(typeof config.utapi.redis.port === 'number',
|
|
||||||
'bad config: redis.port must be a number');
|
|
||||||
this.utapi.redis.host = config.utapi.redis.host;
|
|
||||||
this.utapi.redis.port = config.utapi.redis.port;
|
|
||||||
}
|
|
||||||
if (config.utapi.redis.retry !== undefined) {
|
|
||||||
if (config.utapi.redis.retry.connectBackoff !== undefined) {
|
|
||||||
const { min, max, jitter, factor, deadline } = config.utapi.redis.retry.connectBackoff;
|
|
||||||
assert.strictEqual(typeof min, 'number',
|
|
||||||
'utapi.redis.retry.connectBackoff: min must be a number');
|
|
||||||
assert.strictEqual(typeof max, 'number',
|
|
||||||
'utapi.redis.retry.connectBackoff: max must be a number');
|
|
||||||
assert.strictEqual(typeof jitter, 'number',
|
|
||||||
'utapi.redis.retry.connectBackoff: jitter must be a number');
|
|
||||||
assert.strictEqual(typeof factor, 'number',
|
|
||||||
'utapi.redis.retry.connectBackoff: factor must be a number');
|
|
||||||
assert.strictEqual(typeof deadline, 'number',
|
|
||||||
'utapi.redis.retry.connectBackoff: deadline must be a number');
|
|
||||||
}
|
|
||||||
|
|
||||||
this.utapi.redis.retry = config.utapi.redis.retry;
|
|
||||||
} else {
|
|
||||||
this.utapi.redis.retry = {
|
this.utapi.redis.retry = {
|
||||||
connectBackoff: {
|
connectBackoff: {
|
||||||
min: 10,
|
min: 10,
|
||||||
|
@ -1046,22 +1189,6 @@ class Config extends EventEmitter {
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
if (config.utapi.redis.password !== undefined) {
|
|
||||||
assert(
|
|
||||||
this._verifyRedisPassword(config.utapi.redis.password),
|
|
||||||
'config: invalid password for utapi redis. password' +
|
|
||||||
' must be a string');
|
|
||||||
this.utapi.redis.password = config.utapi.redis.password;
|
|
||||||
}
|
|
||||||
if (config.utapi.redis.sentinelPassword !== undefined) {
|
|
||||||
assert(
|
|
||||||
this._verifyRedisPassword(
|
|
||||||
config.utapi.redis.sentinelPassword),
|
|
||||||
'config: invalid password for utapi redis. password' +
|
|
||||||
' must be a string');
|
|
||||||
this.utapi.redis.sentinelPassword =
|
|
||||||
config.utapi.redis.sentinelPassword;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
if (config.utapi.metrics) {
|
if (config.utapi.metrics) {
|
||||||
this.utapi.metrics = config.utapi.metrics;
|
this.utapi.metrics = config.utapi.metrics;
|
||||||
|
@ -1131,8 +1258,7 @@ class Config extends EventEmitter {
|
||||||
}
|
}
|
||||||
|
|
||||||
if (config.utapi && config.utapi.reindex) {
|
if (config.utapi && config.utapi.reindex) {
|
||||||
parseUtapiReindex(config.utapi.reindex);
|
this.utapi.reindex = parseUtapiReindex(config.utapi.reindex);
|
||||||
this.utapi.reindex = config.utapi.reindex;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1177,6 +1303,8 @@ class Config extends EventEmitter {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
this.authdata = config.authdata || 'authdata.json';
|
||||||
|
|
||||||
this.kms = {};
|
this.kms = {};
|
||||||
if (config.kms) {
|
if (config.kms) {
|
||||||
assert(typeof config.kms.userName === 'string');
|
assert(typeof config.kms.userName === 'string');
|
||||||
|
@ -1396,25 +1524,6 @@ class Config extends EventEmitter {
|
||||||
this.outboundProxy.certs = certObj.certs;
|
this.outboundProxy.certs = certObj.certs;
|
||||||
}
|
}
|
||||||
|
|
||||||
this.managementAgent = {};
|
|
||||||
this.managementAgent.port = 8010;
|
|
||||||
this.managementAgent.host = 'localhost';
|
|
||||||
if (config.managementAgent !== undefined) {
|
|
||||||
if (config.managementAgent.port !== undefined) {
|
|
||||||
assert(Number.isInteger(config.managementAgent.port)
|
|
||||||
&& config.managementAgent.port > 0,
|
|
||||||
'bad config: managementAgent port must be a positive ' +
|
|
||||||
'integer');
|
|
||||||
this.managementAgent.port = config.managementAgent.port;
|
|
||||||
}
|
|
||||||
if (config.managementAgent.host !== undefined) {
|
|
||||||
assert.strictEqual(typeof config.managementAgent.host, 'string',
|
|
||||||
'bad config: management agent host must ' +
|
|
||||||
'be a string');
|
|
||||||
this.managementAgent.host = config.managementAgent.host;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ephemeral token to protect the reporting endpoint:
|
// Ephemeral token to protect the reporting endpoint:
|
||||||
// try inherited from parent first, then hardcoded in conf file,
|
// try inherited from parent first, then hardcoded in conf file,
|
||||||
// then create a fresh one as last resort.
|
// then create a fresh one as last resort.
|
||||||
|
@ -1467,58 +1576,114 @@ class Config extends EventEmitter {
|
||||||
requestsConfigAssert(config.requests);
|
requestsConfigAssert(config.requests);
|
||||||
this.requests = config.requests;
|
this.requests = config.requests;
|
||||||
}
|
}
|
||||||
if (process.env.VERSION_ID_ENCODING_TYPE !== undefined) {
|
// CLDSRV-378: on 8.x branches, null version compatibility
|
||||||
// override config
|
// mode is enforced because null keys are not supported by the
|
||||||
config.versionIdEncodingType = process.env.VERSION_ID_ENCODING_TYPE;
|
// MongoDB backend.
|
||||||
}
|
this.nullVersionCompatMode = true;
|
||||||
if (config.versionIdEncodingType) {
|
|
||||||
if (config.versionIdEncodingType === 'hex') {
|
|
||||||
this.versionIdEncodingType = versionIdUtils.ENC_TYPE_HEX;
|
|
||||||
} else if (config.versionIdEncodingType === 'base62') {
|
|
||||||
this.versionIdEncodingType = versionIdUtils.ENC_TYPE_BASE62;
|
|
||||||
} else {
|
|
||||||
throw new Error(`Invalid versionIdEncodingType: ${config.versionIdEncodingType}`);
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
this.versionIdEncodingType = versionIdUtils.ENC_TYPE_HEX;
|
|
||||||
}
|
|
||||||
if (config.bucketNotificationDestinations) {
|
if (config.bucketNotificationDestinations) {
|
||||||
this.bucketNotificationDestinations = bucketNotifAssert(config.bucketNotificationDestinations);
|
this.bucketNotificationDestinations = bucketNotifAssert(config.bucketNotificationDestinations);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
this.lifecycleRoleName = config.lifecycleRoleName || null;
|
||||||
|
|
||||||
// Version of the configuration we're running under
|
// Version of the configuration we're running under
|
||||||
this.overlayVersion = config.overlayVersion || 0;
|
this.overlayVersion = config.overlayVersion || 0;
|
||||||
|
|
||||||
|
this._setTimeOptions();
|
||||||
|
this.multiObjectDeleteConcurrency = constants.multiObjectDeleteConcurrency;
|
||||||
|
const extractedNumber = Number.parseInt(config.multiObjectDeleteConcurrency, 10);
|
||||||
|
if (!isNaN(extractedNumber) && extractedNumber > 0 && extractedNumber < 1000) {
|
||||||
|
this.multiObjectDeleteConcurrency = extractedNumber;
|
||||||
|
}
|
||||||
|
|
||||||
|
this.multiObjectDeleteEnableOptimizations = true;
|
||||||
|
if (config.multiObjectDeleteEnableOptimizations === false) {
|
||||||
|
this.multiObjectDeleteEnableOptimizations = false;
|
||||||
|
}
|
||||||
|
|
||||||
|
this.testingMode = config.testingMode || false;
|
||||||
|
|
||||||
|
this.maxScannedLifecycleListingEntries = constants.maxScannedLifecycleListingEntries;
|
||||||
|
if (config.maxScannedLifecycleListingEntries !== undefined) {
|
||||||
|
// maxScannedLifecycleListingEntries > 2 is required as a minimum because we must
|
||||||
|
// scan at least three entries to determine version eligibility.
|
||||||
|
// Two entries representing the master key and the following one representing the non-current version.
|
||||||
|
assert(Number.isInteger(config.maxScannedLifecycleListingEntries) &&
|
||||||
|
config.maxScannedLifecycleListingEntries > 2,
|
||||||
|
'bad config: maxScannedLifecycleListingEntries must be greater than 2');
|
||||||
|
this.maxScannedLifecycleListingEntries = config.maxScannedLifecycleListingEntries;
|
||||||
|
}
|
||||||
|
|
||||||
|
this._configureBackends(config);
|
||||||
|
}
|
||||||
|
|
||||||
|
_setTimeOptions() {
|
||||||
|
// NOTE: EXPIRE_ONE_DAY_EARLIER and TRANSITION_ONE_DAY_EARLIER are deprecated in favor of
|
||||||
|
// TIME_PROGRESSION_FACTOR which decreases the weight attributed to a day in order to among other things
|
||||||
|
// expedite the lifecycle of objects.
|
||||||
|
|
||||||
|
// moves lifecycle expiration deadlines 1 day earlier, mostly for testing
|
||||||
|
const expireOneDayEarlier = process.env.EXPIRE_ONE_DAY_EARLIER === 'true';
|
||||||
|
// moves lifecycle transition deadlines 1 day earlier, mostly for testing
|
||||||
|
const transitionOneDayEarlier = process.env.TRANSITION_ONE_DAY_EARLIER === 'true';
|
||||||
|
// decreases the weight attributed to a day in order to expedite the lifecycle of objects.
|
||||||
|
const timeProgressionFactor = Number.parseInt(process.env.TIME_PROGRESSION_FACTOR, 10) || 1;
|
||||||
|
|
||||||
|
const isIncompatible = (expireOneDayEarlier || transitionOneDayEarlier) && (timeProgressionFactor > 1);
|
||||||
|
assert(!isIncompatible, 'The environment variables "EXPIRE_ONE_DAY_EARLIER" or ' +
|
||||||
|
'"TRANSITION_ONE_DAY_EARLIER" are not compatible with the "TIME_PROGRESSION_FACTOR" variable.');
|
||||||
|
|
||||||
|
// The scaledMsPerDay value is initially set to the number of milliseconds per day
|
||||||
|
// (24 * 60 * 60 * 1000) as the default value.
|
||||||
|
// However, during testing, if the timeProgressionFactor is defined and greater than 1,
|
||||||
|
// the scaledMsPerDay value is decreased. This adjustment allows for simulating actions occurring
|
||||||
|
// earlier in time.
|
||||||
|
const scaledMsPerDay = scaleMsPerDay(timeProgressionFactor);
|
||||||
|
|
||||||
|
this.timeOptions = {
|
||||||
|
expireOneDayEarlier,
|
||||||
|
transitionOneDayEarlier,
|
||||||
|
timeProgressionFactor,
|
||||||
|
scaledMsPerDay,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
getTimeOptions() {
|
||||||
|
return this.timeOptions;
|
||||||
}
|
}
|
||||||
|
|
||||||
_getAuthData() {
|
_getAuthData() {
|
||||||
return require(findConfigFile(process.env.S3AUTH_CONFIG || 'authdata.json'));
|
return JSON.parse(fs.readFileSync(findConfigFile(process.env.S3AUTH_CONFIG || this.authdata), { encoding: 'utf-8' }));
|
||||||
}
|
}
|
||||||
|
|
||||||
_configureBackends() {
|
_configureBackends(config) {
|
||||||
|
const backends = config.backends || {};
|
||||||
/**
|
/**
|
||||||
* Configure the backends for Authentication, Data and Metadata.
|
* Configure the backends for Authentication, Data and Metadata.
|
||||||
*/
|
*/
|
||||||
let auth = 'mem';
|
let auth = backends.auth || 'mem';
|
||||||
let data = 'multiple';
|
let data = backends.data || 'multiple';
|
||||||
let metadata = 'file';
|
let metadata = backends.metadata || 'file';
|
||||||
let kms = 'file';
|
let kms = backends.kms || 'file';
|
||||||
|
let quota = backends.quota || 'none';
|
||||||
if (process.env.S3BACKEND) {
|
if (process.env.S3BACKEND) {
|
||||||
const validBackends = ['mem', 'file', 'scality', 'cdmi'];
|
const validBackends = ['mem', 'file', 'scality', 'cdmi'];
|
||||||
assert(validBackends.indexOf(process.env.S3BACKEND) > -1,
|
assert(validBackends.indexOf(process.env.S3BACKEND) > -1,
|
||||||
'bad environment variable: S3BACKEND environment variable ' +
|
'bad environment variable: S3BACKEND environment variable ' +
|
||||||
'should be one of mem/file/scality/cdmi'
|
'should be one of mem/file/scality/cdmi'
|
||||||
);
|
);
|
||||||
auth = process.env.S3BACKEND;
|
auth = process.env.S3BACKEND == 'scality' ? 'scality' : 'mem';
|
||||||
data = process.env.S3BACKEND;
|
data = process.env.S3BACKEND;
|
||||||
metadata = process.env.S3BACKEND;
|
metadata = process.env.S3BACKEND;
|
||||||
kms = process.env.S3BACKEND;
|
kms = process.env.S3BACKEND;
|
||||||
}
|
}
|
||||||
if (process.env.S3VAULT) {
|
if (process.env.S3VAULT) {
|
||||||
auth = process.env.S3VAULT;
|
auth = process.env.S3VAULT;
|
||||||
|
auth = (auth === 'file' || auth === 'mem' || auth === 'cdmi' ? 'mem' : auth);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (auth === 'file' || auth === 'mem' || auth === 'cdmi') {
|
if (auth === 'file' || auth === 'mem' || auth === 'cdmi') {
|
||||||
// Auth only checks for 'mem' since mem === file
|
// Auth only checks for 'mem' since mem === file
|
||||||
auth = 'mem';
|
|
||||||
let authData;
|
let authData;
|
||||||
if (process.env.SCALITY_ACCESS_KEY_ID &&
|
if (process.env.SCALITY_ACCESS_KEY_ID &&
|
||||||
process.env.SCALITY_SECRET_ACCESS_KEY) {
|
process.env.SCALITY_SECRET_ACCESS_KEY) {
|
||||||
|
@ -1547,10 +1712,10 @@ class Config extends EventEmitter {
|
||||||
'should be one of mem/file/scality/multiple'
|
'should be one of mem/file/scality/multiple'
|
||||||
);
|
);
|
||||||
data = process.env.S3DATA;
|
data = process.env.S3DATA;
|
||||||
}
|
|
||||||
if (data === 'scality' || data === 'multiple') {
|
if (data === 'scality' || data === 'multiple') {
|
||||||
data = 'multiple';
|
data = 'multiple';
|
||||||
}
|
}
|
||||||
|
}
|
||||||
assert(this.locationConstraints !== undefined &&
|
assert(this.locationConstraints !== undefined &&
|
||||||
this.restEndpoints !== undefined,
|
this.restEndpoints !== undefined,
|
||||||
'bad config: locationConstraints and restEndpoints must be set'
|
'bad config: locationConstraints and restEndpoints must be set'
|
||||||
|
@ -1562,18 +1727,18 @@ class Config extends EventEmitter {
|
||||||
if (process.env.S3KMS) {
|
if (process.env.S3KMS) {
|
||||||
kms = process.env.S3KMS;
|
kms = process.env.S3KMS;
|
||||||
}
|
}
|
||||||
|
if (process.env.S3QUOTA) {
|
||||||
|
quota = process.env.S3QUOTA;
|
||||||
|
}
|
||||||
this.backends = {
|
this.backends = {
|
||||||
auth,
|
auth,
|
||||||
data,
|
data,
|
||||||
metadata,
|
metadata,
|
||||||
kms,
|
kms,
|
||||||
|
quota,
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
_verifyRedisPassword(password) {
|
|
||||||
return typeof password === 'string';
|
|
||||||
}
|
|
||||||
|
|
||||||
setAuthDataAccounts(accounts) {
|
setAuthDataAccounts(accounts) {
|
||||||
this.authData.accounts = accounts;
|
this.authData.accounts = accounts;
|
||||||
this.emit('authdata-update');
|
this.emit('authdata-update');
|
||||||
|
@ -1621,8 +1786,7 @@ class Config extends EventEmitter {
|
||||||
getAzureEndpoint(locationConstraint) {
|
getAzureEndpoint(locationConstraint) {
|
||||||
let azureStorageEndpoint =
|
let azureStorageEndpoint =
|
||||||
process.env[`${locationConstraint}_AZURE_STORAGE_ENDPOINT`] ||
|
process.env[`${locationConstraint}_AZURE_STORAGE_ENDPOINT`] ||
|
||||||
this.locationConstraints[locationConstraint]
|
this.locationConstraints[locationConstraint].details.azureStorageEndpoint;
|
||||||
.details.azureStorageEndpoint;
|
|
||||||
if (!azureStorageEndpoint.endsWith('/')) {
|
if (!azureStorageEndpoint.endsWith('/')) {
|
||||||
// append the trailing slash
|
// append the trailing slash
|
||||||
azureStorageEndpoint = `${azureStorageEndpoint}/`;
|
azureStorageEndpoint = `${azureStorageEndpoint}/`;
|
||||||
|
@ -1631,23 +1795,40 @@ class Config extends EventEmitter {
|
||||||
}
|
}
|
||||||
|
|
||||||
getAzureStorageAccountName(locationConstraint) {
|
getAzureStorageAccountName(locationConstraint) {
|
||||||
const { azureStorageAccountName } =
|
const accountName = azureGetStorageAccountName(
|
||||||
this.locationConstraints[locationConstraint].details;
|
locationConstraint,
|
||||||
const storageAccountNameFromEnv =
|
this.locationConstraints[locationConstraint].details
|
||||||
process.env[`${locationConstraint}_AZURE_STORAGE_ACCOUNT_NAME`];
|
);
|
||||||
return storageAccountNameFromEnv || azureStorageAccountName;
|
if (accountName) {
|
||||||
|
return accountName;
|
||||||
|
}
|
||||||
|
|
||||||
|
// For SAS & ServicePrincipal, retrieve the accountName from the endpoint
|
||||||
|
const endpoint = this.getAzureEndpoint(locationConstraint);
|
||||||
|
const url = new URL(endpoint);
|
||||||
|
const fragments = url.hostname.split('.', 3);
|
||||||
|
if (fragments.length === 3 && fragments[1] === 'blob') {
|
||||||
|
return fragments[0];
|
||||||
|
}
|
||||||
|
|
||||||
|
// We should always reach here, though it may not be the case for "mock" servers,
|
||||||
|
// where the accoutName is in the path
|
||||||
|
const path = url.pathname.replace(/^\//, '').replace(/\/$/, '');
|
||||||
|
if (path) {
|
||||||
|
return path;
|
||||||
|
}
|
||||||
|
|
||||||
|
// We should not reach here; if that happens, use the endpoint itself, which
|
||||||
|
// should be close-enough since this function is used for detecting when two
|
||||||
|
// locations actually point to the same account
|
||||||
|
return endpoint;
|
||||||
}
|
}
|
||||||
|
|
||||||
getAzureStorageCredentials(locationConstraint) {
|
getAzureStorageCredentials(locationConstraint) {
|
||||||
const { azureStorageAccessKey } =
|
return azureGetLocationCredentials(
|
||||||
this.locationConstraints[locationConstraint].details;
|
locationConstraint,
|
||||||
const storageAccessKeyFromEnv =
|
this.locationConstraints[locationConstraint].details
|
||||||
process.env[`${locationConstraint}_AZURE_STORAGE_ACCESS_KEY`];
|
);
|
||||||
return {
|
|
||||||
storageAccountName:
|
|
||||||
this.getAzureStorageAccountName(locationConstraint),
|
|
||||||
storageAccessKey: storageAccessKeyFromEnv || azureStorageAccessKey,
|
|
||||||
};
|
|
||||||
}
|
}
|
||||||
|
|
||||||
getPfsDaemonEndpoint(locationConstraint) {
|
getPfsDaemonEndpoint(locationConstraint) {
|
||||||
|
@ -1680,13 +1861,25 @@ class Config extends EventEmitter {
|
||||||
.update(instanceId)
|
.update(instanceId)
|
||||||
.digest('hex');
|
.digest('hex');
|
||||||
}
|
}
|
||||||
|
|
||||||
|
isQuotaEnabled() {
|
||||||
|
return !!this.quotaEnabled;
|
||||||
|
}
|
||||||
|
|
||||||
|
isQuotaInflightEnabled() {
|
||||||
|
return this.quota.enableInflights;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
module.exports = {
|
module.exports = {
|
||||||
parseSproxydConfig,
|
parseSproxydConfig,
|
||||||
|
parseRedisConfig,
|
||||||
locationConstraintAssert,
|
locationConstraintAssert,
|
||||||
ConfigObject: Config,
|
ConfigObject: Config,
|
||||||
config: new Config(),
|
config: new Config(),
|
||||||
requestsConfigAssert,
|
requestsConfigAssert,
|
||||||
bucketNotifAssert,
|
bucketNotifAssert,
|
||||||
|
azureGetStorageAccountName,
|
||||||
|
azureGetLocationCredentials,
|
||||||
|
azureArchiveLocationConstraintAssert,
|
||||||
};
|
};
|
||||||
|
|
161
lib/api/api.js
161
lib/api/api.js
|
@ -7,6 +7,7 @@ const bucketDeleteEncryption = require('./bucketDeleteEncryption');
|
||||||
const bucketDeleteWebsite = require('./bucketDeleteWebsite');
|
const bucketDeleteWebsite = require('./bucketDeleteWebsite');
|
||||||
const bucketDeleteLifecycle = require('./bucketDeleteLifecycle');
|
const bucketDeleteLifecycle = require('./bucketDeleteLifecycle');
|
||||||
const bucketDeletePolicy = require('./bucketDeletePolicy');
|
const bucketDeletePolicy = require('./bucketDeletePolicy');
|
||||||
|
const bucketDeleteQuota = require('./bucketDeleteQuota');
|
||||||
const { bucketGet } = require('./bucketGet');
|
const { bucketGet } = require('./bucketGet');
|
||||||
const bucketGetACL = require('./bucketGetACL');
|
const bucketGetACL = require('./bucketGetACL');
|
||||||
const bucketGetCors = require('./bucketGetCors');
|
const bucketGetCors = require('./bucketGetCors');
|
||||||
|
@ -17,12 +18,16 @@ const bucketGetLifecycle = require('./bucketGetLifecycle');
|
||||||
const bucketGetNotification = require('./bucketGetNotification');
|
const bucketGetNotification = require('./bucketGetNotification');
|
||||||
const bucketGetObjectLock = require('./bucketGetObjectLock');
|
const bucketGetObjectLock = require('./bucketGetObjectLock');
|
||||||
const bucketGetPolicy = require('./bucketGetPolicy');
|
const bucketGetPolicy = require('./bucketGetPolicy');
|
||||||
|
const bucketGetQuota = require('./bucketGetQuota');
|
||||||
const bucketGetEncryption = require('./bucketGetEncryption');
|
const bucketGetEncryption = require('./bucketGetEncryption');
|
||||||
const bucketHead = require('./bucketHead');
|
const bucketHead = require('./bucketHead');
|
||||||
const { bucketPut } = require('./bucketPut');
|
const { bucketPut } = require('./bucketPut');
|
||||||
const bucketPutACL = require('./bucketPutACL');
|
const bucketPutACL = require('./bucketPutACL');
|
||||||
const bucketPutCors = require('./bucketPutCors');
|
const bucketPutCors = require('./bucketPutCors');
|
||||||
const bucketPutVersioning = require('./bucketPutVersioning');
|
const bucketPutVersioning = require('./bucketPutVersioning');
|
||||||
|
const bucketPutTagging = require('./bucketPutTagging');
|
||||||
|
const bucketDeleteTagging = require('./bucketDeleteTagging');
|
||||||
|
const bucketGetTagging = require('./bucketGetTagging');
|
||||||
const bucketPutWebsite = require('./bucketPutWebsite');
|
const bucketPutWebsite = require('./bucketPutWebsite');
|
||||||
const bucketPutReplication = require('./bucketPutReplication');
|
const bucketPutReplication = require('./bucketPutReplication');
|
||||||
const bucketPutLifecycle = require('./bucketPutLifecycle');
|
const bucketPutLifecycle = require('./bucketPutLifecycle');
|
||||||
|
@ -30,6 +35,7 @@ const bucketPutNotification = require('./bucketPutNotification');
|
||||||
const bucketPutEncryption = require('./bucketPutEncryption');
|
const bucketPutEncryption = require('./bucketPutEncryption');
|
||||||
const bucketPutPolicy = require('./bucketPutPolicy');
|
const bucketPutPolicy = require('./bucketPutPolicy');
|
||||||
const bucketPutObjectLock = require('./bucketPutObjectLock');
|
const bucketPutObjectLock = require('./bucketPutObjectLock');
|
||||||
|
const bucketUpdateQuota = require('./bucketUpdateQuota');
|
||||||
const bucketGetReplication = require('./bucketGetReplication');
|
const bucketGetReplication = require('./bucketGetReplication');
|
||||||
const bucketDeleteReplication = require('./bucketDeleteReplication');
|
const bucketDeleteReplication = require('./bucketDeleteReplication');
|
||||||
const corsPreflight = require('./corsPreflight');
|
const corsPreflight = require('./corsPreflight');
|
||||||
|
@ -41,7 +47,7 @@ const metadataSearch = require('./metadataSearch');
|
||||||
const { multiObjectDelete } = require('./multiObjectDelete');
|
const { multiObjectDelete } = require('./multiObjectDelete');
|
||||||
const multipartDelete = require('./multipartDelete');
|
const multipartDelete = require('./multipartDelete');
|
||||||
const objectCopy = require('./objectCopy');
|
const objectCopy = require('./objectCopy');
|
||||||
const objectDelete = require('./objectDelete');
|
const { objectDelete } = require('./objectDelete');
|
||||||
const objectDeleteTagging = require('./objectDeleteTagging');
|
const objectDeleteTagging = require('./objectDeleteTagging');
|
||||||
const objectGet = require('./objectGet');
|
const objectGet = require('./objectGet');
|
||||||
const objectGetACL = require('./objectGetACL');
|
const objectGetACL = require('./objectGetACL');
|
||||||
|
@ -56,12 +62,12 @@ const objectPutTagging = require('./objectPutTagging');
|
||||||
const objectPutPart = require('./objectPutPart');
|
const objectPutPart = require('./objectPutPart');
|
||||||
const objectPutCopyPart = require('./objectPutCopyPart');
|
const objectPutCopyPart = require('./objectPutCopyPart');
|
||||||
const objectPutRetention = require('./objectPutRetention');
|
const objectPutRetention = require('./objectPutRetention');
|
||||||
|
const objectRestore = require('./objectRestore');
|
||||||
const prepareRequestContexts
|
const prepareRequestContexts
|
||||||
= require('./apiUtils/authorization/prepareRequestContexts');
|
= require('./apiUtils/authorization/prepareRequestContexts');
|
||||||
const serviceGet = require('./serviceGet');
|
const serviceGet = require('./serviceGet');
|
||||||
const vault = require('../auth/vault');
|
const vault = require('../auth/vault');
|
||||||
const websiteGet = require('./websiteGet');
|
const website = require('./website');
|
||||||
const websiteHead = require('./websiteHead');
|
|
||||||
const writeContinue = require('../utilities/writeContinue');
|
const writeContinue = require('../utilities/writeContinue');
|
||||||
const validateQueryAndHeaders = require('../utilities/validateQueryAndHeaders');
|
const validateQueryAndHeaders = require('../utilities/validateQueryAndHeaders');
|
||||||
const parseCopySource = require('./apiUtils/object/parseCopySource');
|
const parseCopySource = require('./apiUtils/object/parseCopySource');
|
||||||
|
@ -79,6 +85,10 @@ const api = {
|
||||||
// Attach the apiMethod method to the request, so it can used by monitoring in the server
|
// Attach the apiMethod method to the request, so it can used by monitoring in the server
|
||||||
// eslint-disable-next-line no-param-reassign
|
// eslint-disable-next-line no-param-reassign
|
||||||
request.apiMethod = apiMethod;
|
request.apiMethod = apiMethod;
|
||||||
|
// Array of end of API callbacks, used to perform some logic
|
||||||
|
// at the end of an API.
|
||||||
|
// eslint-disable-next-line no-param-reassign
|
||||||
|
request.finalizerHooks = [];
|
||||||
|
|
||||||
const actionLog = monitoringMap[apiMethod];
|
const actionLog = monitoringMap[apiMethod];
|
||||||
if (!actionLog &&
|
if (!actionLog &&
|
||||||
|
@ -113,6 +123,7 @@ const api = {
|
||||||
// no need to check auth on website or cors preflight requests
|
// no need to check auth on website or cors preflight requests
|
||||||
if (apiMethod === 'websiteGet' || apiMethod === 'websiteHead' ||
|
if (apiMethod === 'websiteGet' || apiMethod === 'websiteHead' ||
|
||||||
apiMethod === 'corsPreflight') {
|
apiMethod === 'corsPreflight') {
|
||||||
|
request.actionImplicitDenies = false;
|
||||||
return this[apiMethod](request, log, callback);
|
return this[apiMethod](request, log, callback);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -135,15 +146,25 @@ const api = {
|
||||||
|
|
||||||
const requestContexts = prepareRequestContexts(apiMethod, request,
|
const requestContexts = prepareRequestContexts(apiMethod, request,
|
||||||
sourceBucket, sourceObject, sourceVersionId);
|
sourceBucket, sourceObject, sourceVersionId);
|
||||||
|
// Extract all the _apiMethods and store them in an array
|
||||||
|
const apiMethods = requestContexts ? requestContexts.map(context => context._apiMethod) : [];
|
||||||
|
// Attach the names to the current request
|
||||||
|
// eslint-disable-next-line no-param-reassign
|
||||||
|
request.apiMethods = apiMethods;
|
||||||
|
|
||||||
function checkAuthResults(authResults) {
|
function checkAuthResults(authResults) {
|
||||||
let returnTagCount = true;
|
let returnTagCount = true;
|
||||||
|
const isImplicitDeny = {};
|
||||||
|
let isOnlyImplicitDeny = true;
|
||||||
if (apiMethod === 'objectGet') {
|
if (apiMethod === 'objectGet') {
|
||||||
// first item checks s3:GetObject(Version) action
|
// first item checks s3:GetObject(Version) action
|
||||||
if (!authResults[0].isAllowed) {
|
if (!authResults[0].isAllowed && !authResults[0].isImplicit) {
|
||||||
log.trace('get object authorization denial from Vault');
|
log.trace('get object authorization denial from Vault');
|
||||||
return errors.AccessDenied;
|
return errors.AccessDenied;
|
||||||
}
|
}
|
||||||
|
// TODO add support for returnTagCount in the bucket policy
|
||||||
|
// checks
|
||||||
|
isImplicitDeny[authResults[0].action] = authResults[0].isImplicit;
|
||||||
// second item checks s3:GetObject(Version)Tagging action
|
// second item checks s3:GetObject(Version)Tagging action
|
||||||
if (!authResults[1].isAllowed) {
|
if (!authResults[1].isAllowed) {
|
||||||
log.trace('get tagging authorization denial ' +
|
log.trace('get tagging authorization denial ' +
|
||||||
|
@ -152,35 +173,41 @@ const api = {
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
for (let i = 0; i < authResults.length; i++) {
|
for (let i = 0; i < authResults.length; i++) {
|
||||||
if (!authResults[i].isAllowed) {
|
isImplicitDeny[authResults[i].action] = true;
|
||||||
|
if (!authResults[i].isAllowed && !authResults[i].isImplicit) {
|
||||||
|
// Any explicit deny rejects the current API call
|
||||||
log.trace('authorization denial from Vault');
|
log.trace('authorization denial from Vault');
|
||||||
return errors.AccessDenied;
|
return errors.AccessDenied;
|
||||||
}
|
}
|
||||||
|
if (authResults[i].isAllowed) {
|
||||||
|
// If the action is allowed, the result is not implicit
|
||||||
|
// Deny.
|
||||||
|
isImplicitDeny[authResults[i].action] = false;
|
||||||
|
isOnlyImplicitDeny = false;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return returnTagCount;
|
}
|
||||||
|
// These two APIs cannot use ACLs or Bucket Policies, hence, any
|
||||||
|
// implicit deny from vault must be treated as an explicit deny.
|
||||||
|
if ((apiMethod === 'bucketPut' || apiMethod === 'serviceGet') && isOnlyImplicitDeny) {
|
||||||
|
return errors.AccessDenied;
|
||||||
|
}
|
||||||
|
return { returnTagCount, isImplicitDeny };
|
||||||
}
|
}
|
||||||
|
|
||||||
return async.waterfall([
|
return async.waterfall([
|
||||||
next => auth.server.doAuth(request, log, (err, userInfo, authorizationResults, streamingV4Params) =>
|
next => auth.server.doAuth(
|
||||||
next(err, userInfo, authorizationResults, streamingV4Params), 's3', requestContexts),
|
request, log, (err, userInfo, authorizationResults, streamingV4Params, infos) => {
|
||||||
(userInfo, authorizationResults, streamingV4Params, next) => {
|
|
||||||
if (authorizationResults) {
|
|
||||||
const checkedResults = checkAuthResults(authorizationResults);
|
|
||||||
if (checkedResults instanceof Error) {
|
|
||||||
return next(checkedResults);
|
|
||||||
}
|
|
||||||
returnTagCount = checkedResults;
|
|
||||||
}
|
|
||||||
return tagConditionKeyAuth(authorizationResults, request, requestContexts, apiMethod, log,
|
|
||||||
(err, tagAuthResults, updatedContexts) =>
|
|
||||||
next(err, tagAuthResults, authorizationResults, userInfo, streamingV4Params, updatedContexts));
|
|
||||||
},
|
|
||||||
], (err, tagAuthResults, authorizationResults, userInfo, streamingV4Params, updatedContexts) => {
|
|
||||||
if (err) {
|
if (err) {
|
||||||
|
// VaultClient returns standard errors, but the route requires
|
||||||
|
// Arsenal errors
|
||||||
|
const arsenalError = err.metadata ? err : errors[err.code] || errors.InternalError;
|
||||||
log.trace('authentication error', { error: err });
|
log.trace('authentication error', { error: err });
|
||||||
return callback(err);
|
return next(arsenalError);
|
||||||
}
|
}
|
||||||
|
return next(null, userInfo, authorizationResults, streamingV4Params, infos);
|
||||||
|
}, 's3', requestContexts),
|
||||||
|
(userInfo, authorizationResults, streamingV4Params, infos, next) => {
|
||||||
const authNames = { accountName: userInfo.getAccountDisplayName() };
|
const authNames = { accountName: userInfo.getAccountDisplayName() };
|
||||||
if (userInfo.isRequesterAnIAMUser()) {
|
if (userInfo.isRequesterAnIAMUser()) {
|
||||||
authNames.userName = userInfo.getIAMdisplayName();
|
authNames.userName = userInfo.getIAMdisplayName();
|
||||||
|
@ -189,21 +216,12 @@ const api = {
|
||||||
authNames.sessionName = userInfo.getShortid().split(':')[1];
|
authNames.sessionName = userInfo.getShortid().split(':')[1];
|
||||||
}
|
}
|
||||||
log.addDefaultFields(authNames);
|
log.addDefaultFields(authNames);
|
||||||
if (tagAuthResults) {
|
|
||||||
const checkedResults = checkAuthResults(tagAuthResults);
|
|
||||||
if (checkedResults instanceof Error) {
|
|
||||||
return callback(checkedResults);
|
|
||||||
}
|
|
||||||
returnTagCount = checkedResults;
|
|
||||||
}
|
|
||||||
if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') {
|
if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') {
|
||||||
request._response = response;
|
return next(null, userInfo, authorizationResults, streamingV4Params, infos);
|
||||||
return this[apiMethod](userInfo, request, streamingV4Params,
|
|
||||||
log, callback, authorizationResults);
|
|
||||||
}
|
}
|
||||||
// issue 100 Continue to the client
|
// issue 100 Continue to the client
|
||||||
writeContinue(request, response);
|
writeContinue(request, response);
|
||||||
const MAX_POST_LENGTH = request.method.toUpperCase() === 'POST' ?
|
const MAX_POST_LENGTH = request.method === 'POST' ?
|
||||||
1024 * 1024 : 1024 * 1024 / 2; // 1 MB or 512 KB
|
1024 * 1024 : 1024 * 1024 / 2; // 1 MB or 512 KB
|
||||||
const post = [];
|
const post = [];
|
||||||
let postLength = 0;
|
let postLength = 0;
|
||||||
|
@ -213,52 +231,80 @@ const api = {
|
||||||
if (postLength <= MAX_POST_LENGTH) {
|
if (postLength <= MAX_POST_LENGTH) {
|
||||||
post.push(chunk);
|
post.push(chunk);
|
||||||
}
|
}
|
||||||
return undefined;
|
|
||||||
});
|
});
|
||||||
|
|
||||||
request.on('error', err => {
|
request.on('error', err => {
|
||||||
log.trace('error receiving request', {
|
log.trace('error receiving request', {
|
||||||
error: err,
|
error: err,
|
||||||
});
|
});
|
||||||
return callback(errors.InternalError);
|
return next(errors.InternalError);
|
||||||
});
|
});
|
||||||
|
|
||||||
request.on('end', () => {
|
request.on('end', () => {
|
||||||
if (postLength > MAX_POST_LENGTH) {
|
if (postLength > MAX_POST_LENGTH) {
|
||||||
log.error('body length is too long for request type',
|
log.error('body length is too long for request type',
|
||||||
{ postLength });
|
{ postLength });
|
||||||
return callback(errors.InvalidRequest);
|
return next(errors.InvalidRequest);
|
||||||
}
|
}
|
||||||
// Convert array of post buffers into one string
|
// Convert array of post buffers into one string
|
||||||
request.post = Buffer.concat(post, postLength).toString();
|
request.post = Buffer.concat(post, postLength).toString();
|
||||||
|
return next(null, userInfo, authorizationResults, streamingV4Params, infos);
|
||||||
// IAM policy -Tag condition keys require information from CloudServer for evaluation
|
});
|
||||||
return tagConditionKeyAuth(authorizationResults, request, (updatedContexts || requestContexts),
|
return undefined;
|
||||||
apiMethod, log, (err, tagAuthResults) => {
|
},
|
||||||
|
// Tag condition keys require information from CloudServer for evaluation
|
||||||
|
(userInfo, authorizationResults, streamingV4Params, infos, next) => tagConditionKeyAuth(
|
||||||
|
authorizationResults,
|
||||||
|
request,
|
||||||
|
requestContexts,
|
||||||
|
apiMethod,
|
||||||
|
log,
|
||||||
|
(err, authResultsWithTags) => {
|
||||||
if (err) {
|
if (err) {
|
||||||
log.trace('tag authentication error', { error: err });
|
log.trace('tag authentication error', { error: err });
|
||||||
|
return next(err);
|
||||||
|
}
|
||||||
|
return next(null, userInfo, authResultsWithTags, streamingV4Params, infos);
|
||||||
|
},
|
||||||
|
),
|
||||||
|
], (err, userInfo, authorizationResults, streamingV4Params, infos) => {
|
||||||
|
if (err) {
|
||||||
return callback(err);
|
return callback(err);
|
||||||
}
|
}
|
||||||
if (tagAuthResults) {
|
request.accountQuotas = infos?.accountQuota;
|
||||||
const checkedResults = checkAuthResults(tagAuthResults);
|
if (authorizationResults) {
|
||||||
|
const checkedResults = checkAuthResults(authorizationResults);
|
||||||
if (checkedResults instanceof Error) {
|
if (checkedResults instanceof Error) {
|
||||||
return callback(checkedResults);
|
return callback(checkedResults);
|
||||||
}
|
}
|
||||||
returnTagCount = checkedResults;
|
returnTagCount = checkedResults.returnTagCount;
|
||||||
|
request.actionImplicitDenies = checkedResults.isImplicitDeny;
|
||||||
|
} else {
|
||||||
|
// create an object of keys apiMethods with all values to false:
|
||||||
|
// for backward compatibility, all apiMethods are allowed by default
|
||||||
|
// thus it is explicitly allowed, so implicit deny is false
|
||||||
|
request.actionImplicitDenies = apiMethods.reduce((acc, curr) => {
|
||||||
|
acc[curr] = false;
|
||||||
|
return acc;
|
||||||
|
}, {});
|
||||||
}
|
}
|
||||||
if (apiMethod === 'objectCopy' ||
|
const methodCallback = (err, ...results) => async.forEachLimit(request.finalizerHooks, 5,
|
||||||
apiMethod === 'objectPutCopyPart') {
|
(hook, done) => hook(err, done),
|
||||||
|
() => callback(err, ...results));
|
||||||
|
|
||||||
|
if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') {
|
||||||
|
request._response = response;
|
||||||
|
return this[apiMethod](userInfo, request, streamingV4Params,
|
||||||
|
log, methodCallback, authorizationResults);
|
||||||
|
}
|
||||||
|
if (apiMethod === 'objectCopy' || apiMethod === 'objectPutCopyPart') {
|
||||||
return this[apiMethod](userInfo, request, sourceBucket,
|
return this[apiMethod](userInfo, request, sourceBucket,
|
||||||
sourceObject, sourceVersionId, log, callback);
|
sourceObject, sourceVersionId, log, methodCallback);
|
||||||
}
|
}
|
||||||
if (apiMethod === 'objectGet') {
|
if (apiMethod === 'objectGet') {
|
||||||
return this[apiMethod](userInfo, request,
|
return this[apiMethod](userInfo, request, returnTagCount, log, callback);
|
||||||
returnTagCount, log, callback);
|
|
||||||
}
|
}
|
||||||
return this[apiMethod](userInfo, request, log, callback);
|
return this[apiMethod](userInfo, request, log, methodCallback);
|
||||||
});
|
|
||||||
});
|
|
||||||
return undefined;
|
|
||||||
});
|
});
|
||||||
},
|
},
|
||||||
bucketDelete,
|
bucketDelete,
|
||||||
|
@ -278,15 +324,21 @@ const api = {
|
||||||
bucketPutACL,
|
bucketPutACL,
|
||||||
bucketPutCors,
|
bucketPutCors,
|
||||||
bucketPutVersioning,
|
bucketPutVersioning,
|
||||||
|
bucketPutTagging,
|
||||||
|
bucketDeleteTagging,
|
||||||
|
bucketGetTagging,
|
||||||
bucketPutWebsite,
|
bucketPutWebsite,
|
||||||
bucketPutReplication,
|
bucketPutReplication,
|
||||||
bucketGetReplication,
|
bucketGetReplication,
|
||||||
bucketDeleteReplication,
|
bucketDeleteReplication,
|
||||||
|
bucketDeleteQuota,
|
||||||
bucketPutLifecycle,
|
bucketPutLifecycle,
|
||||||
|
bucketUpdateQuota,
|
||||||
bucketGetLifecycle,
|
bucketGetLifecycle,
|
||||||
bucketDeleteLifecycle,
|
bucketDeleteLifecycle,
|
||||||
bucketPutPolicy,
|
bucketPutPolicy,
|
||||||
bucketGetPolicy,
|
bucketGetPolicy,
|
||||||
|
bucketGetQuota,
|
||||||
bucketDeletePolicy,
|
bucketDeletePolicy,
|
||||||
bucketPutObjectLock,
|
bucketPutObjectLock,
|
||||||
bucketPutNotification,
|
bucketPutNotification,
|
||||||
|
@ -316,9 +368,10 @@ const api = {
|
||||||
objectPutPart,
|
objectPutPart,
|
||||||
objectPutCopyPart,
|
objectPutCopyPart,
|
||||||
objectPutRetention,
|
objectPutRetention,
|
||||||
|
objectRestore,
|
||||||
serviceGet,
|
serviceGet,
|
||||||
websiteGet,
|
websiteGet: website,
|
||||||
websiteHead,
|
websiteHead: website,
|
||||||
};
|
};
|
||||||
|
|
||||||
module.exports = api;
|
module.exports = api;
|
||||||
|
|
|
@ -1,11 +1,23 @@
|
||||||
const { evaluators, actionMaps, RequestContext } = require('arsenal').policies;
|
const { evaluators, actionMaps, RequestContext, requestUtils } = require('arsenal').policies;
|
||||||
|
const { errors } = require('arsenal');
|
||||||
|
const { parseCIDR, isValid } = require('ipaddr.js');
|
||||||
const constants = require('../../../../constants');
|
const constants = require('../../../../constants');
|
||||||
|
const { config } = require('../../../Config');
|
||||||
|
|
||||||
const { allAuthedUsersId, bucketOwnerActions, logId, publicId } = constants;
|
const {
|
||||||
|
allAuthedUsersId,
|
||||||
|
bucketOwnerActions,
|
||||||
|
logId,
|
||||||
|
publicId,
|
||||||
|
arrayOfAllowed,
|
||||||
|
assumedRoleArnResourceType,
|
||||||
|
backbeatLifecycleSessionName,
|
||||||
|
actionsToConsiderAsObjectPut,
|
||||||
|
} = constants;
|
||||||
|
|
||||||
// whitelist buckets to allow public read on objects
|
// whitelist buckets to allow public read on objects
|
||||||
const publicReadBuckets = process.env.ALLOW_PUBLIC_READ_BUCKETS ?
|
const publicReadBuckets = process.env.ALLOW_PUBLIC_READ_BUCKETS
|
||||||
process.env.ALLOW_PUBLIC_READ_BUCKETS.split(',') : [];
|
? process.env.ALLOW_PUBLIC_READ_BUCKETS.split(',') : [];
|
||||||
|
|
||||||
function getServiceAccountProperties(canonicalID) {
|
function getServiceAccountProperties(canonicalID) {
|
||||||
const canonicalIDArray = canonicalID.split('/');
|
const canonicalIDArray = canonicalID.split('/');
|
||||||
|
@ -26,13 +38,41 @@ function isRequesterNonAccountUser(authInfo) {
|
||||||
return authInfo.isRequesterAnIAMUser() || isRequesterASessionUser(authInfo);
|
return authInfo.isRequesterAnIAMUser() || isRequesterASessionUser(authInfo);
|
||||||
}
|
}
|
||||||
|
|
||||||
function checkBucketAcls(bucket, requestType, canonicalID) {
|
/**
|
||||||
|
* Checks the access control for a given bucket based on the request type and user's canonical ID.
|
||||||
|
*
|
||||||
|
* @param {Bucket} bucket - The bucket to check access control for.
|
||||||
|
* @param {string} requestType - The list of s3 actions to check within the API call.
|
||||||
|
* @param {string} canonicalID - The canonical ID of the user making the request.
|
||||||
|
* @param {string} mainApiCall - The main API call (first item of the requestType).
|
||||||
|
*
|
||||||
|
* @returns {boolean} - Returns true if the user has the necessary access rights, otherwise false.
|
||||||
|
*/
|
||||||
|
|
||||||
|
function checkBucketAcls(bucket, requestType, canonicalID, mainApiCall) {
|
||||||
|
// Same logic applies on the Versioned APIs, so let's simplify it.
|
||||||
|
let requestTypeParsed = requestType.endsWith('Version') ?
|
||||||
|
requestType.slice(0, 'Version'.length * -1) : requestType;
|
||||||
|
requestTypeParsed = actionsToConsiderAsObjectPut.includes(requestTypeParsed) ?
|
||||||
|
'objectPut' : requestTypeParsed;
|
||||||
|
const parsedMainApiCall = actionsToConsiderAsObjectPut.includes(mainApiCall) ?
|
||||||
|
'objectPut' : mainApiCall;
|
||||||
if (bucket.getOwner() === canonicalID) {
|
if (bucket.getOwner() === canonicalID) {
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
if (parsedMainApiCall === 'objectGet') {
|
||||||
|
if (requestTypeParsed === 'objectGetTagging') {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (parsedMainApiCall === 'objectPut') {
|
||||||
|
if (arrayOfAllowed.includes(requestTypeParsed)) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
const bucketAcl = bucket.getAcl();
|
const bucketAcl = bucket.getAcl();
|
||||||
if (requestType === 'bucketGet' || requestType === 'bucketHead') {
|
if (requestTypeParsed === 'bucketGet' || requestTypeParsed === 'bucketHead') {
|
||||||
if (bucketAcl.Canned === 'public-read'
|
if (bucketAcl.Canned === 'public-read'
|
||||||
|| bucketAcl.Canned === 'public-read-write'
|
|| bucketAcl.Canned === 'public-read-write'
|
||||||
|| (bucketAcl.Canned === 'authenticated-read'
|
|| (bucketAcl.Canned === 'authenticated-read'
|
||||||
|
@ -50,7 +90,7 @@ function checkBucketAcls(bucket, requestType, canonicalID) {
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if (requestType === 'bucketGetACL') {
|
if (requestTypeParsed === 'bucketGetACL') {
|
||||||
if ((bucketAcl.Canned === 'log-delivery-write'
|
if ((bucketAcl.Canned === 'log-delivery-write'
|
||||||
&& canonicalID === logId)
|
&& canonicalID === logId)
|
||||||
|| bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1
|
|| bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1
|
||||||
|
@ -66,7 +106,7 @@ function checkBucketAcls(bucket, requestType, canonicalID) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (requestType === 'bucketPutACL') {
|
if (requestTypeParsed === 'bucketPutACL') {
|
||||||
if (bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1
|
if (bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1
|
||||||
|| bucketAcl.WRITE_ACP.indexOf(canonicalID) > -1) {
|
|| bucketAcl.WRITE_ACP.indexOf(canonicalID) > -1) {
|
||||||
return true;
|
return true;
|
||||||
|
@ -80,11 +120,7 @@ function checkBucketAcls(bucket, requestType, canonicalID) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (requestType === 'bucketDelete' && bucket.getOwner() === canonicalID) {
|
if (requestTypeParsed === 'objectDelete' || requestTypeParsed === 'objectPut') {
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (requestType === 'objectDelete' || requestType === 'objectPut') {
|
|
||||||
if (bucketAcl.Canned === 'public-read-write'
|
if (bucketAcl.Canned === 'public-read-write'
|
||||||
|| bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1
|
|| bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1
|
||||||
|| bucketAcl.WRITE.indexOf(canonicalID) > -1) {
|
|| bucketAcl.WRITE.indexOf(canonicalID) > -1) {
|
||||||
|
@ -104,25 +140,39 @@ function checkBucketAcls(bucket, requestType, canonicalID) {
|
||||||
// objectPutACL, objectGetACL, objectHead or objectGet, the bucket
|
// objectPutACL, objectGetACL, objectHead or objectGet, the bucket
|
||||||
// authorization check should just return true so can move on to check
|
// authorization check should just return true so can move on to check
|
||||||
// rights at the object level.
|
// rights at the object level.
|
||||||
return (requestType === 'objectPutACL' || requestType === 'objectGetACL' ||
|
return (requestTypeParsed === 'objectPutACL' || requestTypeParsed === 'objectGetACL'
|
||||||
requestType === 'objectGet' || requestType === 'objectHead');
|
|| requestTypeParsed === 'objectGet' || requestTypeParsed === 'objectHead');
|
||||||
}
|
}
|
||||||
|
|
||||||
function checkObjectAcls(bucket, objectMD, requestType, canonicalID) {
|
function checkObjectAcls(bucket, objectMD, requestType, canonicalID, requesterIsNotUser,
|
||||||
|
isUserUnauthenticated, mainApiCall) {
|
||||||
const bucketOwner = bucket.getOwner();
|
const bucketOwner = bucket.getOwner();
|
||||||
|
const requestTypeParsed = actionsToConsiderAsObjectPut.includes(requestType) ?
|
||||||
|
'objectPut' : requestType;
|
||||||
|
const parsedMainApiCall = actionsToConsiderAsObjectPut.includes(mainApiCall) ?
|
||||||
|
'objectPut' : mainApiCall;
|
||||||
// acls don't distinguish between users and accounts, so both should be allowed
|
// acls don't distinguish between users and accounts, so both should be allowed
|
||||||
if (bucketOwnerActions.includes(requestType)
|
if (bucketOwnerActions.includes(requestTypeParsed)
|
||||||
&& (bucketOwner === canonicalID)) {
|
&& (bucketOwner === canonicalID)) {
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
if (objectMD['owner-id'] === canonicalID) {
|
if (objectMD['owner-id'] === canonicalID) {
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Backward compatibility
|
||||||
|
if (parsedMainApiCall === 'objectGet') {
|
||||||
|
if ((isUserUnauthenticated || (requesterIsNotUser && bucketOwner === objectMD['owner-id']))
|
||||||
|
&& requestTypeParsed === 'objectGetTagging') {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if (!objectMD.acl) {
|
if (!objectMD.acl) {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (requestType === 'objectGet' || requestType === 'objectHead') {
|
if (requestTypeParsed === 'objectGet' || requestTypeParsed === 'objectHead') {
|
||||||
if (objectMD.acl.Canned === 'public-read'
|
if (objectMD.acl.Canned === 'public-read'
|
||||||
|| objectMD.acl.Canned === 'public-read-write'
|
|| objectMD.acl.Canned === 'public-read-write'
|
||||||
|| (objectMD.acl.Canned === 'authenticated-read'
|
|| (objectMD.acl.Canned === 'authenticated-read'
|
||||||
|
@ -148,11 +198,11 @@ function checkObjectAcls(bucket, objectMD, requestType, canonicalID) {
|
||||||
|
|
||||||
// User is already authorized on the bucket for FULL_CONTROL or WRITE or
|
// User is already authorized on the bucket for FULL_CONTROL or WRITE or
|
||||||
// bucket has canned ACL public-read-write
|
// bucket has canned ACL public-read-write
|
||||||
if (requestType === 'objectPut' || requestType === 'objectDelete') {
|
if (requestTypeParsed === 'objectPut' || requestTypeParsed === 'objectDelete') {
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (requestType === 'objectPutACL') {
|
if (requestTypeParsed === 'objectPutACL') {
|
||||||
if ((objectMD.acl.Canned === 'bucket-owner-full-control'
|
if ((objectMD.acl.Canned === 'bucket-owner-full-control'
|
||||||
&& bucketOwner === canonicalID)
|
&& bucketOwner === canonicalID)
|
||||||
|| objectMD.acl.FULL_CONTROL.indexOf(canonicalID) > -1
|
|| objectMD.acl.FULL_CONTROL.indexOf(canonicalID) > -1
|
||||||
|
@ -168,7 +218,7 @@ function checkObjectAcls(bucket, objectMD, requestType, canonicalID) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (requestType === 'objectGetACL') {
|
if (requestTypeParsed === 'objectGetACL') {
|
||||||
if ((objectMD.acl.Canned === 'bucket-owner-full-control'
|
if ((objectMD.acl.Canned === 'bucket-owner-full-control'
|
||||||
&& bucketOwner === canonicalID)
|
&& bucketOwner === canonicalID)
|
||||||
|| objectMD.acl.FULL_CONTROL.indexOf(canonicalID) > -1
|
|| objectMD.acl.FULL_CONTROL.indexOf(canonicalID) > -1
|
||||||
|
@ -187,9 +237,9 @@ function checkObjectAcls(bucket, objectMD, requestType, canonicalID) {
|
||||||
// allow public reads on buckets that are whitelisted for anonymous reads
|
// allow public reads on buckets that are whitelisted for anonymous reads
|
||||||
// TODO: remove this after bucket policies are implemented
|
// TODO: remove this after bucket policies are implemented
|
||||||
const bucketAcl = bucket.getAcl();
|
const bucketAcl = bucket.getAcl();
|
||||||
const allowPublicReads = publicReadBuckets.includes(bucket.getName()) &&
|
const allowPublicReads = publicReadBuckets.includes(bucket.getName())
|
||||||
bucketAcl.Canned === 'public-read' &&
|
&& bucketAcl.Canned === 'public-read'
|
||||||
(requestType === 'objectGet' || requestType === 'objectHead');
|
&& (requestTypeParsed === 'objectGet' || requestTypeParsed === 'objectHead');
|
||||||
if (allowPublicReads) {
|
if (allowPublicReads) {
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
@ -216,6 +266,20 @@ function _checkBucketPolicyResources(request, resource, log) {
|
||||||
return evaluators.isResourceApplicable(requestContext, resource, log);
|
return evaluators.isResourceApplicable(requestContext, resource, log);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
function _checkBucketPolicyConditions(request, conditions, log) {
|
||||||
|
const ip = request ? requestUtils.getClientIp(request, config) : undefined;
|
||||||
|
if (!conditions) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
// build request context from the request!
|
||||||
|
const requestContext = new RequestContext(request.headers, request.query,
|
||||||
|
request.bucketName, request.objectKey, ip,
|
||||||
|
request.connection.encrypted, request.resourceType, 's3', null, null,
|
||||||
|
null, null, null, null, null, null, null, null, null,
|
||||||
|
request.objectLockRetentionDays);
|
||||||
|
return evaluators.meetConditions(requestContext, conditions, log);
|
||||||
|
}
|
||||||
|
|
||||||
function _getAccountId(arn) {
|
function _getAccountId(arn) {
|
||||||
// account or user arn is of format 'arn:aws:iam::<12-digit-acct-id>:etc...
|
// account or user arn is of format 'arn:aws:iam::<12-digit-acct-id>:etc...
|
||||||
return arn.substr(13, 12);
|
return arn.substr(13, 12);
|
||||||
|
@ -260,11 +324,11 @@ function _checkPrincipals(canonicalID, arn, principal) {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
function checkBucketPolicy(policy, requestType, canonicalID, arn, bucketOwner, log, request) {
|
function checkBucketPolicy(policy, requestType, canonicalID, arn, bucketOwner, log, request, actionImplicitDenies) {
|
||||||
let permission = 'defaultDeny';
|
let permission = 'defaultDeny';
|
||||||
// if requester is user within bucket owner account, actions should be
|
// if requester is user within bucket owner account, actions should be
|
||||||
// allowed unless explicitly denied (assumes allowed by IAM policy)
|
// allowed unless explicitly denied (assumes allowed by IAM policy)
|
||||||
if (bucketOwner === canonicalID) {
|
if (bucketOwner === canonicalID && actionImplicitDenies[requestType] === false) {
|
||||||
permission = 'allow';
|
permission = 'allow';
|
||||||
}
|
}
|
||||||
let copiedStatement = JSON.parse(JSON.stringify(policy.Statement));
|
let copiedStatement = JSON.parse(JSON.stringify(policy.Statement));
|
||||||
|
@ -273,12 +337,13 @@ function checkBucketPolicy(policy, requestType, canonicalID, arn, bucketOwner, l
|
||||||
const principalMatch = _checkPrincipals(canonicalID, arn, s.Principal);
|
const principalMatch = _checkPrincipals(canonicalID, arn, s.Principal);
|
||||||
const actionMatch = _checkBucketPolicyActions(requestType, s.Action, log);
|
const actionMatch = _checkBucketPolicyActions(requestType, s.Action, log);
|
||||||
const resourceMatch = _checkBucketPolicyResources(request, s.Resource, log);
|
const resourceMatch = _checkBucketPolicyResources(request, s.Resource, log);
|
||||||
|
const conditionsMatch = _checkBucketPolicyConditions(request, s.Condition, log);
|
||||||
|
|
||||||
if (principalMatch && actionMatch && resourceMatch && s.Effect === 'Deny') {
|
if (principalMatch && actionMatch && resourceMatch && conditionsMatch && s.Effect === 'Deny') {
|
||||||
// explicit deny trumps any allows, so return immediately
|
// explicit deny trumps any allows, so return immediately
|
||||||
return 'explicitDeny';
|
return 'explicitDeny';
|
||||||
}
|
}
|
||||||
if (principalMatch && actionMatch && resourceMatch && s.Effect === 'Allow') {
|
if (principalMatch && actionMatch && resourceMatch && conditionsMatch && s.Effect === 'Allow') {
|
||||||
permission = 'allow';
|
permission = 'allow';
|
||||||
}
|
}
|
||||||
copiedStatement = copiedStatement.splice(1);
|
copiedStatement = copiedStatement.splice(1);
|
||||||
|
@ -286,7 +351,37 @@ function checkBucketPolicy(policy, requestType, canonicalID, arn, bucketOwner, l
|
||||||
return permission;
|
return permission;
|
||||||
}
|
}
|
||||||
|
|
||||||
function isBucketAuthorized(bucket, requestType, canonicalID, authInfo, log, request) {
|
function processBucketPolicy(requestType, bucket, canonicalID, arn, bucketOwner, log,
|
||||||
|
request, aclPermission, results, actionImplicitDenies) {
|
||||||
|
const bucketPolicy = bucket.getBucketPolicy();
|
||||||
|
let processedResult = results[requestType];
|
||||||
|
if (!bucketPolicy) {
|
||||||
|
processedResult = actionImplicitDenies[requestType] === false && aclPermission;
|
||||||
|
} else {
|
||||||
|
const bucketPolicyPermission = checkBucketPolicy(bucketPolicy, requestType, canonicalID, arn,
|
||||||
|
bucketOwner, log, request, actionImplicitDenies);
|
||||||
|
|
||||||
|
if (bucketPolicyPermission === 'explicitDeny') {
|
||||||
|
processedResult = false;
|
||||||
|
} else if (bucketPolicyPermission === 'allow') {
|
||||||
|
processedResult = true;
|
||||||
|
} else {
|
||||||
|
processedResult = actionImplicitDenies[requestType] === false && aclPermission;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return processedResult;
|
||||||
|
}
|
||||||
|
|
||||||
|
function isBucketAuthorized(bucket, requestTypesInput, canonicalID, authInfo, log, request,
|
||||||
|
actionImplicitDeniesInput = {}, isWebsite = false) {
|
||||||
|
const requestTypes = Array.isArray(requestTypesInput) ? requestTypesInput : [requestTypesInput];
|
||||||
|
const actionImplicitDenies = !actionImplicitDeniesInput ? {} : actionImplicitDeniesInput;
|
||||||
|
const mainApiCall = requestTypes[0];
|
||||||
|
const results = {};
|
||||||
|
return requestTypes.every(_requestType => {
|
||||||
|
// By default, all missing actions are defined as allowed from IAM, to be
|
||||||
|
// backward compatible
|
||||||
|
actionImplicitDenies[_requestType] = actionImplicitDenies[_requestType] || false;
|
||||||
// Check to see if user is authorized to perform a
|
// Check to see if user is authorized to perform a
|
||||||
// particular action on bucket based on ACLs.
|
// particular action on bucket based on ACLs.
|
||||||
// TODO: Add IAM checks
|
// TODO: Add IAM checks
|
||||||
|
@ -297,69 +392,100 @@ function isBucketAuthorized(bucket, requestType, canonicalID, authInfo, log, req
|
||||||
arn = authInfo.getArn();
|
arn = authInfo.getArn();
|
||||||
}
|
}
|
||||||
// if the bucket owner is an account, users should not have default access
|
// if the bucket owner is an account, users should not have default access
|
||||||
if (((bucket.getOwner() === canonicalID) && requesterIsNotUser)
|
if ((bucket.getOwner() === canonicalID) && requesterIsNotUser || isServiceAccount(canonicalID)) {
|
||||||
|| isServiceAccount(canonicalID)) {
|
results[_requestType] = actionImplicitDenies[_requestType] === false;
|
||||||
return true;
|
return results[_requestType];
|
||||||
}
|
}
|
||||||
const aclPermission = checkBucketAcls(bucket, requestType, canonicalID);
|
const aclPermission = checkBucketAcls(bucket, _requestType, canonicalID, mainApiCall);
|
||||||
const bucketPolicy = bucket.getBucketPolicy();
|
// In case of error bucket access is checked with bucketGet
|
||||||
if (!bucketPolicy) {
|
// For website, bucket policy only uses objectGet and ignores bucketGet
|
||||||
return aclPermission;
|
// https://docs.aws.amazon.com/AmazonS3/latest/userguide/WebsiteAccessPermissionsReqd.html
|
||||||
|
// bucketGet should be used to check acl but switched to objectGet for bucket policy
|
||||||
|
if (isWebsite && _requestType === 'bucketGet') {
|
||||||
|
// eslint-disable-next-line no-param-reassign
|
||||||
|
_requestType = 'objectGet';
|
||||||
|
actionImplicitDenies.objectGet = actionImplicitDenies.objectGet || false;
|
||||||
}
|
}
|
||||||
const bucketPolicyPermission = checkBucketPolicy(bucketPolicy, requestType,
|
return processBucketPolicy(_requestType, bucket, canonicalID, arn, bucket.getOwner(), log,
|
||||||
canonicalID, arn, bucket.getOwner(), log, request);
|
request, aclPermission, results, actionImplicitDenies);
|
||||||
if (bucketPolicyPermission === 'explicitDeny') {
|
});
|
||||||
return false;
|
|
||||||
}
|
|
||||||
return (aclPermission || (bucketPolicyPermission === 'allow'));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
function isObjAuthorized(bucket, objectMD, requestType, canonicalID, authInfo, log, request) {
|
function evaluateBucketPolicyWithIAM(bucket, requestTypesInput, canonicalID, authInfo, actionImplicitDeniesInput = {},
|
||||||
|
log, request) {
|
||||||
|
const requestTypes = Array.isArray(requestTypesInput) ? requestTypesInput : [requestTypesInput];
|
||||||
|
const actionImplicitDenies = !actionImplicitDeniesInput ? {} : actionImplicitDeniesInput;
|
||||||
|
const results = {};
|
||||||
|
return requestTypes.every(_requestType => {
|
||||||
|
// By default, all missing actions are defined as allowed from IAM, to be
|
||||||
|
// backward compatible
|
||||||
|
actionImplicitDenies[_requestType] = actionImplicitDenies[_requestType] || false;
|
||||||
|
let arn = null;
|
||||||
|
if (authInfo) {
|
||||||
|
arn = authInfo.getArn();
|
||||||
|
}
|
||||||
|
return processBucketPolicy(_requestType, bucket, canonicalID, arn, bucket.getOwner(), log,
|
||||||
|
request, true, results, actionImplicitDenies);
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
function isObjAuthorized(bucket, objectMD, requestTypesInput, canonicalID, authInfo, log, request,
|
||||||
|
actionImplicitDeniesInput = {}, isWebsite = false) {
|
||||||
|
const requestTypes = Array.isArray(requestTypesInput) ? requestTypesInput : [requestTypesInput];
|
||||||
|
const actionImplicitDenies = !actionImplicitDeniesInput ? {} : actionImplicitDeniesInput;
|
||||||
|
const results = {};
|
||||||
|
const mainApiCall = requestTypes[0];
|
||||||
|
return requestTypes.every(_requestType => {
|
||||||
|
// By default, all missing actions are defined as allowed from IAM, to be
|
||||||
|
// backward compatible
|
||||||
|
actionImplicitDenies[_requestType] = actionImplicitDenies[_requestType] || false;
|
||||||
|
const parsedMethodName = _requestType.endsWith('Version')
|
||||||
|
? _requestType.slice(0, -7) : _requestType;
|
||||||
const bucketOwner = bucket.getOwner();
|
const bucketOwner = bucket.getOwner();
|
||||||
if (!objectMD) {
|
if (!objectMD) {
|
||||||
// User is already authorized on the bucket for FULL_CONTROL or WRITE or
|
|
||||||
// bucket has canned ACL public-read-write
|
|
||||||
if (requestType === 'objectPut' || requestType === 'objectDelete') {
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
// check bucket has read access
|
// check bucket has read access
|
||||||
// 'bucketGet' covers listObjects and listMultipartUploads, bucket read actions
|
// 'bucketGet' covers listObjects and listMultipartUploads, bucket read actions
|
||||||
return isBucketAuthorized(bucket, 'bucketGet', canonicalID, authInfo, log, request);
|
let permission = 'bucketGet';
|
||||||
|
if (actionsToConsiderAsObjectPut.includes(_requestType)) {
|
||||||
|
permission = 'objectPut';
|
||||||
|
}
|
||||||
|
results[_requestType] = isBucketAuthorized(bucket, permission, canonicalID, authInfo, log, request,
|
||||||
|
actionImplicitDenies, isWebsite);
|
||||||
|
// User is already authorized on the bucket for FULL_CONTROL or WRITE or
|
||||||
|
// bucket has canned ACL public-read-write
|
||||||
|
if ((parsedMethodName === 'objectPut' || parsedMethodName === 'objectDelete')
|
||||||
|
&& results[_requestType] === false) {
|
||||||
|
results[_requestType] = actionImplicitDenies[_requestType] === false;
|
||||||
|
}
|
||||||
|
return results[_requestType];
|
||||||
}
|
}
|
||||||
let requesterIsNotUser = true;
|
let requesterIsNotUser = true;
|
||||||
let arn = null;
|
let arn = null;
|
||||||
|
let isUserUnauthenticated = false;
|
||||||
if (authInfo) {
|
if (authInfo) {
|
||||||
requesterIsNotUser = !isRequesterNonAccountUser(authInfo);
|
requesterIsNotUser = !isRequesterNonAccountUser(authInfo);
|
||||||
arn = authInfo.getArn();
|
arn = authInfo.getArn();
|
||||||
|
isUserUnauthenticated = arn === undefined;
|
||||||
}
|
}
|
||||||
if (objectMD['owner-id'] === canonicalID && requesterIsNotUser) {
|
if (objectMD['owner-id'] === canonicalID && requesterIsNotUser || isServiceAccount(canonicalID)) {
|
||||||
return true;
|
results[_requestType] = actionImplicitDenies[_requestType] === false;
|
||||||
}
|
return results[_requestType];
|
||||||
|
|
||||||
if (isServiceAccount(canonicalID)) {
|
|
||||||
return true;
|
|
||||||
}
|
}
|
||||||
// account is authorized if:
|
// account is authorized if:
|
||||||
// - requesttype is included in bucketOwnerActions and
|
// - requesttype is included in bucketOwnerActions and
|
||||||
// - account is the bucket owner
|
// - account is the bucket owner
|
||||||
// - requester is account, not user
|
// - requester is account, not user
|
||||||
if (bucketOwnerActions.includes(requestType)
|
if (bucketOwnerActions.includes(parsedMethodName)
|
||||||
&& (bucketOwner === canonicalID)
|
&& (bucketOwner === canonicalID)
|
||||||
&& requesterIsNotUser) {
|
&& requesterIsNotUser) {
|
||||||
return true;
|
results[_requestType] = actionImplicitDenies[_requestType] === false;
|
||||||
|
return results[_requestType];
|
||||||
}
|
}
|
||||||
const aclPermission = checkObjectAcls(bucket, objectMD, requestType,
|
const aclPermission = checkObjectAcls(bucket, objectMD, parsedMethodName,
|
||||||
canonicalID);
|
canonicalID, requesterIsNotUser, isUserUnauthenticated, mainApiCall);
|
||||||
const bucketPolicy = bucket.getBucketPolicy();
|
return processBucketPolicy(_requestType, bucket, canonicalID, arn, bucketOwner,
|
||||||
if (!bucketPolicy) {
|
log, request, aclPermission, results, actionImplicitDenies);
|
||||||
return aclPermission;
|
});
|
||||||
}
|
|
||||||
const bucketPolicyPermission = checkBucketPolicy(bucketPolicy, requestType,
|
|
||||||
canonicalID, arn, bucket.getOwner(), log, request);
|
|
||||||
if (bucketPolicyPermission === 'explicitDeny') {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
return (aclPermission || (bucketPolicyPermission === 'allow'));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
function _checkResource(resource, bucketArn) {
|
function _checkResource(resource, bucketArn) {
|
||||||
|
@ -388,6 +514,117 @@ function validatePolicyResource(bucketName, policy) {
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
function checkIp(value) {
|
||||||
|
const errString = 'Invalid IP address in Conditions';
|
||||||
|
|
||||||
|
const values = Array.isArray(value) ? value : [value];
|
||||||
|
|
||||||
|
for (let i = 0; i < values.length; i++) {
|
||||||
|
// these preliminary checks are validating the provided
|
||||||
|
// ip address against ipaddr.js, the library we use when
|
||||||
|
// evaluating IP condition keys. It ensures compatibility,
|
||||||
|
// but additional checks are required to enforce the right
|
||||||
|
// notation (e.g., xxx.xxx.xxx.xxx/xx for IPv4). Otherwise,
|
||||||
|
// we would accept different ip formats, which is not
|
||||||
|
// standard in an AWS use case.
|
||||||
|
try {
|
||||||
|
try {
|
||||||
|
parseCIDR(values[i]);
|
||||||
|
} catch (err) {
|
||||||
|
isValid(values[i]);
|
||||||
|
}
|
||||||
|
} catch (err) {
|
||||||
|
return errString;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Apply the existing IP validation logic to each element
|
||||||
|
const validateIpRegex = ip => {
|
||||||
|
if (constants.ipv4Regex.test(ip)) {
|
||||||
|
return ip.split('.').every(part => parseInt(part, 10) <= 255);
|
||||||
|
}
|
||||||
|
if (constants.ipv6Regex.test(ip)) {
|
||||||
|
return ip.split(':').every(part => part.length <= 4);
|
||||||
|
}
|
||||||
|
return false;
|
||||||
|
};
|
||||||
|
|
||||||
|
if (validateIpRegex(values[i]) !== true) {
|
||||||
|
return errString;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the function hasn't returned by now, all elements are valid
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
// This function checks all bucket policy conditions if the values provided
|
||||||
|
// are valid for the condition type. If not it returns a relevant Malformed policy error string
|
||||||
|
function validatePolicyConditions(policy) {
|
||||||
|
const validConditions = [
|
||||||
|
{ conditionKey: 'aws:SourceIp', conditionValueTypeChecker: checkIp },
|
||||||
|
{ conditionKey: 's3:object-lock-remaining-retention-days' },
|
||||||
|
];
|
||||||
|
// keys where value type does not seem to be checked by AWS:
|
||||||
|
// - s3:object-lock-remaining-retention-days
|
||||||
|
|
||||||
|
if (!policy.Statement || !Array.isArray(policy.Statement) || policy.Statement.length === 0) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
// there can be multiple statements in the policy, each with a Condition enclosure
|
||||||
|
for (let i = 0; i < policy.Statement.length; i++) {
|
||||||
|
const s = policy.Statement[i];
|
||||||
|
if (s.Condition) {
|
||||||
|
const conditionOperators = Object.keys(s.Condition);
|
||||||
|
// there can be multiple condition operations in the Condition enclosure
|
||||||
|
// eslint-disable-next-line no-restricted-syntax
|
||||||
|
for (const conditionOperator of conditionOperators) {
|
||||||
|
const conditionKey = Object.keys(s.Condition[conditionOperator])[0];
|
||||||
|
const conditionValue = s.Condition[conditionOperator][conditionKey];
|
||||||
|
const validCondition = validConditions.find(validCondition =>
|
||||||
|
validCondition.conditionKey === conditionKey
|
||||||
|
);
|
||||||
|
// AWS returns does not return an error if the condition starts with 'aws:'
|
||||||
|
// so we reproduce this behaviour
|
||||||
|
if (!validCondition && !conditionKey.startsWith('aws:')) {
|
||||||
|
return errors.MalformedPolicy.customizeDescription('Policy has an invalid condition key');
|
||||||
|
}
|
||||||
|
if (validCondition && validCondition.conditionValueTypeChecker) {
|
||||||
|
const conditionValueTypeError = validCondition.conditionValueTypeChecker(conditionValue);
|
||||||
|
if (conditionValueTypeError) {
|
||||||
|
return errors.MalformedPolicy.customizeDescription(conditionValueTypeError);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/** isLifecycleSession - check if it is the Lifecycle assumed role session arn.
|
||||||
|
* @param {string} arn - Amazon resource name - example:
|
||||||
|
* arn:aws:sts::257038443293:assumed-role/rolename/backbeat-lifecycle
|
||||||
|
* @return {boolean} true if Lifecycle assumed role session arn, false if not.
|
||||||
|
*/
|
||||||
|
function isLifecycleSession(arn) {
|
||||||
|
if (!arn) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
const arnSplits = arn.split(':');
|
||||||
|
const service = arnSplits[2];
|
||||||
|
|
||||||
|
const resourceNames = arnSplits[arnSplits.length - 1].split('/');
|
||||||
|
|
||||||
|
const resourceType = resourceNames[0];
|
||||||
|
const sessionName = resourceNames[resourceNames.length - 1];
|
||||||
|
|
||||||
|
return (service === 'sts'
|
||||||
|
&& resourceType === assumedRoleArnResourceType
|
||||||
|
&& sessionName === backbeatLifecycleSessionName);
|
||||||
|
}
|
||||||
|
|
||||||
module.exports = {
|
module.exports = {
|
||||||
isBucketAuthorized,
|
isBucketAuthorized,
|
||||||
isObjAuthorized,
|
isObjAuthorized,
|
||||||
|
@ -398,4 +635,7 @@ module.exports = {
|
||||||
checkBucketAcls,
|
checkBucketAcls,
|
||||||
checkObjectAcls,
|
checkObjectAcls,
|
||||||
validatePolicyResource,
|
validatePolicyResource,
|
||||||
|
validatePolicyConditions,
|
||||||
|
isLifecycleSession,
|
||||||
|
evaluateBucketPolicyWithIAM,
|
||||||
};
|
};
|
||||||
|
|
|
@ -52,7 +52,7 @@ function prepareRequestContexts(apiMethod, request, sourceBucket,
|
||||||
apiMethod, 's3');
|
apiMethod, 's3');
|
||||||
}
|
}
|
||||||
|
|
||||||
if (apiMethod === 'multiObjectDelete' || apiMethod === 'bucketPut') {
|
if (apiMethod === 'bucketPut') {
|
||||||
return null;
|
return null;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -65,7 +65,17 @@ function prepareRequestContexts(apiMethod, request, sourceBucket,
|
||||||
|
|
||||||
const requestContexts = [];
|
const requestContexts = [];
|
||||||
|
|
||||||
if (apiMethodAfterVersionCheck === 'objectCopy'
|
if (apiMethod === 'multiObjectDelete') {
|
||||||
|
// MultiObjectDelete does not require any authorization when evaluating
|
||||||
|
// the API. Instead, we authorize each object passed.
|
||||||
|
// But in order to get any relevant information from the authorization service
|
||||||
|
// for example, the account quota, we must send a request context object
|
||||||
|
// with no `specificResource`. We expect the result to be an implicit deny.
|
||||||
|
// In the API, we then ignore these authorization results, and we can use
|
||||||
|
// any information returned, e.g., the quota.
|
||||||
|
const requestContextMultiObjectDelete = generateRequestContext('objectDelete');
|
||||||
|
requestContexts.push(requestContextMultiObjectDelete);
|
||||||
|
} else if (apiMethodAfterVersionCheck === 'objectCopy'
|
||||||
|| apiMethodAfterVersionCheck === 'objectPutCopyPart') {
|
|| apiMethodAfterVersionCheck === 'objectPutCopyPart') {
|
||||||
const objectGetAction = sourceVersionId ? 'objectGetVersion' :
|
const objectGetAction = sourceVersionId ? 'objectGetVersion' :
|
||||||
'objectGet';
|
'objectGet';
|
||||||
|
@ -97,12 +107,63 @@ function prepareRequestContexts(apiMethod, request, sourceBucket,
|
||||||
const objectGetTaggingAction = (request.query &&
|
const objectGetTaggingAction = (request.query &&
|
||||||
request.query.versionId) ? 'objectGetTaggingVersion' :
|
request.query.versionId) ? 'objectGetTaggingVersion' :
|
||||||
'objectGetTagging';
|
'objectGetTagging';
|
||||||
|
if (request.headers['x-amz-version-id']) {
|
||||||
|
const objectGetVersionAction = 'objectGetVersion';
|
||||||
|
const getVersionResourceVersion =
|
||||||
|
generateRequestContext(objectGetVersionAction);
|
||||||
|
requestContexts.push(getVersionResourceVersion);
|
||||||
|
}
|
||||||
const getRequestContext =
|
const getRequestContext =
|
||||||
generateRequestContext(apiMethodAfterVersionCheck);
|
generateRequestContext(apiMethodAfterVersionCheck);
|
||||||
const getTaggingRequestContext =
|
const getTaggingRequestContext =
|
||||||
generateRequestContext(objectGetTaggingAction);
|
generateRequestContext(objectGetTaggingAction);
|
||||||
requestContexts.push(getRequestContext, getTaggingRequestContext);
|
requestContexts.push(getRequestContext, getTaggingRequestContext);
|
||||||
|
} else if (apiMethodAfterVersionCheck === 'objectGetTagging') {
|
||||||
|
const objectGetTaggingAction = 'objectGetTagging';
|
||||||
|
const getTaggingResourceVersion =
|
||||||
|
generateRequestContext(objectGetTaggingAction);
|
||||||
|
requestContexts.push(getTaggingResourceVersion);
|
||||||
|
if (request.headers['x-amz-version-id']) {
|
||||||
|
const objectGetTaggingVersionAction = 'objectGetTaggingVersion';
|
||||||
|
const getTaggingVersionResourceVersion =
|
||||||
|
generateRequestContext(objectGetTaggingVersionAction);
|
||||||
|
requestContexts.push(getTaggingVersionResourceVersion);
|
||||||
|
}
|
||||||
|
} else if (apiMethodAfterVersionCheck === 'objectHead') {
|
||||||
|
const objectHeadAction = 'objectHead';
|
||||||
|
const headObjectAction =
|
||||||
|
generateRequestContext(objectHeadAction);
|
||||||
|
requestContexts.push(headObjectAction);
|
||||||
|
if (request.headers['x-amz-version-id']) {
|
||||||
|
const objectHeadVersionAction = 'objectGetVersion';
|
||||||
|
const headObjectVersion =
|
||||||
|
generateRequestContext(objectHeadVersionAction);
|
||||||
|
requestContexts.push(headObjectVersion);
|
||||||
|
}
|
||||||
|
} else if (apiMethodAfterVersionCheck === 'objectPutTagging') {
|
||||||
|
const putObjectTaggingRequestContext =
|
||||||
|
generateRequestContext('objectPutTagging');
|
||||||
|
requestContexts.push(putObjectTaggingRequestContext);
|
||||||
|
if (request.headers['x-amz-version-id']) {
|
||||||
|
const putObjectVersionRequestContext =
|
||||||
|
generateRequestContext('objectPutTaggingVersion');
|
||||||
|
requestContexts.push(putObjectVersionRequestContext);
|
||||||
|
}
|
||||||
|
} else if (apiMethodAfterVersionCheck === 'objectPutCopyPart') {
|
||||||
|
const putObjectRequestContext =
|
||||||
|
generateRequestContext('objectPut');
|
||||||
|
requestContexts.push(putObjectRequestContext);
|
||||||
|
const getObjectRequestContext =
|
||||||
|
generateRequestContext('objectGet');
|
||||||
|
requestContexts.push(getObjectRequestContext);
|
||||||
} else if (apiMethodAfterVersionCheck === 'objectPut') {
|
} else if (apiMethodAfterVersionCheck === 'objectPut') {
|
||||||
|
// if put object with version
|
||||||
|
if (request.headers['x-scal-s3-version-id'] ||
|
||||||
|
request.headers['x-scal-s3-version-id'] === '') {
|
||||||
|
const putVersionRequestContext =
|
||||||
|
generateRequestContext('objectPutVersion');
|
||||||
|
requestContexts.push(putVersionRequestContext);
|
||||||
|
} else {
|
||||||
const putRequestContext =
|
const putRequestContext =
|
||||||
generateRequestContext(apiMethodAfterVersionCheck);
|
generateRequestContext(apiMethodAfterVersionCheck);
|
||||||
requestContexts.push(putRequestContext);
|
requestContexts.push(putRequestContext);
|
||||||
|
@ -112,12 +173,60 @@ function prepareRequestContexts(apiMethod, request, sourceBucket,
|
||||||
generateRequestContext('objectPutTagging');
|
generateRequestContext('objectPutTagging');
|
||||||
requestContexts.push(putTaggingRequestContext);
|
requestContexts.push(putTaggingRequestContext);
|
||||||
}
|
}
|
||||||
|
if (['ON', 'OFF'].includes(request.headers['x-amz-object-lock-legal-hold-status'])) {
|
||||||
|
const putLegalHoldStatusAction =
|
||||||
|
generateRequestContext('objectPutLegalHold');
|
||||||
|
requestContexts.push(putLegalHoldStatusAction);
|
||||||
|
}
|
||||||
// if put object (versioning) with ACL
|
// if put object (versioning) with ACL
|
||||||
if (isHeaderAcl(request.headers)) {
|
if (isHeaderAcl(request.headers)) {
|
||||||
const putAclRequestContext =
|
const putAclRequestContext =
|
||||||
generateRequestContext('objectPutACL');
|
generateRequestContext('objectPutACL');
|
||||||
requestContexts.push(putAclRequestContext);
|
requestContexts.push(putAclRequestContext);
|
||||||
}
|
}
|
||||||
|
if (request.headers['x-amz-object-lock-mode']) {
|
||||||
|
const putObjectLockRequestContext =
|
||||||
|
generateRequestContext('objectPutRetention');
|
||||||
|
requestContexts.push(putObjectLockRequestContext);
|
||||||
|
}
|
||||||
|
if (request.headers['x-amz-version-id']) {
|
||||||
|
const putObjectVersionRequestContext =
|
||||||
|
generateRequestContext('objectPutTaggingVersion');
|
||||||
|
requestContexts.push(putObjectVersionRequestContext);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else if (apiMethodAfterVersionCheck === 'initiateMultipartUpload' ||
|
||||||
|
apiMethodAfterVersionCheck === 'objectPutPart' ||
|
||||||
|
apiMethodAfterVersionCheck === 'completeMultipartUpload'
|
||||||
|
) {
|
||||||
|
if (request.headers['x-scal-s3-version-id'] ||
|
||||||
|
request.headers['x-scal-s3-version-id'] === '') {
|
||||||
|
const putVersionRequestContext =
|
||||||
|
generateRequestContext('objectPutVersion');
|
||||||
|
requestContexts.push(putVersionRequestContext);
|
||||||
|
} else {
|
||||||
|
const putRequestContext =
|
||||||
|
generateRequestContext(apiMethodAfterVersionCheck);
|
||||||
|
requestContexts.push(putRequestContext);
|
||||||
|
}
|
||||||
|
|
||||||
|
// if put object (versioning) with ACL
|
||||||
|
if (isHeaderAcl(request.headers)) {
|
||||||
|
const putAclRequestContext =
|
||||||
|
generateRequestContext('objectPutACL');
|
||||||
|
requestContexts.push(putAclRequestContext);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (request.headers['x-amz-object-lock-mode']) {
|
||||||
|
const putObjectLockRequestContext =
|
||||||
|
generateRequestContext('objectPutRetention');
|
||||||
|
requestContexts.push(putObjectLockRequestContext);
|
||||||
|
}
|
||||||
|
if (request.headers['x-amz-version-id']) {
|
||||||
|
const putObjectVersionRequestContext =
|
||||||
|
generateRequestContext('objectPutTaggingVersion');
|
||||||
|
requestContexts.push(putObjectVersionRequestContext);
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
const requestContext =
|
const requestContext =
|
||||||
generateRequestContext(apiMethodAfterVersionCheck);
|
generateRequestContext(apiMethodAfterVersionCheck);
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
const assert = require('assert');
|
|
||||||
const async = require('async');
|
const async = require('async');
|
||||||
|
|
||||||
const { auth, s3middleware } = require('arsenal');
|
const { auth, s3middleware } = require('arsenal');
|
||||||
const metadata = require('../../../metadata/wrapper');
|
const metadata = require('../../../metadata/wrapper');
|
||||||
const { decodeVersionId } = require('../object/versioning');
|
const { decodeVersionId } = require('../object/versioning');
|
||||||
|
@ -12,29 +12,24 @@ function makeTagQuery(tags) {
|
||||||
.join('&');
|
.join('&');
|
||||||
}
|
}
|
||||||
|
|
||||||
function updateRequestContexts(request, requestContexts, apiMethod, log, cb) {
|
function updateRequestContextsWithTags(request, requestContexts, apiMethod, log, cb) {
|
||||||
requestContexts.forEach(rc => {
|
async.waterfall([
|
||||||
rc.setNeedTagEval(true);
|
|
||||||
|
|
||||||
async.series([
|
|
||||||
next => {
|
next => {
|
||||||
if (request.headers['x-amz-tagging']) {
|
if (request.headers['x-amz-tagging']) {
|
||||||
rc.setRequestObjTags(request.headers['x-amz-tagging']);
|
return next(null, request.headers['x-amz-tagging']);
|
||||||
process.nextTick(() => next());
|
}
|
||||||
} else if (request.post && apiMethod === 'objectPutTagging') {
|
if (request.post && apiMethod === 'objectPutTagging') {
|
||||||
parseTagXml(request.post, log, (err, tags) => {
|
return parseTagXml(request.post, log, (err, tags) => {
|
||||||
if (err) {
|
if (err) {
|
||||||
log.trace('error parsing request tags');
|
log.trace('error parsing request tags');
|
||||||
return next(err);
|
return next(err);
|
||||||
}
|
}
|
||||||
rc.setRequestObjTags(makeTagQuery(tags));
|
return next(null, makeTagQuery(tags));
|
||||||
return next();
|
|
||||||
});
|
});
|
||||||
} else {
|
|
||||||
process.nextTick(() => next());
|
|
||||||
}
|
}
|
||||||
|
return next(null, null);
|
||||||
},
|
},
|
||||||
next => {
|
(requestTagsQuery, next) => {
|
||||||
const objectKey = request.objectKey;
|
const objectKey = request.objectKey;
|
||||||
const bucketName = request.bucketName;
|
const bucketName = request.bucketName;
|
||||||
const decodedVidResult = decodeVersionId(request.query);
|
const decodedVidResult = decodeVersionId(request.query);
|
||||||
|
@ -43,32 +38,40 @@ function updateRequestContexts(request, requestContexts, apiMethod, log, cb) {
|
||||||
versionId: request.query.versionId,
|
versionId: request.query.versionId,
|
||||||
error: decodedVidResult,
|
error: decodedVidResult,
|
||||||
});
|
});
|
||||||
return process.nextTick(() => next(decodedVidResult));
|
return next(decodedVidResult);
|
||||||
}
|
}
|
||||||
const reqVersionId = decodedVidResult;
|
const reqVersionId = decodedVidResult;
|
||||||
return metadata.getObjectMD(bucketName, objectKey, { versionId: reqVersionId }, log,
|
return metadata.getObjectMD(
|
||||||
(err, objMD) => {
|
bucketName, objectKey, { versionId: reqVersionId }, log, (err, objMD) => {
|
||||||
if (err) {
|
if (err) {
|
||||||
if (err.is.NoSuchKey) {
|
// TODO: move to `.is` once BKTCLT-9 is done and bumped in Cloudserver
|
||||||
return next();
|
if (err.NoSuchKey) {
|
||||||
|
return next(null, requestTagsQuery, null);
|
||||||
}
|
}
|
||||||
log.trace('error getting request object tags');
|
log.trace('error getting request object tags');
|
||||||
return next(err);
|
return next(err);
|
||||||
}
|
}
|
||||||
const existingTags = objMD.tags;
|
const existingTagsQuery = objMD.tags && makeTagQuery(objMD.tags);
|
||||||
if (existingTags) {
|
return next(null, requestTagsQuery, existingTagsQuery);
|
||||||
rc.setExistingObjTag(makeTagQuery(existingTags));
|
|
||||||
}
|
|
||||||
return next();
|
|
||||||
});
|
});
|
||||||
},
|
},
|
||||||
], err => {
|
], (err, requestTagsQuery, existingTagsQuery) => {
|
||||||
if (err) {
|
if (err) {
|
||||||
log.trace('error processing tag condition key evaluation');
|
log.trace('error processing tag condition key evaluation');
|
||||||
return cb(err);
|
return cb(err);
|
||||||
}
|
}
|
||||||
return cb(null, requestContexts);
|
// FIXME introduced by CLDSRV-256, this syntax should be allowed by the linter
|
||||||
});
|
// eslint-disable-next-line no-restricted-syntax
|
||||||
|
for (const rc of requestContexts) {
|
||||||
|
rc.setNeedTagEval(true);
|
||||||
|
if (requestTagsQuery) {
|
||||||
|
rc.setRequestObjTags(requestTagsQuery);
|
||||||
|
}
|
||||||
|
if (existingTagsQuery) {
|
||||||
|
rc.setExistingObjTag(existingTagsQuery);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return cb();
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -77,23 +80,20 @@ function tagConditionKeyAuth(authorizationResults, request, requestContexts, api
|
||||||
return cb();
|
return cb();
|
||||||
}
|
}
|
||||||
if (!authorizationResults.some(authRes => authRes.checkTagConditions)) {
|
if (!authorizationResults.some(authRes => authRes.checkTagConditions)) {
|
||||||
return cb();
|
return cb(null, authorizationResults);
|
||||||
}
|
}
|
||||||
|
|
||||||
return updateRequestContexts(request, requestContexts, apiMethod, log, (err, updatedContexts) => {
|
return updateRequestContextsWithTags(request, requestContexts, apiMethod, log, err => {
|
||||||
if (err) {
|
if (err) {
|
||||||
return cb(err);
|
return cb(err);
|
||||||
}
|
}
|
||||||
if (assert.deepStrictEqual(requestContexts, updatedContexts)) {
|
|
||||||
return cb();
|
|
||||||
}
|
|
||||||
return auth.server.doAuth(request, log,
|
return auth.server.doAuth(request, log,
|
||||||
(err, userInfo, tagAuthResults) => cb(err, tagAuthResults), 's3', updatedContexts);
|
(err, userInfo, authResults) => cb(err, authResults), 's3', requestContexts);
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
module.exports = {
|
module.exports = {
|
||||||
tagConditionKeyAuth,
|
tagConditionKeyAuth,
|
||||||
updateRequestContexts,
|
updateRequestContextsWithTags,
|
||||||
makeTagQuery,
|
makeTagQuery,
|
||||||
};
|
};
|
||||||
|
|
|
@ -41,7 +41,7 @@ function addToUsersBucket(canonicalID, bucketName, bucketMD, log, cb) {
|
||||||
usersBucket : oldUsersBucket;
|
usersBucket : oldUsersBucket;
|
||||||
return metadata.putObjectMD(usersBucketBeingCalled, key,
|
return metadata.putObjectMD(usersBucketBeingCalled, key,
|
||||||
omVal, {}, log, err => {
|
omVal, {}, log, err => {
|
||||||
if (err?.is.NoSuchBucket) {
|
if (err?.is?.NoSuchBucket) {
|
||||||
// There must be no usersBucket so createBucket
|
// There must be no usersBucket so createBucket
|
||||||
// one using the new format
|
// one using the new format
|
||||||
log.trace('users bucket does not exist, ' +
|
log.trace('users bucket does not exist, ' +
|
||||||
|
@ -61,8 +61,8 @@ function addToUsersBucket(canonicalID, bucketName, bucketMD, log, cb) {
|
||||||
// from getting a BucketAlreadyExists
|
// from getting a BucketAlreadyExists
|
||||||
// error with respect
|
// error with respect
|
||||||
// to the usersBucket.
|
// to the usersBucket.
|
||||||
if (err &&
|
// TODO: move to `.is` once BKTCLT-9 is done and bumped in Cloudserver
|
||||||
!err.is.BucketAlreadyExists) {
|
if (err && !err.BucketAlreadyExists) {
|
||||||
log.error('error from metadata', {
|
log.error('error from metadata', {
|
||||||
error: err,
|
error: err,
|
||||||
});
|
});
|
||||||
|
@ -223,7 +223,8 @@ function createBucket(authInfo, bucketName, headers,
|
||||||
},
|
},
|
||||||
getAnyExistingBucketInfo: function getAnyExistingBucketInfo(callback) {
|
getAnyExistingBucketInfo: function getAnyExistingBucketInfo(callback) {
|
||||||
metadata.getBucket(bucketName, log, (err, data) => {
|
metadata.getBucket(bucketName, log, (err, data) => {
|
||||||
if (err?.is.NoSuchBucket) {
|
// TODO: move to `.is` once BKTCLT-9 is done and bumped in Cloudserver
|
||||||
|
if (err && err.NoSuchBucket) {
|
||||||
return callback(null, 'NoBucketYet');
|
return callback(null, 'NoBucketYet');
|
||||||
}
|
}
|
||||||
if (err) {
|
if (err) {
|
||||||
|
|
|
@ -16,14 +16,15 @@ function _deleteMPUbucket(destinationBucketName, log, cb) {
|
||||||
`${mpuBucketPrefix}${destinationBucketName}`;
|
`${mpuBucketPrefix}${destinationBucketName}`;
|
||||||
return metadata.deleteBucket(mpuBucketName, log, err => {
|
return metadata.deleteBucket(mpuBucketName, log, err => {
|
||||||
// If the mpu bucket does not exist, just move on
|
// If the mpu bucket does not exist, just move on
|
||||||
if (err?.is.NoSuchBucket) {
|
// TODO: move to `.is` once BKTCLT-9 is done and bumped in Cloudserver
|
||||||
|
if (err && err.NoSuchBucket) {
|
||||||
return cb();
|
return cb();
|
||||||
}
|
}
|
||||||
return cb(err);
|
return cb(err);
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
function _deleteOngoingMPUs(authInfo, bucketName, bucketMD, mpus, log, cb) {
|
function _deleteOngoingMPUs(authInfo, bucketName, bucketMD, mpus, request, log, cb) {
|
||||||
async.mapLimit(mpus, 1, (mpu, next) => {
|
async.mapLimit(mpus, 1, (mpu, next) => {
|
||||||
const splitterChar = mpu.key.includes(oldSplitter) ?
|
const splitterChar = mpu.key.includes(oldSplitter) ?
|
||||||
oldSplitter : splitter;
|
oldSplitter : splitter;
|
||||||
|
@ -39,7 +40,7 @@ function _deleteOngoingMPUs(authInfo, bucketName, bucketMD, mpus, log, cb) {
|
||||||
byteLength: partSizeSum,
|
byteLength: partSizeSum,
|
||||||
});
|
});
|
||||||
next(err);
|
next(err);
|
||||||
});
|
}, request);
|
||||||
}, cb);
|
}, cb);
|
||||||
}
|
}
|
||||||
/**
|
/**
|
||||||
|
@ -48,11 +49,13 @@ function _deleteOngoingMPUs(authInfo, bucketName, bucketMD, mpus, log, cb) {
|
||||||
* @param {object} bucketMD - bucket attributes/metadata
|
* @param {object} bucketMD - bucket attributes/metadata
|
||||||
* @param {string} bucketName - bucket in which objectMetadata is stored
|
* @param {string} bucketName - bucket in which objectMetadata is stored
|
||||||
* @param {string} canonicalID - account canonicalID of requester
|
* @param {string} canonicalID - account canonicalID of requester
|
||||||
|
* @param {object} request - request object given by router
|
||||||
|
* including normalized headers
|
||||||
* @param {object} log - Werelogs logger
|
* @param {object} log - Werelogs logger
|
||||||
* @param {function} cb - callback from async.waterfall in bucketDelete
|
* @param {function} cb - callback from async.waterfall in bucketDelete
|
||||||
* @return {undefined}
|
* @return {undefined}
|
||||||
*/
|
*/
|
||||||
function deleteBucket(authInfo, bucketMD, bucketName, canonicalID, log, cb) {
|
function deleteBucket(authInfo, bucketMD, bucketName, canonicalID, request, log, cb) {
|
||||||
log.trace('deleting bucket from metadata');
|
log.trace('deleting bucket from metadata');
|
||||||
assert.strictEqual(typeof bucketName, 'string');
|
assert.strictEqual(typeof bucketName, 'string');
|
||||||
assert.strictEqual(typeof canonicalID, 'string');
|
assert.strictEqual(typeof canonicalID, 'string');
|
||||||
|
@ -99,7 +102,7 @@ function deleteBucket(authInfo, bucketMD, bucketName, canonicalID, log, cb) {
|
||||||
}
|
}
|
||||||
if (objectsListRes.Contents.length) {
|
if (objectsListRes.Contents.length) {
|
||||||
return _deleteOngoingMPUs(authInfo, bucketName,
|
return _deleteOngoingMPUs(authInfo, bucketName,
|
||||||
bucketMD, objectsListRes.Contents, log, err => {
|
bucketMD, objectsListRes.Contents, request, log, err => {
|
||||||
if (err) {
|
if (err) {
|
||||||
return next(err);
|
return next(err);
|
||||||
}
|
}
|
||||||
|
|
|
@ -30,6 +30,9 @@ function bucketShield(bucket, requestType) {
|
||||||
// Otherwise return an error to the client
|
// Otherwise return an error to the client
|
||||||
if ((bucket.hasDeletedFlag() || bucket.hasTransientFlag()) &&
|
if ((bucket.hasDeletedFlag() || bucket.hasTransientFlag()) &&
|
||||||
(requestType !== 'objectPut' &&
|
(requestType !== 'objectPut' &&
|
||||||
|
requestType !== 'initiateMultipartUpload' &&
|
||||||
|
requestType !== 'objectPutPart' &&
|
||||||
|
requestType !== 'completeMultipartUpload' &&
|
||||||
requestType !== 'bucketPutACL' &&
|
requestType !== 'bucketPutACL' &&
|
||||||
requestType !== 'bucketDelete')) {
|
requestType !== 'bucketDelete')) {
|
||||||
return true;
|
return true;
|
||||||
|
|
|
@ -20,7 +20,8 @@ function deleteUserBucketEntry(bucketName, canonicalID, log, cb) {
|
||||||
oldSplitter, bucketName);
|
oldSplitter, bucketName);
|
||||||
return metadata.deleteObjectMD(oldUsersBucket, keyForUserBucket2,
|
return metadata.deleteObjectMD(oldUsersBucket, keyForUserBucket2,
|
||||||
{}, log, error => {
|
{}, log, error => {
|
||||||
if (error && !error.is.NoSuchKey) {
|
// TODO: move to `.is` once BKTCLT-9 is done and bumped in Cloudserver
|
||||||
|
if (error && !error.NoSuchKey) {
|
||||||
log.error('from metadata while deleting user bucket',
|
log.error('from metadata while deleting user bucket',
|
||||||
{ error });
|
{ error });
|
||||||
return cb(error);
|
return cb(error);
|
||||||
|
|
|
@ -3,7 +3,7 @@ const async = require('async');
|
||||||
const constants = require('../../../../constants');
|
const constants = require('../../../../constants');
|
||||||
const { data } = require('../../../data/wrapper');
|
const { data } = require('../../../data/wrapper');
|
||||||
const locationConstraintCheck = require('../object/locationConstraintCheck');
|
const locationConstraintCheck = require('../object/locationConstraintCheck');
|
||||||
const { metadataValidateBucketAndObj } =
|
const { standardMetadataValidateBucketAndObj } =
|
||||||
require('../../../metadata/metadataUtils');
|
require('../../../metadata/metadataUtils');
|
||||||
const services = require('../../../services');
|
const services = require('../../../services');
|
||||||
|
|
||||||
|
@ -14,7 +14,7 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
|
||||||
bucketName,
|
bucketName,
|
||||||
objectKey,
|
objectKey,
|
||||||
uploadId,
|
uploadId,
|
||||||
preciseRequestType: 'multipartDelete',
|
preciseRequestType: request.apiMethods || 'multipartDelete',
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
// For validating the request at the destinationBucket level
|
// For validating the request at the destinationBucket level
|
||||||
|
@ -22,10 +22,11 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
|
||||||
// but the requestType is the more general 'objectDelete'
|
// but the requestType is the more general 'objectDelete'
|
||||||
const metadataValParams = Object.assign({}, metadataValMPUparams);
|
const metadataValParams = Object.assign({}, metadataValMPUparams);
|
||||||
metadataValParams.requestType = 'objectPut';
|
metadataValParams.requestType = 'objectPut';
|
||||||
|
const authzIdentityResult = request ? request.actionImplicitDenies : false;
|
||||||
|
|
||||||
async.waterfall([
|
async.waterfall([
|
||||||
function checkDestBucketVal(next) {
|
function checkDestBucketVal(next) {
|
||||||
metadataValidateBucketAndObj(metadataValParams, log,
|
standardMetadataValidateBucketAndObj(metadataValParams, authzIdentityResult, log,
|
||||||
(err, destinationBucket) => {
|
(err, destinationBucket) => {
|
||||||
if (err) {
|
if (err) {
|
||||||
return next(err, destinationBucket);
|
return next(err, destinationBucket);
|
||||||
|
@ -56,9 +57,14 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
|
||||||
function abortExternalMpu(mpuBucket, mpuOverviewObj, destBucket,
|
function abortExternalMpu(mpuBucket, mpuOverviewObj, destBucket,
|
||||||
next) {
|
next) {
|
||||||
const location = mpuOverviewObj.controllingLocationConstraint;
|
const location = mpuOverviewObj.controllingLocationConstraint;
|
||||||
|
const originalIdentityAuthzResults = request.actionImplicitDenies;
|
||||||
|
// eslint-disable-next-line no-param-reassign
|
||||||
|
delete request.actionImplicitDenies;
|
||||||
return data.abortMPU(objectKey, uploadId, location, bucketName,
|
return data.abortMPU(objectKey, uploadId, location, bucketName,
|
||||||
request, destBucket, locationConstraintCheck, log,
|
request, destBucket, locationConstraintCheck, log,
|
||||||
(err, skipDataDelete) => {
|
(err, skipDataDelete) => {
|
||||||
|
// eslint-disable-next-line no-param-reassign
|
||||||
|
request.actionImplicitDenies = originalIdentityAuthzResults;
|
||||||
if (err) {
|
if (err) {
|
||||||
return next(err, destBucket);
|
return next(err, destBucket);
|
||||||
}
|
}
|
||||||
|
|
|
@ -0,0 +1,247 @@
|
||||||
|
/*
|
||||||
|
* Code based on Yutaka Oishi (Fujifilm) contributions
|
||||||
|
* Date: 11 Sep 2020
|
||||||
|
*/
|
||||||
|
const { ObjectMDArchive } = require('arsenal').models;
|
||||||
|
const errors = require('arsenal').errors;
|
||||||
|
const { config } = require('../../../Config');
|
||||||
|
const { locationConstraints } = config;
|
||||||
|
|
||||||
|
const { scaledMsPerDay } = config.getTimeOptions();
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get response header "x-amz-restore"
|
||||||
|
* Be called by objectHead.js
|
||||||
|
* @param {object} objMD - object's metadata
|
||||||
|
* @returns {string|undefined} x-amz-restore
|
||||||
|
*/
|
||||||
|
function getAmzRestoreResHeader(objMD) {
|
||||||
|
if (objMD.archive &&
|
||||||
|
objMD.archive.restoreRequestedAt &&
|
||||||
|
!objMD.archive.restoreCompletedAt) {
|
||||||
|
// Avoid race condition by relying on the `archive` MD of the object
|
||||||
|
// and return the right header after a RESTORE request.
|
||||||
|
// eslint-disable-next-line
|
||||||
|
return `ongoing-request="true"`;
|
||||||
|
}
|
||||||
|
if (objMD['x-amz-restore']) {
|
||||||
|
if (objMD['x-amz-restore']['expiry-date']) {
|
||||||
|
const utcDateTime = new Date(objMD['x-amz-restore']['expiry-date']).toUTCString();
|
||||||
|
// eslint-disable-next-line
|
||||||
|
return `ongoing-request="${objMD['x-amz-restore']['ongoing-request']}", expiry-date="${utcDateTime}"`;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return undefined;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Check if restore can be done.
|
||||||
|
*
|
||||||
|
* @param {ObjectMD} objectMD - object metadata
|
||||||
|
* @param {object} log - werelogs logger
|
||||||
|
* @return {ArsenalError|undefined} - undefined if the conditions for RestoreObject are fulfilled
|
||||||
|
*/
|
||||||
|
function _validateStartRestore(objectMD, log) {
|
||||||
|
if (objectMD.archive?.restoreCompletedAt) {
|
||||||
|
if (new Date(objectMD.archive?.restoreWillExpireAt) < new Date(Date.now())) {
|
||||||
|
// return InvalidObjectState error if the restored object is expired
|
||||||
|
// but restore info md of this object has not yet been cleared
|
||||||
|
log.debug('The restored object already expired.',
|
||||||
|
{
|
||||||
|
archive: objectMD.archive,
|
||||||
|
method: '_validateStartRestore',
|
||||||
|
});
|
||||||
|
return errors.InvalidObjectState;
|
||||||
|
}
|
||||||
|
|
||||||
|
// If object is already restored, no further check is needed
|
||||||
|
// Furthermore, we cannot check if the location is cold, as the `dataStoreName` would have
|
||||||
|
// been reset.
|
||||||
|
return undefined;
|
||||||
|
}
|
||||||
|
const isLocationCold = locationConstraints[objectMD.dataStoreName]?.isCold;
|
||||||
|
if (!isLocationCold) {
|
||||||
|
// return InvalidObjectState error if the object is not in cold storage,
|
||||||
|
// not in cold storage means either location cold flag not exists or cold flag is explicit false
|
||||||
|
log.debug('The bucket of the object is not in a cold storage location.',
|
||||||
|
{
|
||||||
|
isLocationCold,
|
||||||
|
method: '_validateStartRestore',
|
||||||
|
});
|
||||||
|
return errors.InvalidObjectState;
|
||||||
|
}
|
||||||
|
if (objectMD.archive?.restoreRequestedAt) {
|
||||||
|
// return RestoreAlreadyInProgress error if the object is currently being restored
|
||||||
|
// check if archive.restoreRequestAt exists and archive.restoreCompletedAt not yet exists
|
||||||
|
log.debug('The object is currently being restored.',
|
||||||
|
{
|
||||||
|
archive: objectMD.archive,
|
||||||
|
method: '_validateStartRestore',
|
||||||
|
});
|
||||||
|
return errors.RestoreAlreadyInProgress;
|
||||||
|
}
|
||||||
|
return undefined;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Check if "put version id" is allowed
|
||||||
|
*
|
||||||
|
* @param {ObjectMD} objMD - object metadata
|
||||||
|
* @param {string} versionId - object's version id
|
||||||
|
* @param {object} log - werelogs logger
|
||||||
|
* @return {ArsenalError|undefined} - undefined if "put version id" is allowed
|
||||||
|
*/
|
||||||
|
function validatePutVersionId(objMD, versionId, log) {
|
||||||
|
if (!objMD) {
|
||||||
|
const err = versionId ? errors.NoSuchVersion : errors.NoSuchKey;
|
||||||
|
log.error('error no object metadata found', { method: 'validatePutVersionId', versionId });
|
||||||
|
return err;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (objMD.isDeleteMarker) {
|
||||||
|
log.error('version is a delete marker', { method: 'validatePutVersionId', versionId });
|
||||||
|
return errors.MethodNotAllowed;
|
||||||
|
}
|
||||||
|
|
||||||
|
const isLocationCold = locationConstraints[objMD.dataStoreName]?.isCold;
|
||||||
|
if (!isLocationCold) {
|
||||||
|
log.error('The object data is not stored in a cold storage location.',
|
||||||
|
{
|
||||||
|
isLocationCold,
|
||||||
|
dataStoreName: objMD.dataStoreName,
|
||||||
|
method: 'validatePutVersionId',
|
||||||
|
});
|
||||||
|
return errors.InvalidObjectState;
|
||||||
|
}
|
||||||
|
|
||||||
|
// make sure object archive restoration is in progress
|
||||||
|
// NOTE: we do not use putObjectVersion to update the restoration period.
|
||||||
|
if (!objMD.archive || !objMD.archive.restoreRequestedAt || !objMD.archive.restoreRequestedDays
|
||||||
|
|| objMD.archive.restoreCompletedAt || objMD.archive.restoreWillExpireAt) {
|
||||||
|
log.error('object archive restoration is not in progress',
|
||||||
|
{ method: 'validatePutVersionId', versionId });
|
||||||
|
return errors.InvalidObjectState;
|
||||||
|
}
|
||||||
|
|
||||||
|
return undefined;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Check if the object is already restored, and update the expiration date accordingly:
|
||||||
|
* > After restoring an archived object, you can update the restoration period by reissuing the
|
||||||
|
* > request with a new period. Amazon S3 updates the restoration period relative to the current
|
||||||
|
* > time.
|
||||||
|
*
|
||||||
|
* @param {ObjectMD} objectMD - object metadata
|
||||||
|
* @param {object} log - werelogs logger
|
||||||
|
* @return {boolean} - true if the object is already restored
|
||||||
|
*/
|
||||||
|
function _updateObjectExpirationDate(objectMD, log) {
|
||||||
|
// Check if restoreCompletedAt field exists
|
||||||
|
// Normally, we should check `archive.restoreWillExpireAt > current time`; however this is
|
||||||
|
// checked earlier in the process, so checking again here would create weird states
|
||||||
|
const isObjectAlreadyRestored = !!objectMD.archive.restoreCompletedAt;
|
||||||
|
log.debug('The restore status of the object.', {
|
||||||
|
isObjectAlreadyRestored,
|
||||||
|
method: 'isObjectAlreadyRestored'
|
||||||
|
});
|
||||||
|
if (isObjectAlreadyRestored) {
|
||||||
|
const expiryDate = new Date(objectMD.archive.restoreRequestedAt);
|
||||||
|
expiryDate.setTime(expiryDate.getTime() + (objectMD.archive.restoreRequestedDays * scaledMsPerDay));
|
||||||
|
|
||||||
|
/* eslint-disable no-param-reassign */
|
||||||
|
objectMD.archive.restoreWillExpireAt = expiryDate;
|
||||||
|
objectMD['x-amz-restore'] = {
|
||||||
|
'ongoing-request': false,
|
||||||
|
'expiry-date': expiryDate,
|
||||||
|
};
|
||||||
|
/* eslint-enable no-param-reassign */
|
||||||
|
}
|
||||||
|
return isObjectAlreadyRestored;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* update restore expiration date.
|
||||||
|
*
|
||||||
|
* @param {ObjectMD} objectMD - objectMD instance
|
||||||
|
* @param {object} restoreParam - restore param
|
||||||
|
* @param {object} log - werelogs logger
|
||||||
|
* @return {ArsenalError|undefined} internal error if object MD is not valid
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
function _updateRestoreInfo(objectMD, restoreParam, log) {
|
||||||
|
if (!objectMD.archive) {
|
||||||
|
log.debug('objectMD.archive doesn\'t exits', {
|
||||||
|
objectMD,
|
||||||
|
method: '_updateRestoreInfo'
|
||||||
|
});
|
||||||
|
return errors.InternalError.customizeDescription('Archive metadata is missing.');
|
||||||
|
}
|
||||||
|
/* eslint-disable no-param-reassign */
|
||||||
|
objectMD.archive.restoreRequestedAt = new Date();
|
||||||
|
objectMD.archive.restoreRequestedDays = restoreParam.days;
|
||||||
|
objectMD.originOp = 's3:ObjectRestore:Post';
|
||||||
|
/* eslint-enable no-param-reassign */
|
||||||
|
if (!ObjectMDArchive.isValid(objectMD.archive)) {
|
||||||
|
log.debug('archive is not valid', {
|
||||||
|
archive: objectMD.archive,
|
||||||
|
method: '_updateRestoreInfo'
|
||||||
|
});
|
||||||
|
return errors.InternalError.customizeDescription('Invalid archive metadata.');
|
||||||
|
}
|
||||||
|
return undefined;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* start to restore object.
|
||||||
|
* If not exist x-amz-restore, add it to objectMD.(x-amz-restore = false)
|
||||||
|
* calculate restore expiry-date and add it to objectMD.
|
||||||
|
* Be called by objectRestore.js
|
||||||
|
*
|
||||||
|
* @param {ObjectMD} objectMD - objectMd instance
|
||||||
|
* @param {object} restoreParam - bucket name
|
||||||
|
* @param {object} log - werelogs logger
|
||||||
|
* @param {function} cb - bucket name
|
||||||
|
* @return {undefined}
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
function startRestore(objectMD, restoreParam, log, cb) {
|
||||||
|
log.info('Validating if restore can be done or not.');
|
||||||
|
const checkResultError = _validateStartRestore(objectMD, log);
|
||||||
|
if (checkResultError) {
|
||||||
|
return cb(checkResultError);
|
||||||
|
}
|
||||||
|
log.info('Updating restore information.');
|
||||||
|
const updateResultError = _updateRestoreInfo(objectMD, restoreParam, log);
|
||||||
|
if (updateResultError) {
|
||||||
|
return cb(updateResultError);
|
||||||
|
}
|
||||||
|
const isObjectAlreadyRestored = _updateObjectExpirationDate(objectMD, log);
|
||||||
|
return cb(null, isObjectAlreadyRestored);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* checks if object data is available or if it's in cold storage
|
||||||
|
* @param {ObjectMD} objMD Object metadata
|
||||||
|
* @returns {ArsenalError|null} error if object data is not available
|
||||||
|
*/
|
||||||
|
function verifyColdObjectAvailable(objMD) {
|
||||||
|
// return error when object is cold
|
||||||
|
if (objMD.archive &&
|
||||||
|
// Object is in cold backend
|
||||||
|
(!objMD.archive.restoreRequestedAt ||
|
||||||
|
// Object is being restored
|
||||||
|
(objMD.archive.restoreRequestedAt && !objMD.archive.restoreCompletedAt))) {
|
||||||
|
const err = errors.InvalidObjectState
|
||||||
|
.customizeDescription('The operation is not valid for the object\'s storage class');
|
||||||
|
return err;
|
||||||
|
}
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
module.exports = {
|
||||||
|
startRestore,
|
||||||
|
getAmzRestoreResHeader,
|
||||||
|
validatePutVersionId,
|
||||||
|
verifyColdObjectAvailable,
|
||||||
|
};
|
|
@ -5,10 +5,9 @@ const getMetaHeaders = s3middleware.userMetadata.getMetaHeaders;
|
||||||
const constants = require('../../../../constants');
|
const constants = require('../../../../constants');
|
||||||
const { data } = require('../../../data/wrapper');
|
const { data } = require('../../../data/wrapper');
|
||||||
const services = require('../../../services');
|
const services = require('../../../services');
|
||||||
const logger = require('../../../utilities/logger');
|
|
||||||
const { dataStore } = require('./storeObject');
|
const { dataStore } = require('./storeObject');
|
||||||
const locationConstraintCheck = require('./locationConstraintCheck');
|
const locationConstraintCheck = require('./locationConstraintCheck');
|
||||||
const { versioningPreprocessing } = require('./versioning');
|
const { versioningPreprocessing, overwritingVersioning } = require('./versioning');
|
||||||
const removeAWSChunked = require('./removeAWSChunked');
|
const removeAWSChunked = require('./removeAWSChunked');
|
||||||
const getReplicationInfo = require('./getReplicationInfo');
|
const getReplicationInfo = require('./getReplicationInfo');
|
||||||
const { config } = require('../../../Config');
|
const { config } = require('../../../Config');
|
||||||
|
@ -21,7 +20,7 @@ const externalVersioningErrorMessage = 'We do not currently support putting ' +
|
||||||
'a versioned object to a location-constraint of type Azure or GCP.';
|
'a versioned object to a location-constraint of type Azure or GCP.';
|
||||||
|
|
||||||
function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
|
function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
|
||||||
metadataStoreParams, dataToDelete, deleteLog, requestMethod, callback) {
|
metadataStoreParams, dataToDelete, log, requestMethod, callback) {
|
||||||
services.metadataStoreObject(bucketName, dataGetInfo,
|
services.metadataStoreObject(bucketName, dataGetInfo,
|
||||||
cipherBundle, metadataStoreParams, (err, result) => {
|
cipherBundle, metadataStoreParams, (err, result) => {
|
||||||
if (err) {
|
if (err) {
|
||||||
|
@ -31,7 +30,7 @@ function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
|
||||||
const newDataStoreName = Array.isArray(dataGetInfo) ?
|
const newDataStoreName = Array.isArray(dataGetInfo) ?
|
||||||
dataGetInfo[0].dataStoreName : null;
|
dataGetInfo[0].dataStoreName : null;
|
||||||
return data.batchDelete(dataToDelete, requestMethod,
|
return data.batchDelete(dataToDelete, requestMethod,
|
||||||
newDataStoreName, deleteLog, err => callback(err, result));
|
newDataStoreName, log, err => callback(err, result));
|
||||||
}
|
}
|
||||||
return callback(null, result);
|
return callback(null, result);
|
||||||
});
|
});
|
||||||
|
@ -51,7 +50,9 @@ function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
|
||||||
* @param {(object|null)} streamingV4Params - if v4 auth, object containing
|
* @param {(object|null)} streamingV4Params - if v4 auth, object containing
|
||||||
* accessKey, signatureFromRequest, region, scopeDate, timestamp, and
|
* accessKey, signatureFromRequest, region, scopeDate, timestamp, and
|
||||||
* credentialScope (to be used for streaming v4 auth if applicable)
|
* credentialScope (to be used for streaming v4 auth if applicable)
|
||||||
|
* @param {(object|null)} overheadField - fields to be included in metadata overhead
|
||||||
* @param {RequestLogger} log - logger instance
|
* @param {RequestLogger} log - logger instance
|
||||||
|
* @param {string} originOp - Origin operation
|
||||||
* @param {function} callback - callback function
|
* @param {function} callback - callback function
|
||||||
* @return {undefined} and call callback with (err, result) -
|
* @return {undefined} and call callback with (err, result) -
|
||||||
* result.contentMD5 - content md5 of new object or version
|
* result.contentMD5 - content md5 of new object or version
|
||||||
|
@ -59,7 +60,10 @@ function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
|
||||||
*/
|
*/
|
||||||
function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
|
function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
|
||||||
canonicalID, cipherBundle, request, isDeleteMarker, streamingV4Params,
|
canonicalID, cipherBundle, request, isDeleteMarker, streamingV4Params,
|
||||||
log, callback) {
|
overheadField, log, originOp, callback) {
|
||||||
|
const putVersionId = request.headers['x-scal-s3-version-id'];
|
||||||
|
const isPutVersion = putVersionId || putVersionId === '';
|
||||||
|
|
||||||
const size = isDeleteMarker ? 0 : request.parsedContentLength;
|
const size = isDeleteMarker ? 0 : request.parsedContentLength;
|
||||||
// although the request method may actually be 'DELETE' if creating a
|
// although the request method may actually be 'DELETE' if creating a
|
||||||
// delete marker, for our purposes we consider this to be a 'PUT'
|
// delete marker, for our purposes we consider this to be a 'PUT'
|
||||||
|
@ -112,6 +116,7 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
|
||||||
isDeleteMarker,
|
isDeleteMarker,
|
||||||
replicationInfo: getReplicationInfo(
|
replicationInfo: getReplicationInfo(
|
||||||
objectKey, bucketMD, false, size, null, null, authInfo),
|
objectKey, bucketMD, false, size, null, null, authInfo),
|
||||||
|
overheadField,
|
||||||
log,
|
log,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -129,7 +134,6 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
if (!isDeleteMarker) {
|
if (!isDeleteMarker) {
|
||||||
metadataStoreParams.contentType = request.headers['content-type'];
|
metadataStoreParams.contentType = request.headers['content-type'];
|
||||||
metadataStoreParams.cacheControl = request.headers['cache-control'];
|
metadataStoreParams.cacheControl = request.headers['cache-control'];
|
||||||
|
@ -139,7 +143,7 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
|
||||||
removeAWSChunked(request.headers['content-encoding']);
|
removeAWSChunked(request.headers['content-encoding']);
|
||||||
metadataStoreParams.expires = request.headers.expires;
|
metadataStoreParams.expires = request.headers.expires;
|
||||||
metadataStoreParams.tagging = request.headers['x-amz-tagging'];
|
metadataStoreParams.tagging = request.headers['x-amz-tagging'];
|
||||||
metadataStoreParams.originOp = 's3:ObjectCreated:Put';
|
metadataStoreParams.originOp = originOp;
|
||||||
const defaultObjectLockConfiguration
|
const defaultObjectLockConfiguration
|
||||||
= bucketMD.getObjectLockConfiguration();
|
= bucketMD.getObjectLockConfiguration();
|
||||||
if (defaultObjectLockConfiguration) {
|
if (defaultObjectLockConfiguration) {
|
||||||
|
@ -154,7 +158,7 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
|
||||||
// eslint-disable-next-line no-param-reassign
|
// eslint-disable-next-line no-param-reassign
|
||||||
request.headers[constants.objectLocationConstraintHeader] =
|
request.headers[constants.objectLocationConstraintHeader] =
|
||||||
objMD[constants.objectLocationConstraintHeader];
|
objMD[constants.objectLocationConstraintHeader];
|
||||||
metadataStoreParams.originOp = 's3:ObjectRemoved:DeleteMarkerCreated';
|
metadataStoreParams.originOp = originOp;
|
||||||
}
|
}
|
||||||
|
|
||||||
const backendInfoObj =
|
const backendInfoObj =
|
||||||
|
@ -185,14 +189,17 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (objMD && objMD.uploadId) {
|
||||||
|
metadataStoreParams.oldReplayId = objMD.uploadId;
|
||||||
|
}
|
||||||
|
|
||||||
/* eslint-disable camelcase */
|
/* eslint-disable camelcase */
|
||||||
const dontSkipBackend = externalBackends;
|
const dontSkipBackend = externalBackends;
|
||||||
/* eslint-enable camelcase */
|
/* eslint-enable camelcase */
|
||||||
|
|
||||||
const requestLogger =
|
|
||||||
logger.newRequestLoggerFromSerializedUids(log.getSerializedUids());
|
|
||||||
const mdOnlyHeader = request.headers['x-amz-meta-mdonly'];
|
const mdOnlyHeader = request.headers['x-amz-meta-mdonly'];
|
||||||
const mdOnlySize = request.headers['x-amz-meta-size'];
|
const mdOnlySize = request.headers['x-amz-meta-size'];
|
||||||
|
|
||||||
return async.waterfall([
|
return async.waterfall([
|
||||||
function storeData(next) {
|
function storeData(next) {
|
||||||
if (size === 0) {
|
if (size === 0) {
|
||||||
|
@ -257,6 +264,11 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
|
||||||
return next(null, dataGetInfoArr);
|
return next(null, dataGetInfoArr);
|
||||||
},
|
},
|
||||||
function getVersioningInfo(infoArr, next) {
|
function getVersioningInfo(infoArr, next) {
|
||||||
|
// if x-scal-s3-version-id header is specified, we overwrite the object/version metadata.
|
||||||
|
if (isPutVersion) {
|
||||||
|
const options = overwritingVersioning(objMD, metadataStoreParams);
|
||||||
|
return process.nextTick(() => next(null, options, infoArr));
|
||||||
|
}
|
||||||
return versioningPreprocessing(bucketName, bucketMD,
|
return versioningPreprocessing(bucketName, bucketMD,
|
||||||
metadataStoreParams.objectKey, objMD, log, (err, options) => {
|
metadataStoreParams.objectKey, objMD, log, (err, options) => {
|
||||||
if (err) {
|
if (err) {
|
||||||
|
@ -276,11 +288,13 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
|
||||||
metadataStoreParams.versionId = options.versionId;
|
metadataStoreParams.versionId = options.versionId;
|
||||||
metadataStoreParams.versioning = options.versioning;
|
metadataStoreParams.versioning = options.versioning;
|
||||||
metadataStoreParams.isNull = options.isNull;
|
metadataStoreParams.isNull = options.isNull;
|
||||||
metadataStoreParams.nullVersionId = options.nullVersionId;
|
metadataStoreParams.deleteNullKey = options.deleteNullKey;
|
||||||
metadataStoreParams.nullUploadId = options.nullUploadId;
|
if (options.extraMD) {
|
||||||
|
Object.assign(metadataStoreParams, options.extraMD);
|
||||||
|
}
|
||||||
return _storeInMDandDeleteData(bucketName, infoArr,
|
return _storeInMDandDeleteData(bucketName, infoArr,
|
||||||
cipherBundle, metadataStoreParams,
|
cipherBundle, metadataStoreParams,
|
||||||
options.dataToDelete, requestLogger, requestMethod, next);
|
options.dataToDelete, log, requestMethod, next);
|
||||||
},
|
},
|
||||||
], callback);
|
], callback);
|
||||||
}
|
}
|
||||||
|
|
|
@ -0,0 +1,18 @@
|
||||||
|
/**
|
||||||
|
* _bucketRequiresOplogUpdate - DELETE an object from a bucket
|
||||||
|
* @param {BucketInfo} bucket - bucket object
|
||||||
|
* @return {boolean} whether objects require oplog updates on deletion, or not
|
||||||
|
*/
|
||||||
|
function _bucketRequiresOplogUpdate(bucket) {
|
||||||
|
// Default behavior is to require an oplog update
|
||||||
|
if (!bucket || !bucket.getLifecycleConfiguration || !bucket.getNotificationConfiguration) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
// If the bucket has lifecycle configuration or notification configuration
|
||||||
|
// set, we also require an oplog update
|
||||||
|
return bucket.getLifecycleConfiguration() || bucket.getNotificationConfiguration();
|
||||||
|
}
|
||||||
|
|
||||||
|
module.exports = {
|
||||||
|
_bucketRequiresOplogUpdate,
|
||||||
|
};
|
|
@ -4,23 +4,25 @@ const {
|
||||||
LifecycleDateTime,
|
LifecycleDateTime,
|
||||||
LifecycleUtils,
|
LifecycleUtils,
|
||||||
} = require('arsenal').s3middleware.lifecycleHelpers;
|
} = require('arsenal').s3middleware.lifecycleHelpers;
|
||||||
|
const { config } = require('../../../Config');
|
||||||
|
|
||||||
// moves lifecycle transition deadlines 1 day earlier, mostly for testing
|
const {
|
||||||
const transitionOneDayEarlier = process.env.TRANSITION_ONE_DAY_EARLIER === 'true';
|
expireOneDayEarlier,
|
||||||
// moves lifecycle expiration deadlines 1 day earlier, mostly for testing
|
transitionOneDayEarlier,
|
||||||
const expireOneDayEarlier = process.env.EXPIRE_ONE_DAY_EARLIER === 'true';
|
timeProgressionFactor,
|
||||||
|
scaledMsPerDay,
|
||||||
|
} = config.getTimeOptions();
|
||||||
|
|
||||||
const lifecycleDateTime = new LifecycleDateTime({
|
const lifecycleDateTime = new LifecycleDateTime({
|
||||||
transitionOneDayEarlier,
|
transitionOneDayEarlier,
|
||||||
expireOneDayEarlier,
|
expireOneDayEarlier,
|
||||||
|
timeProgressionFactor,
|
||||||
});
|
});
|
||||||
|
|
||||||
const lifecycleUtils = new LifecycleUtils(supportedLifecycleRules, lifecycleDateTime);
|
const lifecycleUtils = new LifecycleUtils(supportedLifecycleRules, lifecycleDateTime, timeProgressionFactor);
|
||||||
|
|
||||||
const oneDay = 24 * 60 * 60 * 1000; // Milliseconds in a day.
|
|
||||||
|
|
||||||
function calculateDate(objDate, expDays, datetime) {
|
function calculateDate(objDate, expDays, datetime) {
|
||||||
return new Date(datetime.getTimestamp(objDate) + expDays * oneDay);
|
return new Date(datetime.getTimestamp(objDate) + (expDays * scaledMsPerDay));
|
||||||
}
|
}
|
||||||
|
|
||||||
function formatExpirationHeader(date, id) {
|
function formatExpirationHeader(date, id) {
|
||||||
|
@ -37,8 +39,10 @@ const AMZ_ABORT_ID_HEADER = 'x-amz-abort-rule-id';
|
||||||
|
|
||||||
function _generateExpHeadersObjects(rules, params, datetime) {
|
function _generateExpHeadersObjects(rules, params, datetime) {
|
||||||
const tags = {
|
const tags = {
|
||||||
TagSet: Object.keys(params.tags)
|
TagSet: params.tags
|
||||||
.map(key => ({ Key: key, Value: params.tags[key] })),
|
? Object.keys(params.tags)
|
||||||
|
.map(key => ({ Key: key, Value: params.tags[key] }))
|
||||||
|
: [],
|
||||||
};
|
};
|
||||||
|
|
||||||
const objectInfo = { Key: params.key };
|
const objectInfo = { Key: params.key };
|
||||||
|
|
|
@ -23,12 +23,12 @@ function _getStorageClasses(rule) {
|
||||||
}
|
}
|
||||||
const { replicationEndpoints } = s3config;
|
const { replicationEndpoints } = s3config;
|
||||||
// If no storage class, use the given default endpoint or the sole endpoint
|
// If no storage class, use the given default endpoint or the sole endpoint
|
||||||
if (replicationEndpoints.length > 1) {
|
if (replicationEndpoints.length > 0) {
|
||||||
const endPoint =
|
const endPoint =
|
||||||
replicationEndpoints.find(endpoint => endpoint.default);
|
replicationEndpoints.find(endpoint => endpoint.default) || replicationEndpoints[0];
|
||||||
return [endPoint.site];
|
return [endPoint.site];
|
||||||
}
|
}
|
||||||
return [replicationEndpoints[0].site];
|
return undefined;
|
||||||
}
|
}
|
||||||
|
|
||||||
function _getReplicationInfo(rule, replicationConfig, content, operationType,
|
function _getReplicationInfo(rule, replicationConfig, content, operationType,
|
||||||
|
@ -36,6 +36,9 @@ function _getReplicationInfo(rule, replicationConfig, content, operationType,
|
||||||
const storageTypes = [];
|
const storageTypes = [];
|
||||||
const backends = [];
|
const backends = [];
|
||||||
const storageClasses = _getStorageClasses(rule);
|
const storageClasses = _getStorageClasses(rule);
|
||||||
|
if (!storageClasses) {
|
||||||
|
return undefined;
|
||||||
|
}
|
||||||
storageClasses.forEach(storageClass => {
|
storageClasses.forEach(storageClass => {
|
||||||
const storageClassName =
|
const storageClassName =
|
||||||
storageClass.endsWith(':preferred_read') ?
|
storageClass.endsWith(':preferred_read') ?
|
||||||
|
|
|
@ -0,0 +1,190 @@
|
||||||
|
const { versioning } = require('arsenal');
|
||||||
|
const versionIdUtils = versioning.VersionID;
|
||||||
|
|
||||||
|
const { lifecycleListing } = require('../../../../constants');
|
||||||
|
const { CURRENT_TYPE, NON_CURRENT_TYPE, ORPHAN_DM_TYPE } = lifecycleListing;
|
||||||
|
|
||||||
|
function _makeTags(tags) {
|
||||||
|
const res = [];
|
||||||
|
Object.entries(tags).forEach(([key, value]) =>
|
||||||
|
res.push(
|
||||||
|
{
|
||||||
|
Key: key,
|
||||||
|
Value: value,
|
||||||
|
}
|
||||||
|
));
|
||||||
|
return res;
|
||||||
|
}
|
||||||
|
|
||||||
|
function processCurrents(bucketName, listParams, isBucketVersioned, list) {
|
||||||
|
const data = {
|
||||||
|
Name: bucketName,
|
||||||
|
Prefix: listParams.prefix,
|
||||||
|
MaxKeys: listParams.maxKeys,
|
||||||
|
MaxScannedLifecycleListingEntries: listParams.maxScannedLifecycleListingEntries,
|
||||||
|
IsTruncated: !!list.IsTruncated,
|
||||||
|
Marker: listParams.marker,
|
||||||
|
BeforeDate: listParams.beforeDate,
|
||||||
|
NextMarker: list.NextMarker,
|
||||||
|
Contents: [],
|
||||||
|
};
|
||||||
|
|
||||||
|
list.Contents.forEach(item => {
|
||||||
|
const v = item.value;
|
||||||
|
|
||||||
|
const content = {
|
||||||
|
Key: item.key,
|
||||||
|
LastModified: v.LastModified,
|
||||||
|
ETag: `"${v.ETag}"`,
|
||||||
|
Size: v.Size,
|
||||||
|
Owner: {
|
||||||
|
ID: v.Owner.ID,
|
||||||
|
DisplayName: v.Owner.DisplayName,
|
||||||
|
},
|
||||||
|
StorageClass: v.StorageClass,
|
||||||
|
TagSet: _makeTags(v.tags),
|
||||||
|
IsLatest: true, // for compatibility with AWS ListObjectVersions.
|
||||||
|
DataStoreName: v.dataStoreName,
|
||||||
|
ListType: CURRENT_TYPE,
|
||||||
|
};
|
||||||
|
|
||||||
|
// NOTE: The current versions listed to be lifecycle should include version id
|
||||||
|
// if the bucket is versioned.
|
||||||
|
if (isBucketVersioned) {
|
||||||
|
const versionId = (v.IsNull || v.VersionId === undefined) ?
|
||||||
|
'null' : versionIdUtils.encode(v.VersionId);
|
||||||
|
content.VersionId = versionId;
|
||||||
|
}
|
||||||
|
|
||||||
|
data.Contents.push(content);
|
||||||
|
});
|
||||||
|
|
||||||
|
return data;
|
||||||
|
}
|
||||||
|
|
||||||
|
function _encodeVersionId(vid) {
|
||||||
|
let versionId = vid;
|
||||||
|
if (versionId && versionId !== 'null') {
|
||||||
|
versionId = versionIdUtils.encode(versionId);
|
||||||
|
}
|
||||||
|
return versionId;
|
||||||
|
}
|
||||||
|
|
||||||
|
function processNonCurrents(bucketName, listParams, list) {
|
||||||
|
const nextVersionIdMarker = _encodeVersionId(list.NextVersionIdMarker);
|
||||||
|
const versionIdMarker = _encodeVersionId(listParams.versionIdMarker);
|
||||||
|
|
||||||
|
const data = {
|
||||||
|
Name: bucketName,
|
||||||
|
Prefix: listParams.prefix,
|
||||||
|
MaxKeys: listParams.maxKeys,
|
||||||
|
MaxScannedLifecycleListingEntries: listParams.maxScannedLifecycleListingEntries,
|
||||||
|
IsTruncated: !!list.IsTruncated,
|
||||||
|
KeyMarker: listParams.keyMarker,
|
||||||
|
VersionIdMarker: versionIdMarker,
|
||||||
|
BeforeDate: listParams.beforeDate,
|
||||||
|
NextKeyMarker: list.NextKeyMarker,
|
||||||
|
NextVersionIdMarker: nextVersionIdMarker,
|
||||||
|
Contents: [],
|
||||||
|
};
|
||||||
|
|
||||||
|
list.Contents.forEach(item => {
|
||||||
|
const v = item.value;
|
||||||
|
const versionId = (v.IsNull || v.VersionId === undefined) ?
|
||||||
|
'null' : versionIdUtils.encode(v.VersionId);
|
||||||
|
|
||||||
|
const content = {
|
||||||
|
Key: item.key,
|
||||||
|
LastModified: v.LastModified,
|
||||||
|
ETag: `"${v.ETag}"`,
|
||||||
|
Size: v.Size,
|
||||||
|
Owner: {
|
||||||
|
ID: v.Owner.ID,
|
||||||
|
DisplayName: v.Owner.DisplayName,
|
||||||
|
},
|
||||||
|
StorageClass: v.StorageClass,
|
||||||
|
TagSet: _makeTags(v.tags),
|
||||||
|
staleDate: v.staleDate, // lowerCamelCase to be compatible with existing lifecycle.
|
||||||
|
VersionId: versionId,
|
||||||
|
DataStoreName: v.dataStoreName,
|
||||||
|
ListType: NON_CURRENT_TYPE,
|
||||||
|
};
|
||||||
|
|
||||||
|
data.Contents.push(content);
|
||||||
|
});
|
||||||
|
|
||||||
|
return data;
|
||||||
|
}
|
||||||
|
|
||||||
|
function processOrphans(bucketName, listParams, list) {
|
||||||
|
const data = {
|
||||||
|
Name: bucketName,
|
||||||
|
Prefix: listParams.prefix,
|
||||||
|
MaxKeys: listParams.maxKeys,
|
||||||
|
MaxScannedLifecycleListingEntries: listParams.maxScannedLifecycleListingEntries,
|
||||||
|
IsTruncated: !!list.IsTruncated,
|
||||||
|
Marker: listParams.marker,
|
||||||
|
BeforeDate: listParams.beforeDate,
|
||||||
|
NextMarker: list.NextMarker,
|
||||||
|
Contents: [],
|
||||||
|
};
|
||||||
|
|
||||||
|
list.Contents.forEach(item => {
|
||||||
|
const v = item.value;
|
||||||
|
const versionId = (v.IsNull || v.VersionId === undefined) ?
|
||||||
|
'null' : versionIdUtils.encode(v.VersionId);
|
||||||
|
data.Contents.push({
|
||||||
|
Key: item.key,
|
||||||
|
LastModified: v.LastModified,
|
||||||
|
Owner: {
|
||||||
|
ID: v.Owner.ID,
|
||||||
|
DisplayName: v.Owner.DisplayName,
|
||||||
|
},
|
||||||
|
VersionId: versionId,
|
||||||
|
IsLatest: true, // for compatibility with AWS ListObjectVersions.
|
||||||
|
ListType: ORPHAN_DM_TYPE,
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
return data;
|
||||||
|
}
|
||||||
|
|
||||||
|
function getLocationConstraintErrorMessage(locationName) {
|
||||||
|
return 'value of the location you are attempting to set ' +
|
||||||
|
`- ${locationName} - is not listed in the locationConstraint config`;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* validateMaxScannedEntries - Validates and returns the maximum scanned entries value.
|
||||||
|
*
|
||||||
|
* @param {object} params - Query parameters
|
||||||
|
* @param {object} config - CloudServer configuration
|
||||||
|
* @param {number} min - Minimum number of entries to be scanned
|
||||||
|
* @returns {Object} - An object indicating the validation result:
|
||||||
|
* - isValid (boolean): Whether the validation is successful.
|
||||||
|
* - maxScannedLifecycleListingEntries (number): The validated maximum scanned entries value if isValid is true.
|
||||||
|
*/
|
||||||
|
function validateMaxScannedEntries(params, config, min) {
|
||||||
|
let maxScannedLifecycleListingEntries = config.maxScannedLifecycleListingEntries;
|
||||||
|
|
||||||
|
if (params['max-scanned-lifecycle-listing-entries']) {
|
||||||
|
const maxEntriesParams = Number.parseInt(params['max-scanned-lifecycle-listing-entries'], 10);
|
||||||
|
|
||||||
|
if (Number.isNaN(maxEntriesParams) || maxEntriesParams < min ||
|
||||||
|
maxEntriesParams > maxScannedLifecycleListingEntries) {
|
||||||
|
return { isValid: false };
|
||||||
|
}
|
||||||
|
|
||||||
|
maxScannedLifecycleListingEntries = maxEntriesParams;
|
||||||
|
}
|
||||||
|
|
||||||
|
return { isValid: true, maxScannedLifecycleListingEntries };
|
||||||
|
}
|
||||||
|
|
||||||
|
module.exports = {
|
||||||
|
processCurrents,
|
||||||
|
processNonCurrents,
|
||||||
|
processOrphans,
|
||||||
|
getLocationConstraintErrorMessage,
|
||||||
|
validateMaxScannedEntries,
|
||||||
|
};
|
|
@ -1,5 +1,11 @@
|
||||||
const { errors } = require('arsenal');
|
const { errors, auth, policies } = require('arsenal');
|
||||||
const moment = require('moment');
|
const moment = require('moment');
|
||||||
|
|
||||||
|
const { config } = require('../../../Config');
|
||||||
|
const vault = require('../../../auth/vault');
|
||||||
|
const { evaluateBucketPolicyWithIAM } = require('../authorization/permissionChecks');
|
||||||
|
|
||||||
|
const { scaledMsPerDay } = config.getTimeOptions();
|
||||||
/**
|
/**
|
||||||
* Calculates retain until date for the locked object version
|
* Calculates retain until date for the locked object version
|
||||||
* @param {object} retention - includes days or years retention period
|
* @param {object} retention - includes days or years retention period
|
||||||
|
@ -15,8 +21,9 @@ function calculateRetainUntilDate(retention) {
|
||||||
const date = moment();
|
const date = moment();
|
||||||
// Calculate the number of days to retain the lock on the object
|
// Calculate the number of days to retain the lock on the object
|
||||||
const retainUntilDays = days || years * 365;
|
const retainUntilDays = days || years * 365;
|
||||||
|
const retainUntilDaysInMs = retainUntilDays * scaledMsPerDay;
|
||||||
const retainUntilDate
|
const retainUntilDate
|
||||||
= date.add(retainUntilDays, 'days');
|
= date.add(retainUntilDaysInMs, 'ms');
|
||||||
return retainUntilDate.toISOString();
|
return retainUntilDate.toISOString();
|
||||||
}
|
}
|
||||||
/**
|
/**
|
||||||
|
@ -43,7 +50,7 @@ function validateHeaders(bucket, headers, log) {
|
||||||
!(objectLockMode && objectLockDate)) {
|
!(objectLockMode && objectLockDate)) {
|
||||||
return errors.InvalidArgument.customizeDescription(
|
return errors.InvalidArgument.customizeDescription(
|
||||||
'x-amz-object-lock-retain-until-date and ' +
|
'x-amz-object-lock-retain-until-date and ' +
|
||||||
'x-amz-object-lock-mode must both be supplied'
|
'x-amz-object-lock-mode must both be supplied',
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
const validModes = new Set(['GOVERNANCE', 'COMPLIANCE']);
|
const validModes = new Set(['GOVERNANCE', 'COMPLIANCE']);
|
||||||
|
@ -126,73 +133,216 @@ function setObjectLockInformation(headers, md, defaultRetention) {
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* isObjectLocked - checks whether object is locked or not
|
* Helper class for check object lock state checks
|
||||||
* @param {obect} bucket - bucket metadata
|
|
||||||
* @param {object} objectMD - object metadata
|
|
||||||
* @param {array} headers - request headers
|
|
||||||
* @return {boolean} - indicates whether object is locked or not
|
|
||||||
*/
|
*/
|
||||||
function isObjectLocked(bucket, objectMD, headers) {
|
class ObjectLockInfo {
|
||||||
if (bucket.isObjectLockEnabled()) {
|
/**
|
||||||
const objectLegalHold = objectMD.legalHold;
|
*
|
||||||
if (objectLegalHold) {
|
* @param {object} retentionInfo - The object lock retention policy
|
||||||
|
* @param {"GOVERNANCE" | "COMPLIANCE" | null} retentionInfo.mode - Retention policy mode.
|
||||||
|
* @param {string} retentionInfo.date - Expiration date of retention policy. A string in ISO-8601 format
|
||||||
|
* @param {bool} retentionInfo.legalHold - Whether a legal hold is enable for the object
|
||||||
|
*/
|
||||||
|
constructor(retentionInfo) {
|
||||||
|
this.mode = retentionInfo.mode || null;
|
||||||
|
this.date = retentionInfo.date || null;
|
||||||
|
this.legalHold = retentionInfo.legalHold || false;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* ObjectLockInfo.isLocked
|
||||||
|
* @returns {bool} - Whether the retention policy is active and protecting the object
|
||||||
|
*/
|
||||||
|
isLocked() {
|
||||||
|
if (this.legalHold) {
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
const retentionMode = objectMD.retentionMode;
|
|
||||||
const retentionDate = objectMD.retentionDate;
|
if (!this.mode || !this.date) {
|
||||||
if (!retentionMode || !retentionDate) {
|
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
if (retentionMode === 'GOVERNANCE' &&
|
|
||||||
headers['x-amz-bypass-governance-retention']) {
|
return !this.isExpired();
|
||||||
return false;
|
|
||||||
}
|
}
|
||||||
const objectDate = moment(retentionDate);
|
|
||||||
|
/**
|
||||||
|
* ObjectLockInfo.isGovernanceMode
|
||||||
|
* @returns {bool} - true if retention mode is GOVERNANCE
|
||||||
|
*/
|
||||||
|
isGovernanceMode() {
|
||||||
|
return this.mode === 'GOVERNANCE';
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* ObjectLockInfo.isComplianceMode
|
||||||
|
* @returns {bool} - True if retention mode is COMPLIANCE
|
||||||
|
*/
|
||||||
|
isComplianceMode() {
|
||||||
|
return this.mode === 'COMPLIANCE';
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* ObjectLockInfo.isExpired
|
||||||
|
* @returns {bool} - True if the retention policy has expired
|
||||||
|
*/
|
||||||
|
isExpired() {
|
||||||
const now = moment();
|
const now = moment();
|
||||||
// indicates retain until date has expired
|
return this.date === null || now.isSameOrAfter(this.date);
|
||||||
if (now.isSameOrAfter(objectDate)) {
|
|
||||||
return false;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* ObjectLockInfo.isExtended
|
||||||
|
* @param {string} timestamp - Timestamp in ISO-8601 format
|
||||||
|
* @returns {bool} - True if the given timestamp is after the policy expiration date or if no expiration date is set
|
||||||
|
*/
|
||||||
|
isExtended(timestamp) {
|
||||||
|
return timestamp !== undefined && (this.date === null || moment(timestamp).isSameOrAfter(this.date));
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* ObjectLockInfo.canModifyObject
|
||||||
|
* @param {bool} hasGovernanceBypass - Whether to bypass governance retention policies
|
||||||
|
* @returns {bool} - True if the retention policy allows the objects data to be modified (overwritten/deleted)
|
||||||
|
*/
|
||||||
|
canModifyObject(hasGovernanceBypass) {
|
||||||
|
// can modify object if object is not locked
|
||||||
|
// cannot modify object in any cases if legal hold is enabled
|
||||||
|
// if no legal hold, can only modify object if bypassing governance when locked
|
||||||
|
if (!this.isLocked()) {
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
return !this.legalHold && this.isGovernanceMode() && !!hasGovernanceBypass;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* ObjectLockInfo.canModifyPolicy
|
||||||
|
* @param {object} policyChanges - Proposed changes to the retention policy
|
||||||
|
* @param {"GOVERNANCE" | "COMPLIANCE" | undefined} policyChanges.mode - Retention policy mode.
|
||||||
|
* @param {string} policyChanges.date - Expiration date of retention policy. A string in ISO-8601 format
|
||||||
|
* @param {bool} hasGovernanceBypass - Whether to bypass governance retention policies
|
||||||
|
* @returns {bool} - True if the changes are allowed to be applied to the retention policy
|
||||||
|
*/
|
||||||
|
canModifyPolicy(policyChanges, hasGovernanceBypass) {
|
||||||
|
// If an object does not have a retention policy or it is expired then all changes are allowed
|
||||||
|
if (!this.isLocked()) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// The only allowed change in compliance mode is extending the retention period
|
||||||
|
if (this.isComplianceMode()) {
|
||||||
|
if (policyChanges.mode === 'COMPLIANCE' && this.isExtended(policyChanges.date)) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (this.isGovernanceMode()) {
|
||||||
|
// Extensions are always allowed in governance mode
|
||||||
|
if (policyChanges.mode === 'GOVERNANCE' && this.isExtended(policyChanges.date)) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// All other changes in governance mode require a bypass
|
||||||
|
if (hasGovernanceBypass) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
function validateObjectLockUpdate(objectMD, retentionInfo, bypassGovernance) {
|
|
||||||
const { retentionMode: existingMode, retentionDate: existingDateISO } = objectMD;
|
|
||||||
if (!existingMode) {
|
|
||||||
return null;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
const existingDate = new Date(existingDateISO);
|
/**
|
||||||
const isExpired = existingDate < Date.now();
|
*
|
||||||
const isExtended = new Date(retentionInfo.date) > existingDate;
|
* @param {object} headers - s3 request headers
|
||||||
|
* @returns {bool} - True if the headers is present and === "true"
|
||||||
if (existingMode === 'GOVERNANCE' && !isExpired && !bypassGovernance) {
|
*/
|
||||||
if (retentionInfo.mode === 'GOVERNANCE' && isExtended) {
|
function hasGovernanceBypassHeader(headers) {
|
||||||
return null;
|
const bypassHeader = headers['x-amz-bypass-governance-retention'] || '';
|
||||||
}
|
return bypassHeader.toLowerCase() === 'true';
|
||||||
return errors.AccessDenied;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (existingMode === 'COMPLIANCE') {
|
|
||||||
if (retentionInfo.mode === 'GOVERNANCE' && !isExpired) {
|
|
||||||
return errors.AccessDenied;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!isExtended) {
|
/**
|
||||||
return errors.AccessDenied;
|
* checkUserGovernanceBypass
|
||||||
}
|
*
|
||||||
}
|
* Checks for the presence of the s3:BypassGovernanceRetention permission for a given user
|
||||||
|
*
|
||||||
|
* @param {object} request - Incoming s3 request
|
||||||
|
* @param {object} authInfo - s3 authentication info
|
||||||
|
* @param {object} bucketMD - bucket metadata
|
||||||
|
* @param {string} objectKey - object key
|
||||||
|
* @param {object} log - Werelogs logger
|
||||||
|
* @param {function} cb - callback returns errors.AccessDenied if the authorization fails
|
||||||
|
* @returns {undefined} -
|
||||||
|
*/
|
||||||
|
function checkUserGovernanceBypass(request, authInfo, bucketMD, objectKey, log, cb) {
|
||||||
|
log.trace(
|
||||||
|
'object in GOVERNANCE mode and is user, checking for attached policies',
|
||||||
|
{ method: 'checkUserPolicyGovernanceBypass' },
|
||||||
|
);
|
||||||
|
|
||||||
return null;
|
const authParams = auth.server.extractParams(request, log, 's3', request.query);
|
||||||
|
const ip = policies.requestUtils.getClientIp(request, config);
|
||||||
|
const requestContextParams = {
|
||||||
|
constantParams: {
|
||||||
|
headers: request.headers,
|
||||||
|
query: request.query,
|
||||||
|
generalResource: bucketMD.getName(),
|
||||||
|
specificResource: { key: objectKey },
|
||||||
|
requesterIp: ip,
|
||||||
|
sslEnabled: request.connection.encrypted,
|
||||||
|
apiMethod: 'bypassGovernanceRetention',
|
||||||
|
awsService: 's3',
|
||||||
|
locationConstraint: bucketMD.getLocationConstraint(),
|
||||||
|
requesterInfo: authInfo,
|
||||||
|
signatureVersion: authParams.params.data.signatureVersion,
|
||||||
|
authType: authParams.params.data.authType,
|
||||||
|
signatureAge: authParams.params.data.signatureAge,
|
||||||
|
},
|
||||||
|
};
|
||||||
|
return vault.checkPolicies(requestContextParams,
|
||||||
|
authInfo.getArn(), log, (err, authorizationResults) => {
|
||||||
|
if (err) {
|
||||||
|
return cb(err);
|
||||||
|
}
|
||||||
|
const explicitDenyExists = authorizationResults.some(
|
||||||
|
authzResult => authzResult.isAllowed === false && !authzResult.isImplicit);
|
||||||
|
if (explicitDenyExists) {
|
||||||
|
log.trace('authorization check failed for user',
|
||||||
|
{
|
||||||
|
'method': 'checkUserPolicyGovernanceBypass',
|
||||||
|
's3:BypassGovernanceRetention': false,
|
||||||
|
});
|
||||||
|
return cb(errors.AccessDenied);
|
||||||
|
}
|
||||||
|
// Convert authorization results into an easier to handle format
|
||||||
|
const actionImplicitDenies = authorizationResults.reduce((acc, curr, idx) => {
|
||||||
|
const apiMethod = authorizationResults[idx].action;
|
||||||
|
// eslint-disable-next-line no-param-reassign
|
||||||
|
acc[apiMethod] = curr.isImplicit;
|
||||||
|
return acc;
|
||||||
|
}, {});
|
||||||
|
|
||||||
|
// Evaluate against the bucket policies
|
||||||
|
const areAllActionsAllowed = evaluateBucketPolicyWithIAM(
|
||||||
|
bucketMD,
|
||||||
|
Object.keys(actionImplicitDenies),
|
||||||
|
authInfo.getCanonicalID(),
|
||||||
|
authInfo,
|
||||||
|
actionImplicitDenies,
|
||||||
|
log,
|
||||||
|
request);
|
||||||
|
|
||||||
|
return cb(areAllActionsAllowed === true ? null : errors.AccessDenied);
|
||||||
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
module.exports = {
|
module.exports = {
|
||||||
calculateRetainUntilDate,
|
calculateRetainUntilDate,
|
||||||
compareObjectLockInformation,
|
compareObjectLockInformation,
|
||||||
setObjectLockInformation,
|
setObjectLockInformation,
|
||||||
isObjectLocked,
|
|
||||||
validateHeaders,
|
validateHeaders,
|
||||||
validateObjectLockUpdate,
|
hasGovernanceBypassHeader,
|
||||||
|
checkUserGovernanceBypass,
|
||||||
|
ObjectLockInfo,
|
||||||
};
|
};
|
||||||
|
|
|
@ -0,0 +1,172 @@
|
||||||
|
const async = require('async');
|
||||||
|
const { errors, s3middleware } = require('arsenal');
|
||||||
|
|
||||||
|
const { allowedRestoreObjectRequestTierValues } = require('../../../../constants');
|
||||||
|
const coldStorage = require('./coldStorage');
|
||||||
|
const monitoring = require('../../../utilities/monitoringHandler');
|
||||||
|
const { pushMetric } = require('../../../utapi/utilities');
|
||||||
|
const { decodeVersionId } = require('./versioning');
|
||||||
|
const collectCorsHeaders = require('../../../utilities/collectCorsHeaders');
|
||||||
|
const { parseRestoreRequestXml } = s3middleware.objectRestore;
|
||||||
|
const { processBytesToWrite, validateQuotas } = require('../quotas/quotaUtils');
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Check if tier is supported
|
||||||
|
* @param {object} restoreInfo - restore information
|
||||||
|
* @returns {ArsenalError|undefined} return NotImplemented error if tier not support
|
||||||
|
*/
|
||||||
|
function checkTierSupported(restoreInfo) {
|
||||||
|
if (!allowedRestoreObjectRequestTierValues.includes(restoreInfo.tier)) {
|
||||||
|
return errors.NotImplemented;
|
||||||
|
}
|
||||||
|
return undefined;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* POST Object restore process
|
||||||
|
*
|
||||||
|
* @param {MetadataWrapper} metadata - metadata wrapper
|
||||||
|
* @param {object} mdUtils - utility object to treat metadata
|
||||||
|
* @param {AuthInfo} userInfo - Instance of AuthInfo class with requester's info
|
||||||
|
* @param {IncomingMessage} request - request info
|
||||||
|
* @param {object} log - Werelogs logger
|
||||||
|
* @param {function} callback callback function
|
||||||
|
* @return {undefined}
|
||||||
|
*/
|
||||||
|
function objectRestore(metadata, mdUtils, userInfo, request, log, callback) {
|
||||||
|
const METHOD = 'objectRestore';
|
||||||
|
|
||||||
|
const { bucketName, objectKey } = request;
|
||||||
|
|
||||||
|
log.debug('processing request', { method: METHOD });
|
||||||
|
|
||||||
|
const decodedVidResult = decodeVersionId(request.query);
|
||||||
|
if (decodedVidResult instanceof Error) {
|
||||||
|
log.trace('invalid versionId query',
|
||||||
|
{
|
||||||
|
method: METHOD,
|
||||||
|
versionId: request.query.versionId,
|
||||||
|
error: decodedVidResult,
|
||||||
|
});
|
||||||
|
return process.nextTick(() => callback(decodedVidResult));
|
||||||
|
}
|
||||||
|
|
||||||
|
let isObjectRestored = false;
|
||||||
|
|
||||||
|
const mdValueParams = {
|
||||||
|
authInfo: userInfo,
|
||||||
|
bucketName,
|
||||||
|
objectKey,
|
||||||
|
versionId: decodedVidResult,
|
||||||
|
requestType: request.apiMethods || 'restoreObject',
|
||||||
|
/**
|
||||||
|
* Restoring an object might not cause any impact on
|
||||||
|
* the storage, if the object is already restored: in
|
||||||
|
* this case, the duration is extended. We disable the
|
||||||
|
* quota evaluation and trigger it manually.
|
||||||
|
*/
|
||||||
|
checkQuota: false,
|
||||||
|
request,
|
||||||
|
};
|
||||||
|
|
||||||
|
return async.waterfall([
|
||||||
|
// get metadata of bucket and object
|
||||||
|
function validateBucketAndObject(next) {
|
||||||
|
return mdUtils.standardMetadataValidateBucketAndObj(mdValueParams, request.actionImplicitDenies,
|
||||||
|
log, (err, bucketMD, objectMD) => {
|
||||||
|
if (err) {
|
||||||
|
log.trace('request authorization failed', { method: METHOD, error: err });
|
||||||
|
return next(err);
|
||||||
|
}
|
||||||
|
// Call back error if object metadata could not be obtained
|
||||||
|
if (!objectMD) {
|
||||||
|
const err = decodedVidResult ? errors.NoSuchVersion : errors.NoSuchKey;
|
||||||
|
log.trace('error no object metadata found', { method: METHOD, error: err });
|
||||||
|
return next(err, bucketMD);
|
||||||
|
}
|
||||||
|
// If object metadata is delete marker,
|
||||||
|
// call back NoSuchKey or MethodNotAllowed depending on specifying versionId
|
||||||
|
if (objectMD.isDeleteMarker) {
|
||||||
|
let err = errors.NoSuchKey;
|
||||||
|
if (decodedVidResult) {
|
||||||
|
err = errors.MethodNotAllowed;
|
||||||
|
}
|
||||||
|
log.trace('version is a delete marker', { method: METHOD, error: err });
|
||||||
|
return next(err, bucketMD, objectMD);
|
||||||
|
}
|
||||||
|
log.info('it acquired the object metadata.', {
|
||||||
|
'method': METHOD,
|
||||||
|
});
|
||||||
|
return next(null, bucketMD, objectMD);
|
||||||
|
});
|
||||||
|
},
|
||||||
|
|
||||||
|
// generate restore param obj from xml of request body and check tier validity
|
||||||
|
function parseRequestXmlAndCheckTier(bucketMD, objectMD, next) {
|
||||||
|
log.trace('parsing object restore information');
|
||||||
|
return parseRestoreRequestXml(request.post, log, (err, restoreInfo) => {
|
||||||
|
if (err) {
|
||||||
|
return next(err, bucketMD, objectMD, restoreInfo);
|
||||||
|
}
|
||||||
|
log.info('it parsed xml of the request body.', { method: METHOD, value: restoreInfo });
|
||||||
|
const checkTierResult = checkTierSupported(restoreInfo);
|
||||||
|
if (checkTierResult instanceof Error) {
|
||||||
|
return next(checkTierResult);
|
||||||
|
}
|
||||||
|
return next(null, bucketMD, objectMD, restoreInfo);
|
||||||
|
});
|
||||||
|
},
|
||||||
|
// start restore process
|
||||||
|
function startRestore(bucketMD, objectMD, restoreInfo, next) {
|
||||||
|
return coldStorage.startRestore(objectMD, restoreInfo, log,
|
||||||
|
(err, _isObjectRestored) => {
|
||||||
|
isObjectRestored = _isObjectRestored;
|
||||||
|
return next(err, bucketMD, objectMD);
|
||||||
|
});
|
||||||
|
},
|
||||||
|
function evaluateQuotas(bucketMD, objectMD, next) {
|
||||||
|
if (isObjectRestored) {
|
||||||
|
return next(null, bucketMD, objectMD);
|
||||||
|
}
|
||||||
|
const actions = Array.isArray(mdValueParams.requestType) ?
|
||||||
|
mdValueParams.requestType : [mdValueParams.requestType];
|
||||||
|
const bytes = processBytesToWrite(request.apiMethod, bucketMD, mdValueParams.versionId, 0, objectMD);
|
||||||
|
return validateQuotas(request, bucketMD, request.accountQuotas, actions, request.apiMethod, bytes,
|
||||||
|
false, log, err => next(err, bucketMD, objectMD));
|
||||||
|
},
|
||||||
|
function updateObjectMD(bucketMD, objectMD, next) {
|
||||||
|
const params = objectMD.versionId ? { versionId: objectMD.versionId } : {};
|
||||||
|
metadata.putObjectMD(bucketMD.getName(), objectKey, objectMD, params,
|
||||||
|
log, err => next(err, bucketMD, objectMD));
|
||||||
|
},
|
||||||
|
],
|
||||||
|
(err, bucketMD) => {
|
||||||
|
// generate CORS response header
|
||||||
|
const responseHeaders = collectCorsHeaders(request.headers.origin, request.method, bucketMD);
|
||||||
|
if (err) {
|
||||||
|
log.trace('error processing request',
|
||||||
|
{
|
||||||
|
method: METHOD,
|
||||||
|
error: err,
|
||||||
|
});
|
||||||
|
monitoring.promMetrics(
|
||||||
|
'POST', bucketName, err.code, 'restoreObject');
|
||||||
|
return callback(err, err.code, responseHeaders);
|
||||||
|
}
|
||||||
|
pushMetric('restoreObject', log, {
|
||||||
|
userInfo,
|
||||||
|
bucket: bucketName,
|
||||||
|
});
|
||||||
|
if (isObjectRestored) {
|
||||||
|
monitoring.promMetrics(
|
||||||
|
'POST', bucketName, '200', 'restoreObject');
|
||||||
|
return callback(null, 200, responseHeaders);
|
||||||
|
}
|
||||||
|
monitoring.promMetrics(
|
||||||
|
'POST', bucketName, '202', 'restoreObject');
|
||||||
|
return callback(null, 202, responseHeaders);
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
module.exports = objectRestore;
|
|
@ -0,0 +1,32 @@
|
||||||
|
const { errors } = require('arsenal');
|
||||||
|
|
||||||
|
const { unsupportedSignatureChecksums, supportedSignatureChecksums } = require('../../../../constants');
|
||||||
|
|
||||||
|
function validateChecksumHeaders(headers) {
|
||||||
|
// If the x-amz-trailer header is present the request is using one of the
|
||||||
|
// trailing checksum algorithms, which are not supported.
|
||||||
|
if (headers['x-amz-trailer'] !== undefined) {
|
||||||
|
return errors.BadRequest.customizeDescription('trailing checksum is not supported');
|
||||||
|
}
|
||||||
|
|
||||||
|
const signatureChecksum = headers['x-amz-content-sha256'];
|
||||||
|
if (signatureChecksum === undefined) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (supportedSignatureChecksums.has(signatureChecksum)) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the value is not one of the possible checksum algorithms
|
||||||
|
// the only other valid value is the actual sha256 checksum of the payload.
|
||||||
|
// Do a simple sanity check of the length to guard against future algos.
|
||||||
|
// If the value is an unknown algo, then it will fail checksum validation.
|
||||||
|
if (!unsupportedSignatureChecksums.has(signatureChecksum) && signatureChecksum.length === 64) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
return errors.BadRequest.customizeDescription('unsupported checksum algorithm');
|
||||||
|
}
|
||||||
|
|
||||||
|
module.exports = validateChecksumHeaders;
|
|
@ -4,13 +4,40 @@ const async = require('async');
|
||||||
const metadata = require('../../../metadata/wrapper');
|
const metadata = require('../../../metadata/wrapper');
|
||||||
const { config } = require('../../../Config');
|
const { config } = require('../../../Config');
|
||||||
|
|
||||||
|
const { scaledMsPerDay } = config.getTimeOptions();
|
||||||
|
|
||||||
const versionIdUtils = versioning.VersionID;
|
const versionIdUtils = versioning.VersionID;
|
||||||
// Use Arsenal function to generate a version ID used internally by metadata
|
// Use Arsenal function to generate a version ID used internally by metadata
|
||||||
// for null versions that are created before bucket versioning is configured
|
// for null versions that are created before bucket versioning is configured
|
||||||
const nonVersionedObjId =
|
const nonVersionedObjId =
|
||||||
versionIdUtils.getInfVid(config.replicationGroupId);
|
versionIdUtils.getInfVid(config.replicationGroupId);
|
||||||
|
|
||||||
/** decodedVidResult - decode the version id from a query object
|
/** decodeVID - decode the version id
|
||||||
|
* @param {string} versionId - version ID
|
||||||
|
* @return {(Error|string|undefined)} - return Invalid Argument if decryption
|
||||||
|
* fails due to improper format, otherwise undefined or the decoded version id
|
||||||
|
*/
|
||||||
|
function decodeVID(versionId) {
|
||||||
|
if (versionId === 'null') {
|
||||||
|
return versionId;
|
||||||
|
}
|
||||||
|
|
||||||
|
let decoded;
|
||||||
|
const invalidErr = errors.InvalidArgument.customizeDescription('Invalid version id specified');
|
||||||
|
try {
|
||||||
|
decoded = versionIdUtils.decode(versionId);
|
||||||
|
} catch (err) {
|
||||||
|
return invalidErr;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (decoded instanceof Error) {
|
||||||
|
return invalidErr;
|
||||||
|
}
|
||||||
|
|
||||||
|
return decoded;
|
||||||
|
}
|
||||||
|
|
||||||
|
/** decodeVersionId - decode the version id from a query object
|
||||||
* @param {object} [reqQuery] - request query object
|
* @param {object} [reqQuery] - request query object
|
||||||
* @param {string} [reqQuery.versionId] - version ID sent in request query
|
* @param {string} [reqQuery.versionId] - version ID sent in request query
|
||||||
* @return {(Error|string|undefined)} - return Invalid Argument if decryption
|
* @return {(Error|string|undefined)} - return Invalid Argument if decryption
|
||||||
|
@ -20,16 +47,7 @@ function decodeVersionId(reqQuery) {
|
||||||
if (!reqQuery || !reqQuery.versionId) {
|
if (!reqQuery || !reqQuery.versionId) {
|
||||||
return undefined;
|
return undefined;
|
||||||
}
|
}
|
||||||
let versionId = reqQuery.versionId;
|
return decodeVID(reqQuery.versionId);
|
||||||
if (versionId === 'null') {
|
|
||||||
return versionId;
|
|
||||||
}
|
|
||||||
versionId = versionIdUtils.decode(versionId);
|
|
||||||
if (versionId instanceof Error) {
|
|
||||||
return errors.InvalidArgument
|
|
||||||
.customizeDescription('Invalid version id specified');
|
|
||||||
}
|
|
||||||
return versionId;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/** getVersionIdResHeader - return encrypted version ID if appropriate
|
/** getVersionIdResHeader - return encrypted version ID if appropriate
|
||||||
|
@ -40,11 +58,10 @@ function decodeVersionId(reqQuery) {
|
||||||
*/
|
*/
|
||||||
function getVersionIdResHeader(verCfg, objectMD) {
|
function getVersionIdResHeader(verCfg, objectMD) {
|
||||||
if (verCfg) {
|
if (verCfg) {
|
||||||
if (objectMD.isNull || (objectMD && !objectMD.versionId)) {
|
if (objectMD.isNull || !objectMD.versionId) {
|
||||||
return 'null';
|
return 'null';
|
||||||
}
|
}
|
||||||
return versionIdUtils.encode(objectMD.versionId,
|
return versionIdUtils.encode(objectMD.versionId);
|
||||||
config.versionIdEncodingType);
|
|
||||||
}
|
}
|
||||||
return undefined;
|
return undefined;
|
||||||
}
|
}
|
||||||
|
@ -62,17 +79,34 @@ function checkQueryVersionId(query) {
|
||||||
return undefined;
|
return undefined;
|
||||||
}
|
}
|
||||||
|
|
||||||
function _storeNullVersionMD(bucketName, objKey, objMD, options, log, cb) {
|
function _storeNullVersionMD(bucketName, objKey, nullVersionId, objMD, log, cb) {
|
||||||
metadata.putObjectMD(bucketName, objKey, objMD, options, log, err => {
|
// In compatibility mode, create null versioned keys instead of null keys
|
||||||
|
let versionId;
|
||||||
|
let nullVersionMD;
|
||||||
|
if (config.nullVersionCompatMode) {
|
||||||
|
versionId = nullVersionId;
|
||||||
|
nullVersionMD = Object.assign({}, objMD, {
|
||||||
|
versionId: nullVersionId,
|
||||||
|
isNull: true,
|
||||||
|
});
|
||||||
|
} else {
|
||||||
|
versionId = 'null';
|
||||||
|
nullVersionMD = Object.assign({}, objMD, {
|
||||||
|
versionId: nullVersionId,
|
||||||
|
isNull: true,
|
||||||
|
isNull2: true,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
metadata.putObjectMD(bucketName, objKey, nullVersionMD, { versionId }, log, err => {
|
||||||
if (err) {
|
if (err) {
|
||||||
log.debug('error from metadata storing null version as new version',
|
log.debug('error from metadata storing null version as new version',
|
||||||
{ error: err });
|
{ error: err });
|
||||||
}
|
}
|
||||||
cb(err, options);
|
cb(err);
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
/** get location of null version data for deletion
|
/** check existence and get location of null version data for deletion
|
||||||
* @param {string} bucketName - name of bucket
|
* @param {string} bucketName - name of bucket
|
||||||
* @param {string} objKey - name of object key
|
* @param {string} objKey - name of object key
|
||||||
* @param {object} options - metadata options for getting object MD
|
* @param {object} options - metadata options for getting object MD
|
||||||
|
@ -83,49 +117,55 @@ function _storeNullVersionMD(bucketName, objKey, objMD, options, log, cb) {
|
||||||
* @param {function} cb - callback
|
* @param {function} cb - callback
|
||||||
* @return {undefined} - and call callback with (err, dataToDelete)
|
* @return {undefined} - and call callback with (err, dataToDelete)
|
||||||
*/
|
*/
|
||||||
function _getNullVersionsToDelete(bucketName, objKey, options, mst, log, cb) {
|
function _prepareNullVersionDeletion(bucketName, objKey, options, mst, log, cb) {
|
||||||
|
const nullOptions = {};
|
||||||
|
if (!options.deleteData) {
|
||||||
|
return process.nextTick(cb, null, nullOptions);
|
||||||
|
}
|
||||||
if (options.versionId === mst.versionId) {
|
if (options.versionId === mst.versionId) {
|
||||||
// no need to get delete location, we already have the master's metadata
|
// no need to get another key as the master is the target
|
||||||
const dataToDelete = mst.objLocation;
|
nullOptions.dataToDelete = mst.objLocation;
|
||||||
return process.nextTick(cb, null, dataToDelete);
|
return process.nextTick(cb, null, nullOptions);
|
||||||
|
}
|
||||||
|
if (options.versionId === 'null') {
|
||||||
|
// deletion of the null key will be done by the main metadata
|
||||||
|
// PUT via this option
|
||||||
|
nullOptions.deleteNullKey = true;
|
||||||
}
|
}
|
||||||
return metadata.getObjectMD(bucketName, objKey, options, log,
|
return metadata.getObjectMD(bucketName, objKey, options, log,
|
||||||
(err, versionMD) => {
|
(err, versionMD) => {
|
||||||
if (err) {
|
if (err) {
|
||||||
log.debug('err from metadata getting specified version', {
|
// the null key may not exist, hence it's a normal
|
||||||
error: err,
|
// situation to have a NoSuchKey error, in which case
|
||||||
method: '_getNullVersionsToDelete',
|
// there is nothing to delete
|
||||||
|
if (err.is.NoSuchKey) {
|
||||||
|
log.debug('null version does not exist', {
|
||||||
|
method: '_prepareNullVersionDeletion',
|
||||||
});
|
});
|
||||||
|
} else {
|
||||||
|
log.warn('could not get null version metadata', {
|
||||||
|
error: err,
|
||||||
|
method: '_prepareNullVersionDeletion',
|
||||||
|
});
|
||||||
|
}
|
||||||
return cb(err);
|
return cb(err);
|
||||||
}
|
}
|
||||||
if (!versionMD.location) {
|
if (versionMD.location) {
|
||||||
return cb();
|
|
||||||
}
|
|
||||||
const dataToDelete = Array.isArray(versionMD.location) ?
|
const dataToDelete = Array.isArray(versionMD.location) ?
|
||||||
versionMD.location : [versionMD.location];
|
versionMD.location : [versionMD.location];
|
||||||
return cb(null, dataToDelete);
|
nullOptions.dataToDelete = dataToDelete;
|
||||||
|
}
|
||||||
|
return cb(null, nullOptions);
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
function _deleteNullVersionMD(bucketName, objKey, options, mst, log, cb) {
|
function _deleteNullVersionMD(bucketName, objKey, options, log, cb) {
|
||||||
return _getNullVersionsToDelete(bucketName, objKey, options, mst, log,
|
return metadata.deleteObjectMD(bucketName, objKey, options, log, err => {
|
||||||
(err, nullDataToDelete) => {
|
|
||||||
if (err) {
|
if (err) {
|
||||||
log.warn('could not find null version metadata', {
|
log.warn('metadata error deleting null versioned key',
|
||||||
error: err,
|
{ bucketName, objKey, error: err, method: '_deleteNullVersionMD' });
|
||||||
method: '_deleteNullVersionMD',
|
|
||||||
});
|
|
||||||
return cb(err);
|
|
||||||
}
|
}
|
||||||
return metadata.deleteObjectMD(bucketName, objKey, options, log,
|
|
||||||
err => {
|
|
||||||
if (err) {
|
|
||||||
log.warn('metadata error deleting null version',
|
|
||||||
{ error: err, method: '_deleteNullVersionMD' });
|
|
||||||
return cb(err);
|
return cb(err);
|
||||||
}
|
|
||||||
return cb(null, nullDataToDelete);
|
|
||||||
});
|
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -136,73 +176,103 @@ function _deleteNullVersionMD(bucketName, objKey, options, mst, log, cb) {
|
||||||
* @param {object} mst - state of master version, as returned by
|
* @param {object} mst - state of master version, as returned by
|
||||||
* getMasterState()
|
* getMasterState()
|
||||||
* @param {string} vstat - bucket versioning status: 'Enabled' or 'Suspended'
|
* @param {string} vstat - bucket versioning status: 'Enabled' or 'Suspended'
|
||||||
|
* @param {boolean} nullVersionCompatMode - if true, behaves in null
|
||||||
|
* version compatibility mode and return appropriate values: this mode
|
||||||
|
* does not attempt to create null keys but create null versioned keys
|
||||||
|
* instead
|
||||||
*
|
*
|
||||||
* @return {object} result object with the following attributes:
|
* @return {object} result object with the following attributes:
|
||||||
* - {object} options: versioning-related options to pass to the
|
* - {object} options: versioning-related options to pass to the
|
||||||
services.metadataStoreObject() call
|
services.metadataStoreObject() call
|
||||||
* - {object} [storeOptions]: options for metadata to create a new
|
* - {object} [options.extraMD]: extra attributes to set in object metadata
|
||||||
null version key, if needed
|
* - {string} [nullVersionId]: null version key to create, if needed
|
||||||
* - {object} [delOptions]: options for metadata to delete the null
|
* - {object} [delOptions]: options for metadata to delete the null
|
||||||
version key, if needed
|
version key, if needed
|
||||||
*/
|
*/
|
||||||
function processVersioningState(mst, vstat) {
|
function processVersioningState(mst, vstat, nullVersionCompatMode) {
|
||||||
const options = {};
|
const versioningSuspended = (vstat === 'Suspended');
|
||||||
const storeOptions = {};
|
const masterIsNull = mst.exists && (mst.isNull || !mst.versionId);
|
||||||
const delOptions = {};
|
|
||||||
// object does not exist or is not versioned (before versioning)
|
if (versioningSuspended) {
|
||||||
if (mst.versionId === undefined || mst.isNull) {
|
// versioning is suspended: overwrite the existing null version
|
||||||
// versioning is suspended, overwrite existing master version
|
const options = { versionId: '', isNull: true };
|
||||||
if (vstat === 'Suspended') {
|
if (masterIsNull) {
|
||||||
options.versionId = '';
|
// if the null version exists, clean it up prior to put
|
||||||
options.isNull = true;
|
if (mst.objLocation) {
|
||||||
options.dataToDelete = mst.objLocation;
|
options.dataToDelete = mst.objLocation;
|
||||||
// if null version exists, clean it up prior to put
|
|
||||||
if (mst.isNull) {
|
|
||||||
delOptions.versionId = mst.versionId;
|
|
||||||
if (mst.uploadId) {
|
|
||||||
delOptions.replayId = mst.uploadId;
|
|
||||||
}
|
}
|
||||||
|
// backward-compat: a null version key may exist even with
|
||||||
|
// a null master (due to S3C-7526), if so, delete it (its
|
||||||
|
// data will be deleted as part of the master cleanup, so
|
||||||
|
// no "deleteData" param is needed)
|
||||||
|
//
|
||||||
|
// "isNull2" attribute is set in master metadata when
|
||||||
|
// null keys are used, which is used as an optimization to
|
||||||
|
// avoid having to check the versioned key since there can
|
||||||
|
// be no more versioned key to clean up
|
||||||
|
if (mst.isNull && mst.versionId && !mst.isNull2) {
|
||||||
|
const delOptions = { versionId: mst.versionId };
|
||||||
return { options, delOptions };
|
return { options, delOptions };
|
||||||
}
|
}
|
||||||
return { options };
|
return { options };
|
||||||
}
|
}
|
||||||
// versioning is enabled, create a new version
|
if (mst.nullVersionId) {
|
||||||
options.versioning = true;
|
// backward-compat: delete the null versioned key and data
|
||||||
if (mst.exists) {
|
const delOptions = { versionId: mst.nullVersionId, deleteData: true };
|
||||||
// store master version in a new key
|
|
||||||
const versionId = mst.isNull ? mst.versionId : nonVersionedObjId;
|
|
||||||
storeOptions.versionId = versionId;
|
|
||||||
storeOptions.isNull = true;
|
|
||||||
options.nullVersionId = versionId;
|
|
||||||
// non-versioned (non-null) MPU objects don't have a
|
|
||||||
// replay ID, so don't reference their uploadId
|
|
||||||
if (mst.isNull && mst.uploadId) {
|
|
||||||
options.nullUploadId = mst.uploadId;
|
|
||||||
}
|
|
||||||
return { options, storeOptions };
|
|
||||||
}
|
|
||||||
return { options };
|
|
||||||
}
|
|
||||||
// master is versioned and is not a null version
|
|
||||||
const nullVersionId = mst.nullVersionId;
|
|
||||||
if (vstat === 'Suspended') {
|
|
||||||
// versioning is suspended, overwrite the existing master version
|
|
||||||
options.versionId = '';
|
|
||||||
options.isNull = true;
|
|
||||||
if (nullVersionId === undefined) {
|
|
||||||
return { options };
|
|
||||||
}
|
|
||||||
delOptions.versionId = nullVersionId;
|
|
||||||
if (mst.nullUploadId) {
|
if (mst.nullUploadId) {
|
||||||
delOptions.replayId = mst.nullUploadId;
|
delOptions.replayId = mst.nullUploadId;
|
||||||
}
|
}
|
||||||
return { options, delOptions };
|
return { options, delOptions };
|
||||||
}
|
}
|
||||||
// versioning is enabled, put the new version
|
// clean up the eventual null key's location data prior to put
|
||||||
options.versioning = true;
|
|
||||||
options.nullVersionId = nullVersionId;
|
// NOTE: due to metadata v1 internal format, we cannot guess
|
||||||
|
// from the master key whether there is an associated null
|
||||||
|
// key, because the master key may be removed whenever the
|
||||||
|
// latest version becomes a delete marker. Hence we need to
|
||||||
|
// pessimistically try to get the null key metadata and delete
|
||||||
|
// it if it exists.
|
||||||
|
const delOptions = { versionId: 'null', deleteData: true };
|
||||||
|
return { options, delOptions };
|
||||||
|
}
|
||||||
|
|
||||||
|
// versioning is enabled: create a new version
|
||||||
|
const options = { versioning: true };
|
||||||
|
if (masterIsNull) {
|
||||||
|
// if master is a null version or a non-versioned key,
|
||||||
|
// copy it to a new null key
|
||||||
|
const nullVersionId = (mst.isNull && mst.versionId) ? mst.versionId : nonVersionedObjId;
|
||||||
|
if (nullVersionCompatMode) {
|
||||||
|
options.extraMD = {
|
||||||
|
nullVersionId,
|
||||||
|
};
|
||||||
|
if (mst.uploadId) {
|
||||||
|
options.extraMD.nullUploadId = mst.uploadId;
|
||||||
|
}
|
||||||
|
return { options, nullVersionId };
|
||||||
|
}
|
||||||
|
if (mst.isNull && !mst.isNull2) {
|
||||||
|
// if master null version was put with an older
|
||||||
|
// Cloudserver (or in compat mode), there is a
|
||||||
|
// possibility that it also has a null versioned key
|
||||||
|
// associated, so we need to delete it as we write the
|
||||||
|
// null key
|
||||||
|
const delOptions = {
|
||||||
|
versionId: nullVersionId,
|
||||||
|
};
|
||||||
|
return { options, nullVersionId, delOptions };
|
||||||
|
}
|
||||||
|
return { options, nullVersionId };
|
||||||
|
}
|
||||||
|
// backward-compat: keep a reference to the existing null
|
||||||
|
// versioned key
|
||||||
|
if (mst.nullVersionId) {
|
||||||
|
options.extraMD = {
|
||||||
|
nullVersionId: mst.nullVersionId,
|
||||||
|
};
|
||||||
if (mst.nullUploadId) {
|
if (mst.nullUploadId) {
|
||||||
options.nullUploadId = mst.nullUploadId;
|
options.extraMD.nullUploadId = mst.nullUploadId;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return { options };
|
return { options };
|
||||||
}
|
}
|
||||||
|
@ -229,6 +299,7 @@ function getMasterState(objMD) {
|
||||||
versionId: objMD.versionId,
|
versionId: objMD.versionId,
|
||||||
uploadId: objMD.uploadId,
|
uploadId: objMD.uploadId,
|
||||||
isNull: objMD.isNull,
|
isNull: objMD.isNull,
|
||||||
|
isNull2: objMD.isNull2,
|
||||||
nullVersionId: objMD.nullVersionId,
|
nullVersionId: objMD.nullVersionId,
|
||||||
nullUploadId: objMD.nullUploadId,
|
nullUploadId: objMD.nullUploadId,
|
||||||
};
|
};
|
||||||
|
@ -252,9 +323,6 @@ function getMasterState(objMD) {
|
||||||
* ('' overwrites the master version)
|
* ('' overwrites the master version)
|
||||||
* options.versioning - (true/undefined) metadata instruction to create new ver
|
* options.versioning - (true/undefined) metadata instruction to create new ver
|
||||||
* options.isNull - (true/undefined) whether new version is null or not
|
* options.isNull - (true/undefined) whether new version is null or not
|
||||||
* options.nullVersionId - if storing a null version in version history, the
|
|
||||||
* version id of the null version
|
|
||||||
* options.deleteNullVersionData - whether to delete the data of the null ver
|
|
||||||
*/
|
*/
|
||||||
function versioningPreprocessing(bucketName, bucketMD, objectKey, objMD,
|
function versioningPreprocessing(bucketName, bucketMD, objectKey, objMD,
|
||||||
log, callback) {
|
log, callback) {
|
||||||
|
@ -266,42 +334,102 @@ function versioningPreprocessing(bucketName, bucketMD, objectKey, objMD,
|
||||||
return process.nextTick(callback, null, options);
|
return process.nextTick(callback, null, options);
|
||||||
}
|
}
|
||||||
// bucket is versioning configured
|
// bucket is versioning configured
|
||||||
const { options, storeOptions, delOptions } =
|
const { options, nullVersionId, delOptions } =
|
||||||
processVersioningState(mst, vCfg.Status);
|
processVersioningState(mst, vCfg.Status, config.nullVersionCompatMode);
|
||||||
return async.series([
|
return async.series([
|
||||||
function storeVersion(next) {
|
function storeNullVersionMD(next) {
|
||||||
if (!storeOptions) {
|
if (!nullVersionId) {
|
||||||
return process.nextTick(next);
|
return process.nextTick(next);
|
||||||
}
|
}
|
||||||
const versionMD = Object.assign({}, objMD, storeOptions);
|
return _storeNullVersionMD(bucketName, objectKey, nullVersionId, objMD, log, next);
|
||||||
const params = { versionId: storeOptions.versionId };
|
|
||||||
return _storeNullVersionMD(bucketName, objectKey, versionMD,
|
|
||||||
params, log, next);
|
|
||||||
},
|
},
|
||||||
function deleteNullVersion(next) {
|
function prepareNullVersionDeletion(next) {
|
||||||
if (!delOptions) {
|
if (!delOptions) {
|
||||||
return process.nextTick(next);
|
return process.nextTick(next);
|
||||||
}
|
}
|
||||||
return _deleteNullVersionMD(bucketName, objectKey, delOptions, mst,
|
return _prepareNullVersionDeletion(
|
||||||
log, (err, nullDataToDelete) => {
|
bucketName, objectKey, delOptions, mst, log,
|
||||||
|
(err, nullOptions) => {
|
||||||
if (err) {
|
if (err) {
|
||||||
log.warn('unexpected error deleting null version md', {
|
return next(err);
|
||||||
error: err,
|
|
||||||
method: 'versioningPreprocessing',
|
|
||||||
});
|
|
||||||
// it's possible there was a concurrent request to
|
|
||||||
// delete the null version, so proceed with putting a
|
|
||||||
// new version
|
|
||||||
if (err.is.NoSuchKey) {
|
|
||||||
return next(null, options);
|
|
||||||
}
|
}
|
||||||
return next(errors.InternalError);
|
Object.assign(options, nullOptions);
|
||||||
}
|
|
||||||
Object.assign(options, { dataToDelete: nullDataToDelete });
|
|
||||||
return next();
|
return next();
|
||||||
});
|
});
|
||||||
},
|
},
|
||||||
], err => callback(err, options));
|
function deleteNullVersionMD(next) {
|
||||||
|
if (delOptions &&
|
||||||
|
delOptions.versionId &&
|
||||||
|
delOptions.versionId !== 'null') {
|
||||||
|
// backward-compat: delete old null versioned key
|
||||||
|
return _deleteNullVersionMD(
|
||||||
|
bucketName, objectKey, { versionId: delOptions.versionId }, log, next);
|
||||||
|
}
|
||||||
|
return process.nextTick(next);
|
||||||
|
},
|
||||||
|
], err => {
|
||||||
|
// it's possible there was a prior request that deleted the
|
||||||
|
// null version, so proceed with putting a new version
|
||||||
|
if (err && err.is.NoSuchKey) {
|
||||||
|
return callback(null, options);
|
||||||
|
}
|
||||||
|
return callback(err, options);
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Return options to pass to Metadata layer for version-specific
|
||||||
|
* operations with the given requested version ID
|
||||||
|
*
|
||||||
|
* @param {object} objectMD - object metadata
|
||||||
|
* @param {boolean} nullVersionCompatMode - if true, behaves in null
|
||||||
|
* version compatibility mode
|
||||||
|
* @return {object} options object with params:
|
||||||
|
* {string} [options.versionId] - specific versionId to update
|
||||||
|
* {boolean} [options.isNull=true|false|undefined] - if set, tells the
|
||||||
|
* Metadata backend if we're updating or deleting a new-style null
|
||||||
|
* version (stored in master or null key), or not a null version.
|
||||||
|
*/
|
||||||
|
function getVersionSpecificMetadataOptions(objectMD, nullVersionCompatMode) {
|
||||||
|
// Use the internal versionId if it is a "real" null version (not
|
||||||
|
// non-versioned)
|
||||||
|
//
|
||||||
|
// If the target object is non-versioned: do not specify a
|
||||||
|
// "versionId" attribute nor "isNull"
|
||||||
|
//
|
||||||
|
// If the target version is a null version, i.e. has the "isNull"
|
||||||
|
// attribute:
|
||||||
|
//
|
||||||
|
// - send the "isNull=true" param to Metadata if the version is
|
||||||
|
// already a null key put by a non-compat mode Cloudserver, to
|
||||||
|
// let Metadata know that the null key is to be updated or
|
||||||
|
// deleted. This is the case if the "isNull2" metadata attribute
|
||||||
|
// exists
|
||||||
|
//
|
||||||
|
// - otherwise, do not send the "isNull" parameter to hint
|
||||||
|
// Metadata that it is a legacy null version
|
||||||
|
//
|
||||||
|
// If the target version is not a null version and is versioned:
|
||||||
|
//
|
||||||
|
// - send the "isNull=false" param to Metadata in non-compat
|
||||||
|
// mode (mandatory for v1 format)
|
||||||
|
//
|
||||||
|
// - otherwise, do not send the "isNull" parameter to hint
|
||||||
|
// Metadata that an existing null version may not be stored in a
|
||||||
|
// null key
|
||||||
|
//
|
||||||
|
//
|
||||||
|
if (objectMD.versionId === undefined) {
|
||||||
|
return {};
|
||||||
|
}
|
||||||
|
const options = { versionId: objectMD.versionId };
|
||||||
|
if (objectMD.isNull) {
|
||||||
|
if (objectMD.isNull2) {
|
||||||
|
options.isNull = true;
|
||||||
|
}
|
||||||
|
} else if (!nullVersionCompatMode) {
|
||||||
|
options.isNull = false;
|
||||||
|
}
|
||||||
|
return options;
|
||||||
}
|
}
|
||||||
|
|
||||||
/** preprocessingVersioningDelete - return versioning information for S3 to
|
/** preprocessingVersioningDelete - return versioning information for S3 to
|
||||||
|
@ -310,64 +438,113 @@ function versioningPreprocessing(bucketName, bucketMD, objectKey, objMD,
|
||||||
* @param {object} bucketMD - bucket metadata
|
* @param {object} bucketMD - bucket metadata
|
||||||
* @param {object} objectMD - obj metadata
|
* @param {object} objectMD - obj metadata
|
||||||
* @param {string} [reqVersionId] - specific version ID sent as part of request
|
* @param {string} [reqVersionId] - specific version ID sent as part of request
|
||||||
* @param {RequestLogger} log - logger instance
|
* @param {boolean} nullVersionCompatMode - if true, behaves in null version compatibility mode
|
||||||
* @param {function} callback - callback
|
* @return {object} options object with params:
|
||||||
* @return {undefined} and call callback with params (err, options):
|
* {boolean} [options.deleteData=true|undefined] - whether to delete data (if undefined
|
||||||
* options.deleteData - (true/undefined) whether to delete data (if undefined
|
|
||||||
* means creating a delete marker instead)
|
* means creating a delete marker instead)
|
||||||
* options.versionId - specific versionId to delete
|
* {string} [options.versionId] - specific versionId to delete
|
||||||
|
* {boolean} [options.isNull=true|false|undefined] - if set, tells the
|
||||||
|
* Metadata backend if we're deleting a new-style null version (stored
|
||||||
|
* in master or null key), or not a null version.
|
||||||
*/
|
*/
|
||||||
function preprocessingVersioningDelete(bucketName, bucketMD, objectMD,
|
function preprocessingVersioningDelete(bucketName, bucketMD, objectMD, reqVersionId, nullVersionCompatMode) {
|
||||||
reqVersionId, log, callback) {
|
let options = {};
|
||||||
const options = {};
|
if (bucketMD.getVersioningConfiguration() && reqVersionId) {
|
||||||
// bucket is not versioning enabled
|
options = getVersionSpecificMetadataOptions(objectMD, nullVersionCompatMode);
|
||||||
if (!bucketMD.getVersioningConfiguration()) {
|
}
|
||||||
|
if (!bucketMD.getVersioningConfiguration() || reqVersionId) {
|
||||||
|
// delete data if bucket is non-versioned or the request
|
||||||
|
// deletes a specific version
|
||||||
options.deleteData = true;
|
options.deleteData = true;
|
||||||
return callback(null, options);
|
|
||||||
}
|
}
|
||||||
// bucket is versioning enabled
|
return options;
|
||||||
if (reqVersionId && reqVersionId !== 'null') {
|
|
||||||
// deleting a specific version
|
|
||||||
options.deleteData = true;
|
|
||||||
options.versionId = reqVersionId;
|
|
||||||
if (objectMD.uploadId) {
|
|
||||||
options.replayId = objectMD.uploadId;
|
|
||||||
}
|
}
|
||||||
return callback(null, options);
|
|
||||||
|
/**
|
||||||
|
* Keep metadatas when the object is restored from cold storage
|
||||||
|
* but remove the specific ones we don't want to keep
|
||||||
|
* @param {object} objMD - obj metadata
|
||||||
|
* @param {object} metadataStoreParams - custom built object containing resource details.
|
||||||
|
* @return {undefined}
|
||||||
|
*/
|
||||||
|
function restoreMetadata(objMD, metadataStoreParams) {
|
||||||
|
/* eslint-disable no-param-reassign */
|
||||||
|
const userMDToSkip = ['x-amz-meta-scal-s3-restore-attempt'];
|
||||||
|
// We need to keep user metadata and tags
|
||||||
|
Object.keys(objMD).forEach(key => {
|
||||||
|
if (key.startsWith('x-amz-meta-') && !userMDToSkip.includes(key)) {
|
||||||
|
metadataStoreParams.metaHeaders[key] = objMD[key];
|
||||||
}
|
}
|
||||||
if (reqVersionId) {
|
});
|
||||||
// deleting the 'null' version if it exists
|
|
||||||
if (objectMD.versionId === undefined) {
|
if (objMD['x-amz-website-redirect-location']) {
|
||||||
// object is not versioned, deleting it
|
if (!metadataStoreParams.headers) {
|
||||||
options.deleteData = true;
|
metadataStoreParams.headers = {};
|
||||||
// non-versioned (non-null) MPU objects don't have a
|
|
||||||
// replay ID, so don't reference their uploadId
|
|
||||||
return callback(null, options);
|
|
||||||
}
|
}
|
||||||
if (objectMD.isNull) {
|
metadataStoreParams.headers['x-amz-website-redirect-location'] = objMD['x-amz-website-redirect-location'];
|
||||||
// master is the null version
|
|
||||||
options.deleteData = true;
|
|
||||||
options.versionId = objectMD.versionId;
|
|
||||||
if (objectMD.uploadId) {
|
|
||||||
options.replayId = objectMD.uploadId;
|
|
||||||
}
|
}
|
||||||
return callback(null, options);
|
|
||||||
|
if (objMD.replicationInfo) {
|
||||||
|
metadataStoreParams.replicationInfo = objMD.replicationInfo;
|
||||||
}
|
}
|
||||||
if (objectMD.nullVersionId) {
|
|
||||||
// null version exists, deleting it
|
if (objMD.legalHold) {
|
||||||
options.deleteData = true;
|
metadataStoreParams.legalHold = objMD.legalHold;
|
||||||
options.versionId = objectMD.nullVersionId;
|
|
||||||
if (objectMD.nullUploadId) {
|
|
||||||
options.replayId = objectMD.nullUploadId;
|
|
||||||
}
|
}
|
||||||
return callback(null, options);
|
|
||||||
|
if (objMD.acl) {
|
||||||
|
metadataStoreParams.acl = objMD.acl;
|
||||||
}
|
}
|
||||||
// null version does not exist, no deletion
|
|
||||||
// TODO check AWS behaviour for no deletion (seems having no error)
|
metadataStoreParams.creationTime = objMD['creation-time'];
|
||||||
return callback(errors.NoSuchKey);
|
metadataStoreParams.lastModifiedDate = objMD['last-modified'];
|
||||||
|
metadataStoreParams.taggingCopy = objMD.tags;
|
||||||
}
|
}
|
||||||
// not deleting any specific version, making a delete marker instead
|
|
||||||
return callback(null, options);
|
/** overwritingVersioning - return versioning information for S3 to handle
|
||||||
|
* storing version metadata with a specific version id.
|
||||||
|
* @param {object} objMD - obj metadata
|
||||||
|
* @param {object} metadataStoreParams - custom built object containing resource details.
|
||||||
|
* @return {object} options
|
||||||
|
* options.versionId - specific versionId to overwrite in metadata
|
||||||
|
* options.isNull - (true/undefined) whether new version is null or not
|
||||||
|
* options.nullVersionId - if storing a null version in version history, the
|
||||||
|
* version id of the null version
|
||||||
|
*/
|
||||||
|
function overwritingVersioning(objMD, metadataStoreParams) {
|
||||||
|
metadataStoreParams.updateMicroVersionId = true;
|
||||||
|
metadataStoreParams.amzStorageClass = objMD['x-amz-storage-class'];
|
||||||
|
|
||||||
|
// set correct originOp
|
||||||
|
metadataStoreParams.originOp = 's3:ObjectRestore:Completed';
|
||||||
|
|
||||||
|
// update restore
|
||||||
|
const days = objMD.archive?.restoreRequestedDays;
|
||||||
|
const now = Date.now();
|
||||||
|
metadataStoreParams.archive = {
|
||||||
|
archiveInfo: objMD.archive?.archiveInfo,
|
||||||
|
restoreRequestedAt: objMD.archive?.restoreRequestedAt,
|
||||||
|
restoreRequestedDays: objMD.archive?.restoreRequestedDays,
|
||||||
|
restoreCompletedAt: new Date(now),
|
||||||
|
restoreWillExpireAt: new Date(now + (days * scaledMsPerDay)),
|
||||||
|
};
|
||||||
|
|
||||||
|
/* eslint-enable no-param-reassign */
|
||||||
|
|
||||||
|
const versionId = objMD.versionId || undefined;
|
||||||
|
const options = {
|
||||||
|
versionId,
|
||||||
|
isNull: objMD.isNull,
|
||||||
|
};
|
||||||
|
if (objMD.nullVersionId) {
|
||||||
|
options.extraMD = {
|
||||||
|
nullVersionId: objMD.nullVersionId,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
restoreMetadata(objMD, metadataStoreParams);
|
||||||
|
|
||||||
|
return options;
|
||||||
}
|
}
|
||||||
|
|
||||||
module.exports = {
|
module.exports = {
|
||||||
|
@ -377,5 +554,8 @@ module.exports = {
|
||||||
processVersioningState,
|
processVersioningState,
|
||||||
getMasterState,
|
getMasterState,
|
||||||
versioningPreprocessing,
|
versioningPreprocessing,
|
||||||
|
getVersionSpecificMetadataOptions,
|
||||||
preprocessingVersioningDelete,
|
preprocessingVersioningDelete,
|
||||||
|
overwritingVersioning,
|
||||||
|
decodeVID,
|
||||||
};
|
};
|
||||||
|
|
|
@ -101,8 +101,33 @@ function validateWebsiteHeader(header) {
|
||||||
header.startsWith('http://') || header.startsWith('https://'));
|
header.startsWith('http://') || header.startsWith('https://'));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* appendWebsiteIndexDocument - append index to objectKey if necessary
|
||||||
|
* @param {object} request - normalized request object
|
||||||
|
* @param {string} indexDocumentSuffix - index document from website config
|
||||||
|
* @param {boolean} force - flag to force append index
|
||||||
|
* @return {undefined}
|
||||||
|
*/
|
||||||
|
function appendWebsiteIndexDocument(request, indexDocumentSuffix, force = false) {
|
||||||
|
const reqObjectKey = request.objectKey ? request.objectKey : '';
|
||||||
|
/* eslint-disable no-param-reassign */
|
||||||
|
|
||||||
|
// find index document if "directory" sent in request
|
||||||
|
if (reqObjectKey.endsWith('/')) {
|
||||||
|
request.objectKey += indexDocumentSuffix;
|
||||||
|
// find index document if no key provided
|
||||||
|
} else if (reqObjectKey === '') {
|
||||||
|
request.objectKey = indexDocumentSuffix;
|
||||||
|
// force for redirect 302 on folder without trailing / that has an index
|
||||||
|
} else if (force) {
|
||||||
|
request.objectKey += `/${indexDocumentSuffix}`;
|
||||||
|
}
|
||||||
|
/* eslint-enable no-param-reassign */
|
||||||
|
}
|
||||||
|
|
||||||
module.exports = {
|
module.exports = {
|
||||||
findRoutingRule,
|
findRoutingRule,
|
||||||
extractRedirectInfo,
|
extractRedirectInfo,
|
||||||
validateWebsiteHeader,
|
validateWebsiteHeader,
|
||||||
|
appendWebsiteIndexDocument,
|
||||||
};
|
};
|
||||||
|
|
|
@ -0,0 +1,314 @@
|
||||||
|
const async = require('async');
|
||||||
|
const { errors } = require('arsenal');
|
||||||
|
const monitoring = require('../../../utilities/monitoringHandler');
|
||||||
|
const {
|
||||||
|
actionNeedQuotaCheckCopy,
|
||||||
|
actionNeedQuotaCheck,
|
||||||
|
actionWithDataDeletion,
|
||||||
|
} = require('arsenal').policies;
|
||||||
|
const { config } = require('../../../Config');
|
||||||
|
const QuotaService = require('../../../quotas/quotas');
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Process the bytes to write based on the request and object metadata
|
||||||
|
* @param {string} apiMethod - api method
|
||||||
|
* @param {BucketInfo} bucket - bucket info
|
||||||
|
* @param {string} versionId - version id of the object
|
||||||
|
* @param {number} contentLength - content length of the object
|
||||||
|
* @param {object} objMD - object metadata
|
||||||
|
* @param {object} destObjMD - destination object metadata
|
||||||
|
* @return {number} processed content length
|
||||||
|
*/
|
||||||
|
function processBytesToWrite(apiMethod, bucket, versionId, contentLength, objMD, destObjMD = null) {
|
||||||
|
let bytes = contentLength;
|
||||||
|
if (apiMethod === 'objectRestore') {
|
||||||
|
// object is being restored
|
||||||
|
bytes = Number.parseInt(objMD['content-length'], 10);
|
||||||
|
} else if (!bytes && objMD?.['content-length']) {
|
||||||
|
if (apiMethod === 'objectCopy' || apiMethod === 'objectPutCopyPart') {
|
||||||
|
if (!destObjMD || bucket.isVersioningEnabled()) {
|
||||||
|
// object is being copied
|
||||||
|
bytes = Number.parseInt(objMD['content-length'], 10);
|
||||||
|
} else if (!bucket.isVersioningEnabled()) {
|
||||||
|
// object is being copied and replaces the target
|
||||||
|
bytes = Number.parseInt(objMD['content-length'], 10) -
|
||||||
|
Number.parseInt(destObjMD['content-length'], 10);
|
||||||
|
}
|
||||||
|
} else if (!bucket.isVersioningEnabled() || bucket.isVersioningEnabled() && versionId) {
|
||||||
|
// object is being deleted
|
||||||
|
bytes = -Number.parseInt(objMD['content-length'], 10);
|
||||||
|
}
|
||||||
|
} else if (bytes && objMD?.['content-length'] && !bucket.isVersioningEnabled()) {
|
||||||
|
// object is being replaced: store the diff, if the bucket is not versioned
|
||||||
|
bytes = bytes - Number.parseInt(objMD['content-length'], 10);
|
||||||
|
}
|
||||||
|
return bytes || 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Checks if a metric is stale based on the provided parameters.
|
||||||
|
*
|
||||||
|
* @param {Object} metric - The metric object to check.
|
||||||
|
* @param {string} resourceType - The type of the resource.
|
||||||
|
* @param {string} resourceName - The name of the resource.
|
||||||
|
* @param {string} action - The action being performed.
|
||||||
|
* @param {number} inflight - The number of inflight requests.
|
||||||
|
* @param {Object} log - The logger object.
|
||||||
|
* @returns {boolean} Returns true if the metric is stale, false otherwise.
|
||||||
|
*/
|
||||||
|
function isMetricStale(metric, resourceType, resourceName, action, inflight, log) {
|
||||||
|
if (metric.date && Date.now() - new Date(metric.date).getTime() >
|
||||||
|
QuotaService.maxStaleness) {
|
||||||
|
log.warn('Stale metrics from the quota service, allowing the request', {
|
||||||
|
resourceType,
|
||||||
|
resourceName,
|
||||||
|
action,
|
||||||
|
inflight,
|
||||||
|
});
|
||||||
|
monitoring.requestWithQuotaMetricsUnavailable.inc();
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Evaluates quotas for a bucket and an account and update inflight count.
|
||||||
|
*
|
||||||
|
* @param {number} bucketQuota - The quota limit for the bucket.
|
||||||
|
* @param {number} accountQuota - The quota limit for the account.
|
||||||
|
* @param {object} bucket - The bucket object.
|
||||||
|
* @param {object} account - The account object.
|
||||||
|
* @param {number} inflight - The number of inflight requests.
|
||||||
|
* @param {number} inflightForCheck - The number of inflight requests for checking quotas.
|
||||||
|
* @param {string} action - The action being performed.
|
||||||
|
* @param {object} log - The logger object.
|
||||||
|
* @param {function} callback - The callback function to be called when evaluation is complete.
|
||||||
|
* @returns {object} - The result of the evaluation.
|
||||||
|
*/
|
||||||
|
function _evaluateQuotas(
|
||||||
|
bucketQuota,
|
||||||
|
accountQuota,
|
||||||
|
bucket,
|
||||||
|
account,
|
||||||
|
inflight,
|
||||||
|
inflightForCheck,
|
||||||
|
action,
|
||||||
|
log,
|
||||||
|
callback,
|
||||||
|
) {
|
||||||
|
let bucketQuotaExceeded = false;
|
||||||
|
let accountQuotaExceeded = false;
|
||||||
|
const creationDate = new Date(bucket.getCreationDate()).getTime();
|
||||||
|
return async.parallel({
|
||||||
|
bucketQuota: parallelDone => {
|
||||||
|
if (bucketQuota > 0) {
|
||||||
|
return QuotaService.getUtilizationMetrics('bucket',
|
||||||
|
`${bucket.getName()}_${creationDate}`, null, {
|
||||||
|
action,
|
||||||
|
inflight,
|
||||||
|
}, (err, bucketMetrics) => {
|
||||||
|
if (err || inflight < 0) {
|
||||||
|
return parallelDone(err);
|
||||||
|
}
|
||||||
|
if (!isMetricStale(bucketMetrics, 'bucket', bucket.getName(), action, inflight, log) &&
|
||||||
|
bucketMetrics.bytesTotal + inflightForCheck > bucketQuota) {
|
||||||
|
log.debug('Bucket quota exceeded', {
|
||||||
|
bucket: bucket.getName(),
|
||||||
|
action,
|
||||||
|
inflight,
|
||||||
|
quota: bucketQuota,
|
||||||
|
bytesTotal: bucketMetrics.bytesTotal,
|
||||||
|
});
|
||||||
|
bucketQuotaExceeded = true;
|
||||||
|
}
|
||||||
|
return parallelDone();
|
||||||
|
});
|
||||||
|
}
|
||||||
|
return parallelDone();
|
||||||
|
},
|
||||||
|
accountQuota: parallelDone => {
|
||||||
|
if (accountQuota > 0 && account?.account) {
|
||||||
|
return QuotaService.getUtilizationMetrics('account',
|
||||||
|
account.account, null, {
|
||||||
|
action,
|
||||||
|
inflight,
|
||||||
|
}, (err, accountMetrics) => {
|
||||||
|
if (err || inflight < 0) {
|
||||||
|
return parallelDone(err);
|
||||||
|
}
|
||||||
|
if (!isMetricStale(accountMetrics, 'account', account.account, action, inflight, log) &&
|
||||||
|
accountMetrics.bytesTotal + inflightForCheck > accountQuota) {
|
||||||
|
log.debug('Account quota exceeded', {
|
||||||
|
accountId: account.account,
|
||||||
|
action,
|
||||||
|
inflight,
|
||||||
|
quota: accountQuota,
|
||||||
|
bytesTotal: accountMetrics.bytesTotal,
|
||||||
|
});
|
||||||
|
accountQuotaExceeded = true;
|
||||||
|
}
|
||||||
|
return parallelDone();
|
||||||
|
});
|
||||||
|
}
|
||||||
|
return parallelDone();
|
||||||
|
},
|
||||||
|
}, err => {
|
||||||
|
if (err) {
|
||||||
|
log.warn('Error evaluating quotas', {
|
||||||
|
error: err.name,
|
||||||
|
description: err.message,
|
||||||
|
isInflightDeletion: inflight < 0,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
return callback(err, bucketQuotaExceeded, accountQuotaExceeded);
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Monitors the duration of quota evaluation for a specific API method.
|
||||||
|
*
|
||||||
|
* @param {string} apiMethod - The name of the API method being monitored.
|
||||||
|
* @param {string} type - The type of quota being evaluated.
|
||||||
|
* @param {string} code - The code associated with the quota being evaluated.
|
||||||
|
* @param {number} duration - The duration of the quota evaluation in nanoseconds.
|
||||||
|
* @returns {undefined} - Returns nothing.
|
||||||
|
*/
|
||||||
|
function monitorQuotaEvaluationDuration(apiMethod, type, code, duration) {
|
||||||
|
monitoring.quotaEvaluationDuration.labels({
|
||||||
|
action: apiMethod,
|
||||||
|
type,
|
||||||
|
code,
|
||||||
|
}).observe(duration / 1e9);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * Validates the bucket and account quotas for the requested API actions,
 * registering a finalizer hook that re-issues the quota evaluation (with
 * inverted inflights) to clean up when the API call ultimately fails.
 *
 * @param {Request} request - request object
 * @param {BucketInfo} bucket - bucket object
 * @param {Account} account - account object
 * @param {array} apiNames - action names: operations to authorize
 * @param {string} apiMethod - the main API call
 * @param {number} inflight - inflight bytes
 * @param {boolean} isStorageReserved - Flag to check if the current quota, minus
 * the incoming bytes, are under the limit.
 * @param {Logger} log - logger
 * @param {function} callback - callback function
 * @returns {undefined} - calls back with errors.QuotaExceeded when a
 * non-deleting action exceeds the bucket or account quota, null otherwise
 */
function validateQuotas(request, bucket, account, apiNames, apiMethod, inflight, isStorageReserved, log, callback) {
    // Nothing to do when quotas are disabled, or when storage was already
    // reserved and no new bytes are inflight.
    if (!config.isQuotaEnabled() || (!inflight && isStorageReserved)) {
        return callback(null);
    }
    let bucketQuotaExceeded = false;
    let accountQuotaExceeded = false;
    let quotaEvaluationDuration;
    const requestStartTime = process.hrtime.bigint();
    const bucketQuota = bucket.getQuota();
    const accountQuota = account?.quota || 0;
    const shouldSendInflights = config.isQuotaInflightEnabled();

    // Metric label describing which quota(s) applied to this evaluation.
    let type;
    if (actionWithDataDeletion[apiMethod]) {
        type = 'delete';
    } else if (bucketQuota && accountQuota) {
        type = 'bucket+account';
    } else if (bucketQuota) {
        type = 'bucket';
    } else {
        type = 'account';
    }

    if ((bucketQuota <= 0 && accountQuota <= 0) || !QuotaService?.enabled) {
        if (bucketQuota > 0 || accountQuota > 0) {
            log.warn('quota is set for a bucket, but the quota service is disabled', {
                bucketName: bucket.getName(),
            });
            monitoring.requestWithQuotaMetricsUnavailable.inc();
        }
        return callback(null);
    }

    if (isStorageReserved) {
        // Storage already accounted for: evaluate against zero new bytes.
        // eslint-disable-next-line no-param-reassign
        inflight = 0;
    }

    return async.forEach(apiNames, (apiName, done) => {
        // Object copy operations first check the target object,
        // meaning the source object, containing the current bytes,
        // is checked second. This logic handles these APIs calls by
        // ensuring the bytes are positives (i.e., not an object
        // replacement).
        if (actionNeedQuotaCheckCopy(apiName, apiMethod)) {
            // eslint-disable-next-line no-param-reassign
            inflight = Math.abs(inflight);
        } else if (!actionNeedQuotaCheck[apiName] && !actionWithDataDeletion[apiName]) {
            return done();
        }
        // When inflights are disabled, the sum of the current utilization metrics
        // and the current bytes are compared with the quota. The current bytes
        // are not sent to the utilization service. When inflights are enabled,
        // the sum of the current utilization metrics only are compared with the
        // quota. They include the current inflight bytes sent in the request.
        let inflightBytes = shouldSendInflights ? inflight : undefined;
        const inflightForCheck = shouldSendInflights ? 0 : inflight;
        return _evaluateQuotas(bucketQuota, accountQuota, bucket, account, inflightBytes,
            inflightForCheck, apiName, log,
            (err, bucketExceeded, accountExceeded) => {
                if (err) {
                    return done(err);
                }

                bucketQuotaExceeded = bucketExceeded;
                accountQuotaExceeded = accountExceeded;

                // Inflights are inverted: in case of cleanup, we just re-issue
                // the same API call.
                if (inflightBytes) {
                    inflightBytes = -inflightBytes;
                }

                request.finalizerHooks.push((errorFromAPI, _done) => {
                    const code = (bucketQuotaExceeded || accountQuotaExceeded) ? 429 : 200;
                    const quotaCleanUpStartTime = process.hrtime.bigint();
                    // Quotas are cleaned only in case of error in the API
                    async.waterfall([
                        cb => {
                            if (errorFromAPI) {
                                return _evaluateQuotas(bucketQuota, accountQuota, bucket, account, inflightBytes,
                                    null, apiName, log, cb);
                            }
                            return cb();
                        },
                    ], () => {
                        monitorQuotaEvaluationDuration(apiMethod, type, code, quotaEvaluationDuration +
                            Number(process.hrtime.bigint() - quotaCleanUpStartTime));
                        return _done();
                    });
                });

                return done();
            });
    }, err => {
        quotaEvaluationDuration = Number(process.hrtime.bigint() - requestStartTime);
        if (err) {
            // Best effort: a failing quota service must not block the request.
            log.warn('Error getting metrics from the quota service, allowing the request', {
                error: err.name,
                description: err.message,
            });
        }
        if (!actionWithDataDeletion[apiMethod] &&
            (bucketQuotaExceeded || accountQuotaExceeded)) {
            return callback(errors.QuotaExceeded);
        }
        return callback();
    });
}
|
||||||
|
|
||||||
|
module.exports = {
|
||||||
|
processBytesToWrite,
|
||||||
|
isMetricStale,
|
||||||
|
validateQuotas,
|
||||||
|
};
|
|
@ -0,0 +1,117 @@
|
||||||
|
const { errors } = require('arsenal');
|
||||||
|
const constants = require('../../../constants');
|
||||||
|
const services = require('../../services');
|
||||||
|
const { standardMetadataValidateBucket } = require('../../metadata/metadataUtils');
|
||||||
|
const { pushMetric } = require('../../utapi/utilities');
|
||||||
|
const monitoring = require('../../utilities/monitoringHandler');
|
||||||
|
const { getLocationConstraintErrorMessage, processCurrents,
|
||||||
|
validateMaxScannedEntries } = require('../apiUtils/object/lifecycle');
|
||||||
|
const { config } = require('../../Config');
|
||||||
|
|
||||||
|
// Format the listing into the response payload, push the utilization and
// Prometheus metrics, and hand the result to the caller.
function handleResult(listParams, requestMaxKeys, authInfo,
    bucketName, list, isBucketVersioned, log, callback) {
    // Restore the client-requested value (it was clamped for the listing).
    // eslint-disable-next-line no-param-reassign
    listParams.maxKeys = requestMaxKeys;
    const payload = processCurrents(bucketName, listParams, isBucketVersioned, list);

    pushMetric('listLifecycleCurrents', log, { authInfo, bucket: bucketName });
    monitoring.promMetrics('GET', bucketName, '200', 'listLifecycleCurrents');
    return callback(null, payload);
}
|
||||||
|
|
||||||
|
/**
 * listLifecycleCurrents - Return list of current versions/masters in bucket
 * @param {AuthInfo} authInfo - Instance of AuthInfo class with
 * requester's info
 * @param {array} locationConstraints - array of location constraint
 * @param {object} request - http request object
 * @param {function} log - Werelogs request logger
 * @param {function} callback - callback to respond to http request
 * with either error code or xml response body
 * @return {undefined}
 */
function listLifecycleCurrents(authInfo, locationConstraints, request, log, callback) {
    const params = request.query;
    const { bucketName } = request;

    log.debug('processing request', { method: 'listLifecycleCurrents' });

    const requestMaxKeys = params['max-keys']
        ? Number.parseInt(params['max-keys'], 10) : 1000;
    if (Number.isNaN(requestMaxKeys) || requestMaxKeys < 0) {
        monitoring.promMetrics('GET', bucketName, 400, 'listLifecycleCurrents');
        return callback(errors.InvalidArgument);
    }
    const actualMaxKeys = Math.min(constants.listingHardLimit, requestMaxKeys);

    // Lower bound accepted for the max-scanned-lifecycle-listing-entries param.
    const minEntriesToBeScanned = 1;
    const { isValid, maxScannedLifecycleListingEntries } =
        validateMaxScannedEntries(params, config, minEntriesToBeScanned);
    if (!isValid) {
        monitoring.promMetrics('GET', bucketName, 400, 'listLifecycleCurrents');
        return callback(errors.InvalidArgument);
    }

    const excludedDataStoreName = params['excluded-data-store-name'];
    if (excludedDataStoreName && !locationConstraints[excludedDataStoreName]) {
        const errMsg = getLocationConstraintErrorMessage(excludedDataStoreName);
        log.error(`locationConstraint is invalid - ${errMsg}`, { locationConstraint: excludedDataStoreName });
        monitoring.promMetrics('GET', bucketName, 400, 'listLifecycleCurrents');

        return callback(errors.InvalidLocationConstraint.customizeDescription(errMsg));
    }

    const metadataValParams = {
        authInfo,
        bucketName,
        requestType: 'listLifecycleCurrents',
        request,
    };
    const listParams = {
        listingType: 'DelimiterCurrent',
        maxKeys: actualMaxKeys,
        prefix: params.prefix,
        beforeDate: params['before-date'],
        marker: params.marker,
        excludedDataStoreName,
        maxScannedLifecycleListingEntries,
    };

    return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
        if (err) {
            log.debug('error processing request', { method: 'metadataValidateBucket', error: err });
            monitoring.promMetrics('GET', bucketName, err.code, 'listLifecycleCurrents');
            return callback(err, null);
        }

        const vcfg = bucket.getVersioningConfiguration();
        const isBucketVersioned = vcfg && (vcfg.Status === 'Enabled' || vcfg.Status === 'Suspended');

        // max-keys=0 short-circuits: respond with an empty, non-truncated list.
        if (!requestMaxKeys) {
            const emptyList = {
                Contents: [],
                IsTruncated: false,
            };
            return handleResult(listParams, requestMaxKeys, authInfo,
                bucketName, emptyList, isBucketVersioned, log, callback);
        }

        return services.getLifecycleListing(bucketName, listParams, log,
            (err, list) => {
                if (err) {
                    log.debug('error processing request', { method: 'services.getLifecycleListing', error: err });
                    monitoring.promMetrics('GET', bucketName, err.code, 'listLifecycleCurrents');
                    return callback(err, null);
                }

                return handleResult(listParams, requestMaxKeys, authInfo,
                    bucketName, list, isBucketVersioned, log, callback);
            });
    });
}
|
||||||
|
|
||||||
|
module.exports = {
|
||||||
|
listLifecycleCurrents,
|
||||||
|
};
|
|
@ -0,0 +1,127 @@
|
||||||
|
const { errors, versioning } = require('arsenal');
|
||||||
|
const constants = require('../../../constants');
|
||||||
|
const services = require('../../services');
|
||||||
|
const { standardMetadataValidateBucket } = require('../../metadata/metadataUtils');
|
||||||
|
const { pushMetric } = require('../../utapi/utilities');
|
||||||
|
const versionIdUtils = versioning.VersionID;
|
||||||
|
const monitoring = require('../../utilities/monitoringHandler');
|
||||||
|
const { getLocationConstraintErrorMessage, processNonCurrents,
|
||||||
|
validateMaxScannedEntries } = require('../apiUtils/object/lifecycle');
|
||||||
|
const { config } = require('../../Config');
|
||||||
|
|
||||||
|
// Format the listing into the response payload, push the utilization and
// Prometheus metrics, and hand the result to the caller.
function handleResult(listParams, requestMaxKeys, authInfo,
    bucketName, list, log, callback) {
    // Restore the client-requested value (it was clamped for the listing).
    // eslint-disable-next-line no-param-reassign
    listParams.maxKeys = requestMaxKeys;
    const payload = processNonCurrents(bucketName, listParams, list);

    pushMetric('listLifecycleNonCurrents', log, { authInfo, bucket: bucketName });
    monitoring.promMetrics('GET', bucketName, '200', 'listLifecycleNonCurrents');
    return callback(null, payload);
}
|
||||||
|
|
||||||
|
/**
 * listLifecycleNonCurrents - Return list of non-current versions in bucket
 * @param {AuthInfo} authInfo - Instance of AuthInfo class with
 * requester's info
 * @param {array} locationConstraints - array of location constraint
 * @param {object} request - http request object
 * @param {function} log - Werelogs request logger
 * @param {function} callback - callback to respond to http request
 * with either error code or xml response body
 * @return {undefined}
 */
function listLifecycleNonCurrents(authInfo, locationConstraints, request, log, callback) {
    const params = request.query;
    const bucketName = request.bucketName;

    log.debug('processing request', { method: 'listLifecycleNonCurrents' });
    const requestMaxKeys = params['max-keys'] ?
        Number.parseInt(params['max-keys'], 10) : 1000;
    if (Number.isNaN(requestMaxKeys) || requestMaxKeys < 0) {
        monitoring.promMetrics(
            'GET', bucketName, 400, 'listLifecycleNonCurrents');
        return callback(errors.InvalidArgument);
    }
    const actualMaxKeys = Math.min(constants.listingHardLimit, requestMaxKeys);

    // 3 is required as a minimum because we must scan at least three entries to determine version eligibility.
    // Two entries representing the master key and the following one representing the non-current version.
    const minEntriesToBeScanned = 3;
    const { isValid, maxScannedLifecycleListingEntries } =
        validateMaxScannedEntries(params, config, minEntriesToBeScanned);
    if (!isValid) {
        monitoring.promMetrics('GET', bucketName, 400, 'listLifecycleNonCurrents');
        return callback(errors.InvalidArgument);
    }

    const excludedDataStoreName = params['excluded-data-store-name'];
    if (excludedDataStoreName && !locationConstraints[excludedDataStoreName]) {
        const errMsg = getLocationConstraintErrorMessage(excludedDataStoreName);
        log.error(`locationConstraint is invalid - ${errMsg}`, { locationConstraint: excludedDataStoreName });
        // Fixed: this metric was previously reported under the wrong action
        // label 'listLifecycleCurrents' (copy-paste from the sibling handler).
        monitoring.promMetrics('GET', bucketName, 400, 'listLifecycleNonCurrents');

        return callback(errors.InvalidLocationConstraint.customizeDescription(errMsg));
    }

    const metadataValParams = {
        authInfo,
        bucketName,
        requestType: 'listLifecycleNonCurrents',
        request,
    };
    const listParams = {
        listingType: 'DelimiterNonCurrent',
        maxKeys: actualMaxKeys,
        prefix: params.prefix,
        beforeDate: params['before-date'],
        keyMarker: params['key-marker'],
        excludedDataStoreName,
        maxScannedLifecycleListingEntries,
    };

    // version-id-marker arrives in its encoded (client-facing) form.
    listParams.versionIdMarker = params['version-id-marker'] ?
        versionIdUtils.decode(params['version-id-marker']) : undefined;

    return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
        if (err) {
            log.debug('error processing request', { method: 'metadataValidateBucket', error: err });
            monitoring.promMetrics(
                'GET', bucketName, err.code, 'listLifecycleNonCurrents');
            return callback(err, null);
        }

        // Non-current versions only exist on versioned (or suspended) buckets.
        const vcfg = bucket.getVersioningConfiguration();
        const isBucketVersioned = vcfg && (vcfg.Status === 'Enabled' || vcfg.Status === 'Suspended');
        if (!isBucketVersioned) {
            log.debug('bucket is not versioned');
            return callback(errors.InvalidRequest.customizeDescription(
                'bucket is not versioned'), null);
        }

        // max-keys=0 short-circuits: respond with an empty, non-truncated list.
        if (!requestMaxKeys) {
            const emptyList = {
                Contents: [],
                IsTruncated: false,
            };
            return handleResult(listParams, requestMaxKeys, authInfo,
                bucketName, emptyList, log, callback);
        }

        return services.getLifecycleListing(bucketName, listParams, log,
            (err, list) => {
                if (err) {
                    log.debug('error processing request', { method: 'services.getLifecycleListing', error: err });
                    monitoring.promMetrics(
                        'GET', bucketName, err.code, 'listLifecycleNonCurrents');
                    return callback(err, null);
                }
                return handleResult(listParams, requestMaxKeys, authInfo,
                    bucketName, list, log, callback);
            });
    });
}
|
||||||
|
|
||||||
|
module.exports = {
|
||||||
|
listLifecycleNonCurrents,
|
||||||
|
};
|
|
@ -0,0 +1,112 @@
|
||||||
|
const { errors } = require('arsenal');
|
||||||
|
const constants = require('../../../constants');
|
||||||
|
const services = require('../../services');
|
||||||
|
const { standardMetadataValidateBucket } = require('../../metadata/metadataUtils');
|
||||||
|
const { pushMetric } = require('../../utapi/utilities');
|
||||||
|
const monitoring = require('../../utilities/monitoringHandler');
|
||||||
|
const { processOrphans, validateMaxScannedEntries } = require('../apiUtils/object/lifecycle');
|
||||||
|
const { config } = require('../../Config');
|
||||||
|
|
||||||
|
// Format the listing into the response payload, push the utilization and
// Prometheus metrics, and hand the result to the caller.
function handleResult(listParams, requestMaxKeys, authInfo,
    bucketName, list, log, callback) {
    // Restore the client-requested value (it was clamped for the listing).
    // eslint-disable-next-line no-param-reassign
    listParams.maxKeys = requestMaxKeys;
    const payload = processOrphans(bucketName, listParams, list);

    pushMetric('listLifecycleOrphanDeleteMarkers', log, { authInfo, bucket: bucketName });
    monitoring.promMetrics('GET', bucketName, '200', 'listLifecycleOrphanDeleteMarkers');
    return callback(null, payload);
}
|
||||||
|
|
||||||
|
/**
 * listLifecycleOrphanDeleteMarkers - Return list of expired object delete marker in bucket
 * @param {AuthInfo} authInfo - Instance of AuthInfo class with
 * requester's info
 * @param {array} locationConstraints - array of location constraint
 * @param {object} request - http request object
 * @param {function} log - Werelogs request logger
 * @param {function} callback - callback to respond to http request
 * with either error code or xml response body
 * @return {undefined}
 */
function listLifecycleOrphanDeleteMarkers(authInfo, locationConstraints, request, log, callback) {
    const params = request.query;
    const { bucketName } = request;

    log.debug('processing request', { method: 'listLifecycleOrphanDeleteMarkers' });

    const requestMaxKeys = params['max-keys']
        ? Number.parseInt(params['max-keys'], 10) : 1000;
    if (Number.isNaN(requestMaxKeys) || requestMaxKeys < 0) {
        monitoring.promMetrics('GET', bucketName, 400, 'listLifecycleOrphanDeleteMarkers');
        return callback(errors.InvalidArgument);
    }
    const actualMaxKeys = Math.min(constants.listingHardLimit, requestMaxKeys);

    // At least three entries must be scanned to decide whether a delete
    // marker is eligible; the validator enforces this lower bound.
    const minEntriesToBeScanned = 3;
    const { isValid, maxScannedLifecycleListingEntries } =
        validateMaxScannedEntries(params, config, minEntriesToBeScanned);
    if (!isValid) {
        monitoring.promMetrics('GET', bucketName, 400, 'listLifecycleOrphanDeleteMarkers');
        return callback(errors.InvalidArgument);
    }

    const metadataValParams = {
        authInfo,
        bucketName,
        requestType: 'listLifecycleOrphanDeleteMarkers',
        request,
    };
    const listParams = {
        listingType: 'DelimiterOrphanDeleteMarker',
        maxKeys: actualMaxKeys,
        prefix: params.prefix,
        beforeDate: params['before-date'],
        marker: params.marker,
        maxScannedLifecycleListingEntries,
    };

    return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
        if (err) {
            log.debug('error processing request', { method: 'metadataValidateBucket', error: err });
            monitoring.promMetrics('GET', bucketName, err.code, 'listLifecycleOrphanDeleteMarkers');
            return callback(err, null);
        }

        // Delete markers only exist on versioned (or suspended) buckets.
        const vcfg = bucket.getVersioningConfiguration();
        const isBucketVersioned = vcfg && (vcfg.Status === 'Enabled' || vcfg.Status === 'Suspended');
        if (!isBucketVersioned) {
            log.debug('bucket is not versioned or suspended');
            return callback(errors.InvalidRequest.customizeDescription(
                'bucket is not versioned'), null);
        }

        // max-keys=0 short-circuits: respond with an empty, non-truncated list.
        if (!requestMaxKeys) {
            const emptyList = {
                Contents: [],
                IsTruncated: false,
            };
            return handleResult(listParams, requestMaxKeys, authInfo,
                bucketName, emptyList, log, callback);
        }

        return services.getLifecycleListing(bucketName, listParams, log,
            (err, list) => {
                if (err) {
                    log.debug('error processing request', { error: err });
                    monitoring.promMetrics('GET', bucketName, err.code, 'listLifecycleOrphanDeleteMarkers');
                    return callback(err, null);
                }
                return handleResult(listParams, requestMaxKeys, authInfo,
                    bucketName, list, log, callback);
            });
    });
}
|
||||||
|
|
||||||
|
module.exports = {
|
||||||
|
listLifecycleOrphanDeleteMarkers,
|
||||||
|
};
|
|
@ -2,7 +2,7 @@ const { errors } = require('arsenal');
|
||||||
|
|
||||||
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||||
const deleteBucket = require('./apiUtils/bucket/bucketDeletion');
|
const deleteBucket = require('./apiUtils/bucket/bucketDeletion');
|
||||||
const { metadataValidateBucket } = require('../metadata/metadataUtils');
|
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
|
||||||
const { pushMetric } = require('../utapi/utilities');
|
const { pushMetric } = require('../utapi/utilities');
|
||||||
const monitoring = require('../utilities/monitoringHandler');
|
const monitoring = require('../utilities/monitoringHandler');
|
||||||
|
|
||||||
|
@ -34,7 +34,7 @@ function bucketDelete(authInfo, request, log, cb) {
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
|
|
||||||
return metadataValidateBucket(metadataValParams, log,
|
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log,
|
||||||
(err, bucketMD) => {
|
(err, bucketMD) => {
|
||||||
const corsHeaders = collectCorsHeaders(request.headers.origin,
|
const corsHeaders = collectCorsHeaders(request.headers.origin,
|
||||||
request.method, bucketMD);
|
request.method, bucketMD);
|
||||||
|
@ -48,7 +48,7 @@ function bucketDelete(authInfo, request, log, cb) {
|
||||||
log.trace('passed checks',
|
log.trace('passed checks',
|
||||||
{ method: 'metadataValidateBucket' });
|
{ method: 'metadataValidateBucket' });
|
||||||
return deleteBucket(authInfo, bucketMD, bucketName,
|
return deleteBucket(authInfo, bucketMD, bucketName,
|
||||||
authInfo.getCanonicalID(), log, err => {
|
authInfo.getCanonicalID(), request, log, err => {
|
||||||
if (err) {
|
if (err) {
|
||||||
monitoring.promMetrics(
|
monitoring.promMetrics(
|
||||||
'DELETE', bucketName, err.code, 'deleteBucket');
|
'DELETE', bucketName, err.code, 'deleteBucket');
|
||||||
|
|
|
@ -38,7 +38,8 @@ function bucketDeleteCors(authInfo, request, log, callback) {
|
||||||
}
|
}
|
||||||
log.trace('found bucket in metadata');
|
log.trace('found bucket in metadata');
|
||||||
|
|
||||||
if (!isBucketAuthorized(bucket, requestType, canonicalID, authInfo, log, request)) {
|
if (!isBucketAuthorized(bucket, request.apiMethods || requestType, canonicalID,
|
||||||
|
authInfo, log, request, request.actionImplicitDenies)) {
|
||||||
log.debug('access denied for user on bucket', {
|
log.debug('access denied for user on bucket', {
|
||||||
requestType,
|
requestType,
|
||||||
method: 'bucketDeleteCors',
|
method: 'bucketDeleteCors',
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
const async = require('async');
|
const async = require('async');
|
||||||
|
|
||||||
const metadata = require('../metadata/wrapper');
|
const metadata = require('../metadata/wrapper');
|
||||||
const { metadataValidateBucket } = require('../metadata/metadataUtils');
|
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
|
||||||
const { pushMetric } = require('../utapi/utilities');
|
const { pushMetric } = require('../utapi/utilities');
|
||||||
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||||
const { checkExpectedBucketOwner } = require('./apiUtils/authorization/bucketOwner');
|
const { checkExpectedBucketOwner } = require('./apiUtils/authorization/bucketOwner');
|
||||||
|
@ -21,12 +21,12 @@ function bucketDeleteEncryption(authInfo, request, log, callback) {
|
||||||
const metadataValParams = {
|
const metadataValParams = {
|
||||||
authInfo,
|
authInfo,
|
||||||
bucketName,
|
bucketName,
|
||||||
requestType: 'bucketDeleteEncryption',
|
requestType: request.apiMethods || 'bucketDeleteEncryption',
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
|
|
||||||
return async.waterfall([
|
return async.waterfall([
|
||||||
next => metadataValidateBucket(metadataValParams, log, next),
|
next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, next),
|
||||||
(bucket, next) => checkExpectedBucketOwner(request.headers, bucket, log, err => next(err, bucket)),
|
(bucket, next) => checkExpectedBucketOwner(request.headers, bucket, log, err => next(err, bucket)),
|
||||||
(bucket, next) => {
|
(bucket, next) => {
|
||||||
const sseConfig = bucket.getServerSideEncryption();
|
const sseConfig = bucket.getServerSideEncryption();
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
const metadata = require('../metadata/wrapper');
|
const metadata = require('../metadata/wrapper');
|
||||||
const { metadataValidateBucket } = require('../metadata/metadataUtils');
|
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
|
||||||
const { pushMetric } = require('../utapi/utilities');
|
const { pushMetric } = require('../utapi/utilities');
|
||||||
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||||
const monitoring = require('../utilities/monitoringHandler');
|
const monitoring = require('../utilities/monitoringHandler');
|
||||||
|
@ -18,10 +18,10 @@ function bucketDeleteLifecycle(authInfo, request, log, callback) {
|
||||||
const metadataValParams = {
|
const metadataValParams = {
|
||||||
authInfo,
|
authInfo,
|
||||||
bucketName,
|
bucketName,
|
||||||
requestType: 'bucketDeleteLifecycle',
|
requestType: request.apiMethods || 'bucketDeleteLifecycle',
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
return metadataValidateBucket(metadataValParams, log, (err, bucket) => {
|
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
|
||||||
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
|
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
|
||||||
if (err) {
|
if (err) {
|
||||||
log.debug('error processing request', {
|
log.debug('error processing request', {
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
const metadata = require('../metadata/wrapper');
|
const metadata = require('../metadata/wrapper');
|
||||||
const { metadataValidateBucket } = require('../metadata/metadataUtils');
|
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
|
||||||
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -16,10 +16,10 @@ function bucketDeletePolicy(authInfo, request, log, callback) {
|
||||||
const metadataValParams = {
|
const metadataValParams = {
|
||||||
authInfo,
|
authInfo,
|
||||||
bucketName,
|
bucketName,
|
||||||
requestType: 'bucketDeletePolicy',
|
requestType: request.apiMethods || 'bucketDeletePolicy',
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
return metadataValidateBucket(metadataValParams, log, (err, bucket) => {
|
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
|
||||||
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
|
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
|
||||||
if (err) {
|
if (err) {
|
||||||
log.debug('error processing request', {
|
log.debug('error processing request', {
|
||||||
|
|
|
@ -0,0 +1,58 @@
|
||||||
|
const { waterfall } = require('async');
|
||||||
|
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||||
|
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
|
||||||
|
const metadata = require('../metadata/wrapper');
|
||||||
|
const { pushMetric } = require('../utapi/utilities');
|
||||||
|
const monitoring = require('../utilities/monitoringHandler');
|
||||||
|
|
||||||
|
const requestType = 'bucketDeleteQuota';
|
||||||
|
|
||||||
|
/**
 * Bucket Delete Quota - Remove the quota configured on a bucket
 * @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
 * @param {object} request - http request object
 * @param {object} log - Werelogs logger
 * @param {function} callback - callback to server
 * @return {undefined}
 */
function bucketDeleteQuota(authInfo, request, log, callback) {
    log.debug('processing request', { method: 'bucketDeleteQuota' });

    const { bucketName } = request;
    const metadataValParams = {
        authInfo,
        bucketName,
        requestType: request.apiMethods || requestType,
        request,
    };
    return waterfall([
        next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log,
            (err, bucket) => next(err, bucket)),
        (bucket, next) => {
            // A quota of 0 marks the bucket as having no quota configured.
            bucket.setQuota(0);
            metadata.updateBucket(bucket.getName(), bucket, log, err =>
                next(err, bucket));
        },
    ], (err, bucket) => {
        const corsHeaders = collectCorsHeaders(request.headers.origin,
            request.method, bucket);
        if (err) {
            log.debug('error processing request', {
                error: err,
                method: 'bucketDeleteQuota'
            });
            monitoring.promMetrics('DELETE', bucketName, err.code,
                'bucketDeleteQuota');
            return callback(err, err.code, corsHeaders);
        }
        monitoring.promMetrics(
            'DELETE', bucketName, '204', 'bucketDeleteQuota');
        pushMetric('bucketDeleteQuota', log, {
            authInfo,
            bucket: bucketName,
        });
        return callback(null, 204, corsHeaders);
    });
}
|
||||||
|
|
||||||
|
// Expose the handler so the API router can dispatch DELETE ?quota requests.
module.exports = bucketDeleteQuota;
|
|
@ -1,5 +1,5 @@
|
||||||
const metadata = require('../metadata/wrapper');
|
const metadata = require('../metadata/wrapper');
|
||||||
const { metadataValidateBucket } = require('../metadata/metadataUtils');
|
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
|
||||||
const { pushMetric } = require('../utapi/utilities');
|
const { pushMetric } = require('../utapi/utilities');
|
||||||
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||||
const monitoring = require('../utilities/monitoringHandler');
|
const monitoring = require('../utilities/monitoringHandler');
|
||||||
|
@ -18,10 +18,10 @@ function bucketDeleteReplication(authInfo, request, log, callback) {
|
||||||
const metadataValParams = {
|
const metadataValParams = {
|
||||||
authInfo,
|
authInfo,
|
||||||
bucketName,
|
bucketName,
|
||||||
requestType: 'bucketDeleteReplication',
|
requestType: request.apiMethods || 'bucketDeleteReplication',
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
return metadataValidateBucket(metadataValParams, log, (err, bucket) => {
|
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
|
||||||
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
|
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
|
||||||
if (err) {
|
if (err) {
|
||||||
log.debug('error processing request', {
|
log.debug('error processing request', {
|
||||||
|
|
|
@ -0,0 +1,62 @@
|
||||||
|
const { waterfall } = require('async');
|
||||||
|
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||||
|
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
|
||||||
|
const { pushMetric } = require('../utapi/utilities');
|
||||||
|
const monitoring = require('../utilities/monitoringHandler');
|
||||||
|
const metadata = require('../metadata/wrapper');
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Bucket Delete Tagging - Delete a bucket's Tagging
|
||||||
|
* @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
|
||||||
|
* @param {object} request - http request object
|
||||||
|
* @param {object} log - Werelogs logger
|
||||||
|
* @param {function} callback - callback to server
|
||||||
|
* @return {undefined}
|
||||||
|
*/
|
||||||
|
function bucketDeleteTagging(authInfo, request, log, callback) {
|
||||||
|
const bucketName = request.bucketName;
|
||||||
|
log.debug('processing request', { method: 'bucketDeleteTagging', bucketName });
|
||||||
|
|
||||||
|
const metadataValParams = {
|
||||||
|
authInfo,
|
||||||
|
bucketName,
|
||||||
|
requestType: request.apiMethods || 'bucketDeleteTagging',
|
||||||
|
request,
|
||||||
|
};
|
||||||
|
|
||||||
|
let bucket = null;
|
||||||
|
return waterfall([
|
||||||
|
next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log,
|
||||||
|
(err, b) => {
|
||||||
|
if (err) {
|
||||||
|
return next(err);
|
||||||
|
}
|
||||||
|
bucket = b;
|
||||||
|
bucket.setTags([]);
|
||||||
|
return next();
|
||||||
|
}),
|
||||||
|
next => metadata.updateBucket(bucket.getName(), bucket, log, next),
|
||||||
|
], err => {
|
||||||
|
const corsHeaders = collectCorsHeaders(request.headers.origin,
|
||||||
|
request.method, bucket);
|
||||||
|
if (err) {
|
||||||
|
log.error('error processing request', {
|
||||||
|
error: err,
|
||||||
|
method: 'deleteBucketTagging',
|
||||||
|
bucketName
|
||||||
|
});
|
||||||
|
monitoring.promMetrics('DELETE', bucketName, err.code,
|
||||||
|
'deleteBucketTagging');
|
||||||
|
return callback(err, corsHeaders);
|
||||||
|
}
|
||||||
|
pushMetric('deleteBucketTagging', log, {
|
||||||
|
authInfo,
|
||||||
|
bucket: bucketName,
|
||||||
|
});
|
||||||
|
monitoring.promMetrics(
|
||||||
|
'DELETE', bucketName, '200', 'deleteBucketTagging');
|
||||||
|
return callback(err, corsHeaders);
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
module.exports = bucketDeleteTagging;
|
|
@ -30,7 +30,8 @@ function bucketDeleteWebsite(authInfo, request, log, callback) {
|
||||||
}
|
}
|
||||||
log.trace('found bucket in metadata');
|
log.trace('found bucket in metadata');
|
||||||
|
|
||||||
if (!isBucketAuthorized(bucket, requestType, canonicalID, authInfo, log, request)) {
|
if (!isBucketAuthorized(bucket, request.apiMethods || requestType, canonicalID,
|
||||||
|
authInfo, log, request, request.actionImplicitDenies)) {
|
||||||
log.debug('access denied for user on bucket', {
|
log.debug('access denied for user on bucket', {
|
||||||
requestType,
|
requestType,
|
||||||
method: 'bucketDeleteWebsite',
|
method: 'bucketDeleteWebsite',
|
||||||
|
|
|
@ -2,7 +2,7 @@ const querystring = require('querystring');
|
||||||
const { errors, versioning, s3middleware } = require('arsenal');
|
const { errors, versioning, s3middleware } = require('arsenal');
|
||||||
const constants = require('../../constants');
|
const constants = require('../../constants');
|
||||||
const services = require('../services');
|
const services = require('../services');
|
||||||
const { metadataValidateBucket } = require('../metadata/metadataUtils');
|
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
|
||||||
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||||
const escapeForXml = s3middleware.escapeForXml;
|
const escapeForXml = s3middleware.escapeForXml;
|
||||||
const { pushMetric } = require('../utapi/utilities');
|
const { pushMetric } = require('../utapi/utilities');
|
||||||
|
@ -10,7 +10,6 @@ const versionIdUtils = versioning.VersionID;
|
||||||
const monitoring = require('../utilities/monitoringHandler');
|
const monitoring = require('../utilities/monitoringHandler');
|
||||||
const { generateToken, decryptToken }
|
const { generateToken, decryptToken }
|
||||||
= require('../api/apiUtils/object/continueToken');
|
= require('../api/apiUtils/object/continueToken');
|
||||||
const { config } = require('../Config');
|
|
||||||
|
|
||||||
// do not url encode the continuation tokens
|
// do not url encode the continuation tokens
|
||||||
const skipUrlEncoding = new Set([
|
const skipUrlEncoding = new Set([
|
||||||
|
@ -105,7 +104,7 @@ const skipUrlEncoding = new Set([
|
||||||
*/
|
*/
|
||||||
/* eslint-enable max-len */
|
/* eslint-enable max-len */
|
||||||
|
|
||||||
function processVersions(bucketName, listParams, list, encType) {
|
function processVersions(bucketName, listParams, list) {
|
||||||
const xml = [];
|
const xml = [];
|
||||||
xml.push(
|
xml.push(
|
||||||
'<?xml version="1.0" encoding="UTF-8"?>',
|
'<?xml version="1.0" encoding="UTF-8"?>',
|
||||||
|
@ -130,7 +129,7 @@ function processVersions(bucketName, listParams, list, encType) {
|
||||||
xmlParams.forEach(p => {
|
xmlParams.forEach(p => {
|
||||||
if (p.value) {
|
if (p.value) {
|
||||||
const val = p.tag !== 'NextVersionIdMarker' || p.value === 'null' ?
|
const val = p.tag !== 'NextVersionIdMarker' || p.value === 'null' ?
|
||||||
p.value : versionIdUtils.encode(p.value, encType);
|
p.value : versionIdUtils.encode(p.value);
|
||||||
xml.push(`<${p.tag}>${escapeXmlFn(val)}</${p.tag}>`);
|
xml.push(`<${p.tag}>${escapeXmlFn(val)}</${p.tag}>`);
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
|
@ -146,7 +145,7 @@ function processVersions(bucketName, listParams, list, encType) {
|
||||||
`<Key>${objectKey}</Key>`,
|
`<Key>${objectKey}</Key>`,
|
||||||
'<VersionId>',
|
'<VersionId>',
|
||||||
(v.IsNull || v.VersionId === undefined) ?
|
(v.IsNull || v.VersionId === undefined) ?
|
||||||
'null' : versionIdUtils.encode(v.VersionId, encType),
|
'null' : versionIdUtils.encode(v.VersionId),
|
||||||
'</VersionId>',
|
'</VersionId>',
|
||||||
`<IsLatest>${isLatest}</IsLatest>`,
|
`<IsLatest>${isLatest}</IsLatest>`,
|
||||||
`<LastModified>${v.LastModified}</LastModified>`,
|
`<LastModified>${v.LastModified}</LastModified>`,
|
||||||
|
@ -183,6 +182,7 @@ function processMasterVersions(bucketName, listParams, list) {
|
||||||
{ tag: 'EncodingType', value: listParams.encoding },
|
{ tag: 'EncodingType', value: listParams.encoding },
|
||||||
{ tag: 'IsTruncated', value: isTruncated },
|
{ tag: 'IsTruncated', value: isTruncated },
|
||||||
];
|
];
|
||||||
|
|
||||||
if (listParams.v2) {
|
if (listParams.v2) {
|
||||||
xmlParams.push(
|
xmlParams.push(
|
||||||
{ tag: 'StartAfter', value: listParams.startAfter || '' });
|
{ tag: 'StartAfter', value: listParams.startAfter || '' });
|
||||||
|
@ -210,12 +210,13 @@ function processMasterVersions(bucketName, listParams, list) {
|
||||||
xmlParams.forEach(p => {
|
xmlParams.forEach(p => {
|
||||||
if (p.value && skipUrlEncoding.has(p.tag)) {
|
if (p.value && skipUrlEncoding.has(p.tag)) {
|
||||||
xml.push(`<${p.tag}>${p.value}</${p.tag}>`);
|
xml.push(`<${p.tag}>${p.value}</${p.tag}>`);
|
||||||
} else if (p.value || p.tag === 'KeyCount') {
|
} else if (p.value || p.tag === 'KeyCount' || p.tag === 'MaxKeys') {
|
||||||
xml.push(`<${p.tag}>${escapeXmlFn(p.value)}</${p.tag}>`);
|
xml.push(`<${p.tag}>${escapeXmlFn(p.value)}</${p.tag}>`);
|
||||||
} else if (p.tag !== 'NextMarker' &&
|
} else if (p.tag !== 'NextMarker' &&
|
||||||
p.tag !== 'EncodingType' &&
|
p.tag !== 'EncodingType' &&
|
||||||
p.tag !== 'Delimiter' &&
|
p.tag !== 'Delimiter' &&
|
||||||
p.tag !== 'StartAfter') {
|
p.tag !== 'StartAfter' &&
|
||||||
|
p.tag !== 'NextContinuationToken') {
|
||||||
xml.push(`<${p.tag}/>`);
|
xml.push(`<${p.tag}/>`);
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
|
@ -262,8 +263,7 @@ function handleResult(listParams, requestMaxKeys, encoding, authInfo,
|
||||||
listParams.encoding = encoding;
|
listParams.encoding = encoding;
|
||||||
let res;
|
let res;
|
||||||
if (listParams.listingType === 'DelimiterVersions') {
|
if (listParams.listingType === 'DelimiterVersions') {
|
||||||
res = processVersions(bucketName, listParams, list,
|
res = processVersions(bucketName, listParams, list);
|
||||||
config.versionIdEncodingType);
|
|
||||||
} else {
|
} else {
|
||||||
res = processMasterVersions(bucketName, listParams, list);
|
res = processMasterVersions(bucketName, listParams, list);
|
||||||
}
|
}
|
||||||
|
@ -322,15 +322,19 @@ function bucketGet(authInfo, request, log, callback) {
|
||||||
const metadataValParams = {
|
const metadataValParams = {
|
||||||
authInfo,
|
authInfo,
|
||||||
bucketName,
|
bucketName,
|
||||||
requestType: 'bucketGet',
|
requestType: request.apiMethods || 'bucketGet',
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
const listParams = {
|
const listParams = {
|
||||||
listingType: 'DelimiterMaster',
|
listingType: 'DelimiterMaster',
|
||||||
maxKeys: actualMaxKeys,
|
maxKeys: actualMaxKeys,
|
||||||
delimiter: params.delimiter,
|
|
||||||
prefix: params.prefix,
|
prefix: params.prefix,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
if (params.delimiter) {
|
||||||
|
listParams.delimiter = params.delimiter;
|
||||||
|
}
|
||||||
|
|
||||||
if (v2) {
|
if (v2) {
|
||||||
listParams.v2 = true;
|
listParams.v2 = true;
|
||||||
listParams.startAfter = params['start-after'];
|
listParams.startAfter = params['start-after'];
|
||||||
|
@ -341,7 +345,7 @@ function bucketGet(authInfo, request, log, callback) {
|
||||||
listParams.marker = params.marker;
|
listParams.marker = params.marker;
|
||||||
}
|
}
|
||||||
|
|
||||||
metadataValidateBucket(metadataValParams, log, (err, bucket) => {
|
standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
|
||||||
const corsHeaders = collectCorsHeaders(request.headers.origin,
|
const corsHeaders = collectCorsHeaders(request.headers.origin,
|
||||||
request.method, bucket);
|
request.method, bucket);
|
||||||
if (err) {
|
if (err) {
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
const aclUtils = require('../utilities/aclUtils');
|
const aclUtils = require('../utilities/aclUtils');
|
||||||
const { metadataValidateBucket } = require('../metadata/metadataUtils');
|
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
|
||||||
const vault = require('../auth/vault');
|
const vault = require('../auth/vault');
|
||||||
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||||
const { pushMetric } = require('../utapi/utilities');
|
const { pushMetric } = require('../utapi/utilities');
|
||||||
|
@ -44,7 +44,7 @@ function bucketGetACL(authInfo, request, log, callback) {
|
||||||
const metadataValParams = {
|
const metadataValParams = {
|
||||||
authInfo,
|
authInfo,
|
||||||
bucketName,
|
bucketName,
|
||||||
requestType: 'bucketGetACL',
|
requestType: request.apiMethods || 'bucketGetACL',
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
const grantInfo = {
|
const grantInfo = {
|
||||||
|
@ -55,7 +55,7 @@ function bucketGetACL(authInfo, request, log, callback) {
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
metadataValidateBucket(metadataValParams, log, (err, bucket) => {
|
standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
|
||||||
const corsHeaders = collectCorsHeaders(request.headers.origin,
|
const corsHeaders = collectCorsHeaders(request.headers.origin,
|
||||||
request.method, bucket);
|
request.method, bucket);
|
||||||
if (err) {
|
if (err) {
|
||||||
|
|
|
@ -39,7 +39,8 @@ function bucketGetCors(authInfo, request, log, callback) {
|
||||||
const corsHeaders = collectCorsHeaders(request.headers.origin,
|
const corsHeaders = collectCorsHeaders(request.headers.origin,
|
||||||
request.method, bucket);
|
request.method, bucket);
|
||||||
|
|
||||||
if (!isBucketAuthorized(bucket, requestType, canonicalID, authInfo, log, request)) {
|
if (!isBucketAuthorized(bucket, request.apiMethods || requestType, canonicalID,
|
||||||
|
authInfo, log, request, request.actionImplicitDenies)) {
|
||||||
log.debug('access denied for user on bucket', {
|
log.debug('access denied for user on bucket', {
|
||||||
requestType,
|
requestType,
|
||||||
method: 'bucketGetCors',
|
method: 'bucketGetCors',
|
||||||
|
|
|
@ -4,7 +4,7 @@ const async = require('async');
|
||||||
const { pushMetric } = require('../utapi/utilities');
|
const { pushMetric } = require('../utapi/utilities');
|
||||||
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||||
const { checkExpectedBucketOwner } = require('./apiUtils/authorization/bucketOwner');
|
const { checkExpectedBucketOwner } = require('./apiUtils/authorization/bucketOwner');
|
||||||
const { metadataValidateBucket } = require('../metadata/metadataUtils');
|
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
|
||||||
const escapeForXml = s3middleware.escapeForXml;
|
const escapeForXml = s3middleware.escapeForXml;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -22,12 +22,12 @@ function bucketGetEncryption(authInfo, request, log, callback) {
|
||||||
const metadataValParams = {
|
const metadataValParams = {
|
||||||
authInfo,
|
authInfo,
|
||||||
bucketName,
|
bucketName,
|
||||||
requestType: 'bucketGetEncryption',
|
requestType: request.apiMethods || 'bucketGetEncryption',
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
|
|
||||||
return async.waterfall([
|
return async.waterfall([
|
||||||
next => metadataValidateBucket(metadataValParams, log, next),
|
next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, next),
|
||||||
(bucket, next) => checkExpectedBucketOwner(request.headers, bucket, log, err => next(err, bucket)),
|
(bucket, next) => checkExpectedBucketOwner(request.headers, bucket, log, err => next(err, bucket)),
|
||||||
(bucket, next) => {
|
(bucket, next) => {
|
||||||
// If sseInfo is present but the `mandatory` flag is not set
|
// If sseInfo is present but the `mandatory` flag is not set
|
||||||
|
|
|
@ -2,7 +2,7 @@ const { errors } = require('arsenal');
|
||||||
const LifecycleConfiguration =
|
const LifecycleConfiguration =
|
||||||
require('arsenal').models.LifecycleConfiguration;
|
require('arsenal').models.LifecycleConfiguration;
|
||||||
|
|
||||||
const { metadataValidateBucket } = require('../metadata/metadataUtils');
|
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
|
||||||
const { pushMetric } = require('../utapi/utilities');
|
const { pushMetric } = require('../utapi/utilities');
|
||||||
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||||
const monitoring = require('../utilities/monitoringHandler');
|
const monitoring = require('../utilities/monitoringHandler');
|
||||||
|
@ -21,10 +21,10 @@ function bucketGetLifecycle(authInfo, request, log, callback) {
|
||||||
const metadataValParams = {
|
const metadataValParams = {
|
||||||
authInfo,
|
authInfo,
|
||||||
bucketName,
|
bucketName,
|
||||||
requestType: 'bucketGetLifecycle',
|
requestType: request.apiMethods || 'bucketGetLifecycle',
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
return metadataValidateBucket(metadataValParams, log, (err, bucket) => {
|
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
|
||||||
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
|
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
|
||||||
if (err) {
|
if (err) {
|
||||||
log.debug('error processing request', {
|
log.debug('error processing request', {
|
||||||
|
|
|
@ -41,7 +41,8 @@ function bucketGetLocation(authInfo, request, log, callback) {
|
||||||
const corsHeaders = collectCorsHeaders(request.headers.origin,
|
const corsHeaders = collectCorsHeaders(request.headers.origin,
|
||||||
request.method, bucket);
|
request.method, bucket);
|
||||||
|
|
||||||
if (!isBucketAuthorized(bucket, requestType, canonicalID, authInfo, log, request)) {
|
if (!isBucketAuthorized(bucket, request.apiMethods || requestType, canonicalID,
|
||||||
|
authInfo, log, request, request.actionImplicitDenies)) {
|
||||||
log.debug('access denied for account on bucket', {
|
log.debug('access denied for account on bucket', {
|
||||||
requestType,
|
requestType,
|
||||||
method: 'bucketGetLocation',
|
method: 'bucketGetLocation',
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
const { metadataValidateBucket } = require('../metadata/metadataUtils');
|
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
|
||||||
const { pushMetric } = require('../utapi/utilities');
|
const { pushMetric } = require('../utapi/utilities');
|
||||||
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||||
const { NotificationConfiguration } = require('arsenal').models;
|
const { NotificationConfiguration } = require('arsenal').models;
|
||||||
|
@ -37,11 +37,11 @@ function bucketGetNotification(authInfo, request, log, callback) {
|
||||||
const metadataValParams = {
|
const metadataValParams = {
|
||||||
authInfo,
|
authInfo,
|
||||||
bucketName,
|
bucketName,
|
||||||
requestType: 'bucketGetNotification',
|
requestType: request.apiMethods || 'bucketGetNotification',
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
|
|
||||||
return metadataValidateBucket(metadataValParams, log, (err, bucket) => {
|
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
|
||||||
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
|
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
|
||||||
if (err) {
|
if (err) {
|
||||||
log.debug('error processing request', {
|
log.debug('error processing request', {
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
const { errors } = require('arsenal');
|
const { errors } = require('arsenal');
|
||||||
const { metadataValidateBucket } = require('../metadata/metadataUtils');
|
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
|
||||||
const { pushMetric } = require('../utapi/utilities');
|
const { pushMetric } = require('../utapi/utilities');
|
||||||
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||||
const ObjectLockConfiguration =
|
const ObjectLockConfiguration =
|
||||||
|
@ -33,10 +33,10 @@ function bucketGetObjectLock(authInfo, request, log, callback) {
|
||||||
const metadataValParams = {
|
const metadataValParams = {
|
||||||
authInfo,
|
authInfo,
|
||||||
bucketName,
|
bucketName,
|
||||||
requestType: 'bucketGetObjectLock',
|
requestType: request.apiMethods || 'bucketGetObjectLock',
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
return metadataValidateBucket(metadataValParams, log, (err, bucket) => {
|
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
|
||||||
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
|
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
|
||||||
if (err) {
|
if (err) {
|
||||||
log.debug('error processing request', {
|
log.debug('error processing request', {
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
const { errors } = require('arsenal');
|
const { errors } = require('arsenal');
|
||||||
|
|
||||||
const { metadataValidateBucket } = require('../metadata/metadataUtils');
|
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
|
||||||
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -17,11 +17,11 @@ function bucketGetPolicy(authInfo, request, log, callback) {
|
||||||
const metadataValParams = {
|
const metadataValParams = {
|
||||||
authInfo,
|
authInfo,
|
||||||
bucketName,
|
bucketName,
|
||||||
requestType: 'bucketGetPolicy',
|
requestType: request.apiMethods || 'bucketGetPolicy',
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
|
|
||||||
return metadataValidateBucket(metadataValParams, log, (err, bucket) => {
|
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
|
||||||
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
|
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
|
||||||
if (err) {
|
if (err) {
|
||||||
log.debug('error processing request', {
|
log.debug('error processing request', {
|
||||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue