Compare commits


924 Commits

Author SHA1 Message Date
Vitaliy Filippov facc276e8b Move the require of libv2/config out of libv2/redis 2024-08-13 02:17:04 +03:00
Vitaliy Filippov c8e3999fb3 Require defaults.json instead of fs.readFileSync 2024-08-13 01:14:02 +03:00
Vitaliy Filippov 9fa777cdba Split require utils to help webpack remove libV2 2024-08-13 01:10:22 +03:00
Vitaliy Filippov e6d48f3b47 Make vault client optional / support receiving its instance from outside 2024-07-23 19:22:54 +03:00
Vitaliy Filippov 0050625f81 Change git dependency URLs 2024-07-21 18:12:40 +03:00
Vitaliy Filippov 0a66c57a0a Remove yarn lock 2024-07-21 17:34:07 +03:00
Vitaliy Filippov 6711c4241a Forget LFS object 2024-07-21 17:34:07 +03:00
Jonathan Gramain 3800e4b185 Merge remote-tracking branch 'origin/w/7.70/bugfix/UTAPI-105-useListOfSentinelNodes' into w/8.1/bugfix/UTAPI-105-useListOfSentinelNodes 2024-06-27 10:09:15 -07:00
Jonathan Gramain 20667ff741 Merge remote-tracking branch 'origin/bugfix/UTAPI-105-useListOfSentinelNodes' into w/7.70/bugfix/UTAPI-105-useListOfSentinelNodes 2024-06-27 10:06:43 -07:00
Jonathan Gramain 88d18f3eb6 UTAPI-105 bump version 2024-06-25 15:10:02 -07:00
Jonathan Gramain 426dfd0860 bf: UTAPI-105 UtapiReindex: use list of redis sentinels
Use a list of Redis sentinels that are running on stateful nodes only,
instead of localhost.

Previously, a stateless-only node wouldn't have a local sentinel node
running, causing UtapiReindex to fail.

Added a failover mechanism in case of connection error on the current
sentinel, to try each of the other sentinels in turn.
2024-06-25 15:10:02 -07:00
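A minimal sketch of the failover behaviour the UTAPI-105 commit describes, assuming an ioredis-style client (Utapi depends on ioredis); connectViaSentinels and its parameters are illustrative, not the actual UtapiReindex code:

```js
const Redis = require('ioredis');

// Try each configured sentinel in turn; return a client through the first
// sentinel that accepts a connection.
async function connectViaSentinels(sentinels, masterName) {
    let lastErr = new Error('no sentinels configured');
    for (const { host, port } of sentinels) {
        const client = new Redis({
            sentinels: [{ host, port }],
            name: masterName,
            lazyConnect: true, // so connection errors surface from connect()
        });
        try {
            await client.connect();
            return client; // first reachable sentinel wins
        } catch (err) {
            lastErr = err;
            client.disconnect(); // drop the failed client, move to the next
        }
    }
    throw lastErr;
}
```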
bert-e ac4fd2c5f5 Merge branch 'improvement/UTAPI-103/support_reindex_by_account' into tmp/octopus/w/8.1/improvement/UTAPI-103/support_reindex_by_account 2024-06-12 18:28:11 +00:00
Taylor McKinnon 69b94c57aa impr(UTAPI-103): Remove undeclared variable from log message 2024-06-12 11:27:16 -07:00
Taylor McKinnon f5262b7875 impr(UTAPI-103): Support reindexing by account 2024-06-12 11:27:16 -07:00
Taylor McKinnon ee1c0fcd1b impr(UTAPI-103): Support multiple specified buckets and prep for account support 2024-06-12 11:27:16 -07:00
Taylor McKinnon 5efb70dc63 impr(UTAPI-103): Add --dry-run option 2024-06-12 11:27:16 -07:00
Taylor McKinnon 210ba2fd82 impr(UTAPI-103): Add BucketDClient.get_bucket_md() 2024-06-06 12:10:40 -07:00
Taylor McKinnon 34af848b93 impr(UTAPI-103): Add BucketNotFound Exception for _get_bucket_attributes 2024-06-06 12:08:40 -07:00
Taylor McKinnon 402fd406e3 impr(UTAPI-103): Add small LRU cache to BucketDClient._get_bucket_attributes 2024-06-06 12:06:46 -07:00
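A minimal LRU sketch of the kind of cache UTAPI-103 puts in front of bucket-attribute lookups; the real client lives in the Python reindex tooling, so this JavaScript version and its names are purely illustrative:

```js
// Small LRU built on Map, which preserves insertion order.
class LRUCache {
    constructor(maxSize = 128) {
        this.maxSize = maxSize;
        this.map = new Map();
    }

    get(key) {
        if (!this.map.has(key)) return undefined;
        const value = this.map.get(key);
        this.map.delete(key);   // re-insert to mark as most recently used
        this.map.set(key, value);
        return value;
    }

    set(key, value) {
        if (this.map.has(key)) this.map.delete(key);
        this.map.set(key, value);
        if (this.map.size > this.maxSize) {
            // evict the least-recently-used (first) entry
            this.map.delete(this.map.keys().next().value);
        }
    }
}
```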
bert-e f9ae694c0c Merge branch 'w/7.70/bugfix/UTAPI-101/fix_release_workflow' into tmp/octopus/w/8.1/bugfix/UTAPI-101/fix_release_workflow 2024-05-16 17:16:03 +00:00
bert-e 960d990e89 Merge branch 'bugfix/UTAPI-101/fix_release_workflow' into tmp/octopus/w/7.70/bugfix/UTAPI-101/fix_release_workflow 2024-05-16 17:16:03 +00:00
Taylor McKinnon 7fde3488b9 impr(UTAPI-101): Remove secrets: inherit from release workflow 2024-05-15 10:32:38 -07:00
Taylor McKinnon 79c2ff0c72 Merge remote-tracking branch 'origin/w/7.70/bugfix/UTAPI-100/utapi_python_version_fix' into w/8.1/bugfix/UTAPI-100/utapi_python_version_fix 2024-05-07 10:56:37 -07:00
Taylor McKinnon ae904b89bf Merge remote-tracking branch 'origin/bugfix/UTAPI-100/utapi_python_version_fix' into w/7.70/bugfix/UTAPI-100/utapi_python_version_fix 2024-05-07 10:55:23 -07:00
Taylor McKinnon 60db367054 bf(UTAPI-100): Bump version 2024-05-06 11:20:17 -07:00
Taylor McKinnon c9ba521b6d bf(UTAPI-100): Remove use of 3.7+ only parameter 2024-05-06 11:16:58 -07:00
Francois Ferrand ce89418788 Update Release.md for ghcr migration
Issue: UTAPI-99
2024-04-18 15:55:13 +02:00
Francois Ferrand 5faaf493a5 Merge branch 'w/7.70/improvement/VAULT-567' into w/8.1/improvement/VAULT-567 2024-04-18 15:54:58 +02:00
Francois Ferrand da143dba67 Merge branch 'w/7.10/improvement/VAULT-567' into w/7.70/improvement/VAULT-567 2024-04-18 15:54:35 +02:00
Francois Ferrand 6e0ec16f00 Fix caching of python packages
Issue: UTAPI-99
2024-04-18 15:54:04 +02:00
Francois Ferrand 4449f44c9a Bump github actions
- docker-build@v2
- checkout@v4
- setup-buildx@v3
- setup-node@v4
- setup-python@v5
- login@v3
- build-push@v5
- gh-release@v2
- ssh-to-runner@1.7.0

Issue: UTAPI-99
2024-04-18 15:53:26 +02:00
Francois Ferrand c4e786d6cd Migrate to ghcr
Issue: UTAPI-99
2024-04-18 15:53:20 +02:00
Francois Ferrand bdb483e6b4 Merge branch 'improvement/UTAPI-99' into w/7.10/improvement/VAULT-567 2024-04-18 15:52:47 +02:00
Francois Ferrand 20916c6f0e Fix caching of python packages
Issue: UTAPI-99
2024-04-18 15:47:05 +02:00
Francois Ferrand 5976018d0e Bump github actions
- checkout@v4
- setup-qemu@v3
- setup-buildx@v3
- setup-node@v4
- setup-python@v5
- login@v3
- build-push@v5
- gh-release@v2

Issue: UTAPI-99
2024-04-17 15:02:44 +02:00
Francois Ferrand 9e1f14ed17 Migrate to ghcr
Issue: UTAPI-99
2024-04-17 14:42:58 +02:00
bert-e 34699432ee Merge branch 'w/7.70/improvement/UTAPI-98/bump-redis' into tmp/octopus/w/8.1/improvement/UTAPI-98/bump-redis 2024-01-22 15:39:38 +00:00
bert-e 438a25982d Merge branch 'improvement/UTAPI-98/bump-redis' into tmp/octopus/w/7.70/improvement/UTAPI-98/bump-redis 2024-01-22 15:39:37 +00:00
Nicolas Humbert 8804e9ff69 UTAPI-98 Bump Redis version 2024-01-22 16:36:01 +01:00
Taylor McKinnon 27e1c44829 Merge remote-tracking branch 'origin/w/7.70/improvement/UTAPI-97/reindex_only_latest_for_olock_buckets_option' into w/8.1/improvement/UTAPI-97/reindex_only_latest_for_olock_buckets_option 2023-12-11 09:38:41 -08:00
Taylor McKinnon e8882a28cc Merge remote-tracking branch 'origin/improvement/UTAPI-97/reindex_only_latest_for_olock_buckets_option' into w/7.70/improvement/UTAPI-97/reindex_only_latest_for_olock_buckets_option 2023-12-11 09:37:25 -08:00
Taylor McKinnon b93998118c impr(UTAPI-97): Bump version 2023-12-11 09:25:01 -08:00
Taylor McKinnon 9195835f70 impr(UTAPI-97): Add config option to reindex only latest version in object locked buckets 2023-12-11 09:25:01 -08:00
bert-e 8dfb06cdbc Merge branch 'w/7.70/improvement/UTAPI-96/switch_to_scality_ssh_action' into tmp/octopus/w/8.1/improvement/UTAPI-96/switch_to_scality_ssh_action 2023-10-09 16:32:55 +00:00
bert-e 934136635e Merge branch 'improvement/UTAPI-96/switch_to_scality_ssh_action' into tmp/octopus/w/7.70/improvement/UTAPI-96/switch_to_scality_ssh_action 2023-10-09 16:32:55 +00:00
Taylor McKinnon 9f36624799 impr(UTAPI-96): Switch to scality/actions/action-ssh-to-runner 2023-10-09 09:30:34 -07:00
Taylor McKinnon 59aa9b9ab9 Merge remote-tracking branch 'origin/w/7.70/bugfix/UTAPI-92/bump_utapi_version' into w/8.1/bugfix/UTAPI-92/bump_utapi_version 2023-05-31 13:45:38 -07:00
Taylor McKinnon 9eecef0a24 Merge remote-tracking branch 'origin/bugfix/UTAPI-92/bump_utapi_version' into w/7.70/bugfix/UTAPI-92/bump_utapi_version 2023-05-31 13:44:34 -07:00
Taylor McKinnon c29af16e46 UTAPI-92: Bump version 2023-05-31 13:43:04 -07:00
bert-e 8757ac8bb0 Merge branch 'w/7.70/bugfix/UTAPI-92/fix_redis_password_config' into tmp/octopus/w/8.1/bugfix/UTAPI-92/fix_redis_password_config 2023-05-26 17:39:02 +00:00
bert-e 34ceac8563 Merge branch 'bugfix/UTAPI-92/fix_redis_password_config' into tmp/octopus/w/7.70/bugfix/UTAPI-92/fix_redis_password_config 2023-05-26 17:39:02 +00:00
Taylor McKinnon 7f9c9aa202 bf(UTAPI-92): Fix redis password loading 2023-05-25 15:03:36 -07:00
Taylor McKinnon 41b690aa5d Merge remote-tracking branch 'origin/w/7.70/bugfix/UTAPI-88/bump_version_7_10_13' into w/8.1/bugfix/UTAPI-88/bump_version_7_10_13 2023-04-11 16:10:46 -07:00
Taylor McKinnon 3f08327fe6 Merge remote-tracking branch 'origin/bugfix/UTAPI-88/bump_version_7_10_13' into w/7.70/bugfix/UTAPI-88/bump_version_7_10_13 2023-04-11 16:09:35 -07:00
Taylor McKinnon 84bc7e180f bf(UTAPI-88): Release 7.10.13 2023-04-11 16:07:23 -07:00
bert-e e328095606 Merge branches 'w/8.1/bugfix/UTAPI-88-do-not-error-500-in-case-of-negative-metric' and 'q/1279/7.70/bugfix/UTAPI-88-do-not-error-500-in-case-of-negative-metric' into tmp/octopus/q/8.1 2023-04-10 23:34:43 +00:00
bert-e cb9d2b8d2b Merge branches 'w/7.70/bugfix/UTAPI-88-do-not-error-500-in-case-of-negative-metric' and 'q/1279/7.10/bugfix/UTAPI-88-do-not-error-500-in-case-of-negative-metric' into tmp/octopus/q/7.70 2023-04-10 23:34:42 +00:00
bert-e de73fe9ee0 Merge branch 'bugfix/UTAPI-88-do-not-error-500-in-case-of-negative-metric' into q/7.10 2023-04-10 23:34:42 +00:00
bert-e 0d33f81e35 Merge branch 'w/7.70/bugfix/UTAPI-88-do-not-error-500-in-case-of-negative-metric' into tmp/octopus/w/8.1/bugfix/UTAPI-88-do-not-error-500-in-case-of-negative-metric 2023-04-10 23:28:07 +00:00
bert-e 13fb668d94 Merge branch 'bugfix/UTAPI-88-do-not-error-500-in-case-of-negative-metric' into tmp/octopus/w/7.70/bugfix/UTAPI-88-do-not-error-500-in-case-of-negative-metric 2023-04-10 23:28:07 +00:00
scality-gelbart 0fc08f3d7d bf(UTAPI-88): Replace transient state API error with info log message and 200 response 2023-04-10 16:27:21 -07:00
Naren 334c4c26a1 Merge remote-tracking branch 'origin/improvement/UTAPI-91-release-7-70-0' into w/8.1/improvement/UTAPI-91-release-7-70-0 2023-03-28 18:36:52 -07:00
Naren 5319a24704 impr: UTAPI-91 bump version to 7.70.0 2023-03-28 18:05:13 -07:00
Naren ed3628ef01 impr: UTAPI-90 bump version to 8.1.10 2023-03-15 11:20:42 -07:00
Naren 34e881f0e9 impr: UTAPI-90 upgrade bucketclient and vaultclient 2023-03-15 11:19:21 -07:00
Naren 13befbd535 Merge remote-tracking branch 'origin/improvement/UTAPI-90-upgrade-prom-client' into w/8.1/improvement/UTAPI-90-upgrade-prom-client 2023-03-15 11:12:08 -07:00
Naren 347cf3c1cb impr: UTAPI-90 bump version to 7.10.12 2023-03-15 11:03:06 -07:00
Naren 9b5fe56f48 impr: UTAPI-90 upgrade bucketclient and vaultclient 2023-03-15 11:02:22 -07:00
Naren 988f478957 impr: UTAPI-90 upgrade arsenal for prom-client upgrade 2023-03-14 18:54:16 -07:00
bert-e 5f24e749ea Merge branch 'improvement/UTAPI-89-update-metric-names' into tmp/octopus/w/8.1/improvement/UTAPI-89-update-metric-names 2023-02-28 16:56:12 +00:00
Naren 480bde079b impr UTAPI-89 update metric names 2023-02-28 08:54:25 -08:00
Taylor McKinnon 0ba5a02ba7 Bump version to 8.1.9 2022-10-26 11:47:44 -07:00
Taylor McKinnon e75ce33f35 Merge remote-tracking branch 'origin/bugfix/UTAPI-87/handle_zero_byte_objs_in_ver_susp_buck' into w/8.1/bugfix/UTAPI-87/handle_zero_byte_objs_in_ver_susp_buck 2022-10-25 13:36:06 -07:00
Taylor McKinnon 3ec818bca1 bf(UTAPI-87): Bump version to 7.10.11 2022-10-25 13:34:21 -07:00
Taylor McKinnon c3111dfadf bf(UTAPI-87): Handle deleting zero byte objects in version suspended buckets 2022-10-25 13:34:21 -07:00
Taylor McKinnon 451f88d27e Merge remote-tracking branch 'origin/bugfix/UTAPI-85/bump_version' into w/8.1/bugfix/UTAPI-85/bump_version 2022-10-17 14:47:06 -07:00
Taylor McKinnon 71f162169d bf(UTAPI-85): Bump version to 7.10.10 2022-10-17 14:45:26 -07:00
bert-e c0abf3e53f Merge branches 'w/8.1/bugfix/UTAPI-85/allow_host_port_override' and 'q/1271/7.10/bugfix/UTAPI-85/allow_host_port_override' into tmp/octopus/q/8.1 2022-10-17 21:24:40 +00:00
bert-e 3e740a2f6a Merge branch 'bugfix/UTAPI-85/allow_host_port_override' into q/7.10 2022-10-17 21:24:40 +00:00
bert-e 93134e6ccb Merge branches 'w/8.1/bugfix/UTAPI-82-v1-delete-inconsistency' and 'q/1267/7.10/bugfix/UTAPI-82-v1-delete-inconsistency' into tmp/octopus/q/8.1 2022-10-15 00:09:09 +00:00
bert-e b4b52c0de7 Merge branch 'bugfix/UTAPI-82-v1-delete-inconsistency' into q/7.10 2022-10-15 00:09:09 +00:00
Artem Bakalov 4faac178ef Merge remote-tracking branch 'origin/bugfix/UTAPI-82-v1-delete-inconsistency' into w/8.1/bugfix/UTAPI-82-v1-delete-inconsistency 2022-10-14 17:01:02 -07:00
Artem Bakalov 193d1a5d92 UTAPI-82 fix delete inconsistency 2022-10-14 16:55:16 -07:00
bert-e f90213d3d5 Merge branch 'bugfix/UTAPI-85/allow_host_port_override' into tmp/octopus/w/8.1/bugfix/UTAPI-85/allow_host_port_override 2022-10-13 18:24:33 +00:00
Taylor McKinnon 7eb35d51f4 bf(UTAPI-85): Allow host and port to be overridden 2022-10-13 11:02:31 -07:00
bert-e 2e04a5cc44 Merge branch 'improvement/UTAPI-83/provide_warp10_image' into tmp/octopus/w/8.1/improvement/UTAPI-83/provide_warp10_image 2022-10-05 20:17:37 +00:00
Taylor McKinnon 52520e4de1 impr(UTAPI-83): Add warp 10 release workflow 2022-10-05 13:17:05 -07:00
Taylor McKinnon 3391130d43 Merge remote-tracking branch 'origin/bugfix/UTAPI-84/fix_nodesvc_base_config' into w/8.1/bugfix/UTAPI-84/fix_nodesvc_base_config 2022-10-03 15:34:56 -07:00
Taylor McKinnon f3a9a57f58 bf(UTAPI-84): Fix nodesvc-base image config 2022-10-03 15:33:31 -07:00
Taylor McKinnon c0aa52beab Merge remote-tracking branch 'origin/feature/UTAPI-71/add_nodesvc_based_image_and_release_workflow' into w/8.1/feature/UTAPI-71/add_nodesvc_based_image_and_release_workflow 2022-09-23 10:46:41 -07:00
Taylor McKinnon 0ae108f15e ft(UTAPI-71): Rework release workflow to support S3C releases 2022-09-23 10:45:16 -07:00
Taylor McKinnon 2f99e1ddd5 ft(UTAPI-71): Split v2 tests into with/without sensision enabled 2022-09-23 10:45:16 -07:00
Taylor McKinnon cbeae49d47 ft(UTAPI-71): Fix sensision inside warp 10 image 2022-09-23 10:45:16 -07:00
Taylor McKinnon 64d3ecb10f ft(UTAPI-71): Call build-ci from tests 2022-09-23 10:45:16 -07:00
Taylor McKinnon df57f68b9a ft(UTAPI-71): Add build workflows 2022-09-23 10:45:16 -07:00
Taylor McKinnon db5a43f412 ft(UTAPI-71): Backport Dockerfile from development/8.1 branch 2022-09-22 10:52:20 -07:00
Taylor McKinnon 116a2108b0 ft(UTAPI-71): Add nodesvc-base based image 2022-09-22 10:52:20 -07:00
Taylor McKinnon 750cabc565 Merge remote-tracking branch 'origin/bugfix/UTAPI-81/add_bucket_tagging_methods' into w/8.1/bugfix/UTAPI-81/add_bucket_tagging_methods 2022-08-04 12:53:02 -07:00
Taylor McKinnon 469b862a69 bf(UTAPI-81): Add bucket tagging operations 2022-08-04 12:49:23 -07:00
Taylor McKinnon 62bf4d86e6 Merge remote-tracking branch 'origin/improvement/UTAPI-80/release_7_10_7' into w/8.1/improvement/UTAPI-80/release_7_10_7 2022-07-22 11:19:18 -07:00
Taylor McKinnon a072535050 impr(UTAPI-80): Release 7.10.7 2022-07-22 11:17:56 -07:00
bert-e 29b52a0346 Merge branch 'bugfix/UTAPI-78/fix_user_auth_with_no_resources' into tmp/octopus/w/8.1/bugfix/UTAPI-78/fix_user_auth_with_no_resources 2022-07-21 16:38:05 +00:00
Taylor McKinnon 1168720f98 bf(UTAPI-78): Fix second stage user auth with no resources 2022-07-20 09:37:34 -07:00
Jonathan Gramain ff5a75bb11 Merge remote-tracking branch 'origin/bugfix/UTAPI-77-bumpOasTools' into w/8.1/bugfix/UTAPI-77-bumpOasTools 2022-06-20 15:10:40 -07:00
Jonathan Gramain 84a025d430 bugfix: UTAPI-77 bump oas-tools to 2.2.2
Bump the oas-tools dependency to version 2.2.2, to fix a
vulnerability in mpath@0.5.0
2022-06-20 13:32:26 -07:00
bert-e 65726f6d0b Merge branches 'w/8.1/feature/UTAPI-76/breakout_leveldb_and_datalog' and 'q/1251/7.10/feature/UTAPI-76/breakout_leveldb_and_datalog' into tmp/octopus/q/8.1 2022-06-08 21:59:45 +00:00
bert-e 4fbcd109a7 Merge branch 'feature/UTAPI-76/breakout_leveldb_and_datalog' into q/7.10 2022-06-08 21:59:45 +00:00
bert-e eed137768d Merge branches 'w/8.1/feature/UTAPI-75/Add_metrics_for_latest_check_snapshot_timestamps' and 'q/1249/7.10/feature/UTAPI-75/Add_metrics_for_latest_check_snapshot_timestamps' into tmp/octopus/q/8.1 2022-06-07 22:32:44 +00:00
bert-e a71e4d48d0 Merge branch 'feature/UTAPI-75/Add_metrics_for_latest_check_snapshot_timestamps' into q/7.10 2022-06-07 22:32:44 +00:00
bert-e 0257e97bc2 Merge branch 'feature/UTAPI-76/breakout_leveldb_and_datalog' into tmp/octopus/w/8.1/feature/UTAPI-76/breakout_leveldb_and_datalog 2022-06-07 22:32:07 +00:00
Taylor McKinnon 7e596598fb ft(UTAPI-76): Breakout disk usage for leveldb and datalog 2022-06-07 15:31:27 -07:00
bert-e 55b640faba Merge branch 'feature/UTAPI-75/Add_metrics_for_latest_check_snapshot_timestamps' into tmp/octopus/w/8.1/feature/UTAPI-75/Add_metrics_for_latest_check_snapshot_timestamps 2022-06-03 16:43:12 +00:00
Taylor McKinnon fd5bea5301 ft(UTAPI-75): Add metrics for latest checkpoint and snapshot 2022-06-03 09:40:27 -07:00
bert-e 54516db267 Merge branch 'feature/UTAPI-70/add_metrics_to_http_server' into tmp/octopus/w/8.1/feature/UTAPI-70/add_metrics_to_http_server 2022-05-26 16:50:41 +00:00
Taylor McKinnon 39eee54045 ft(UTAPI-70): Add http server metrics 2022-05-26 09:50:12 -07:00
bert-e c2f121d0d3 Merge branch 'feature/UTAPI-69/Add_async_task_metrics' into q/7.10 2022-05-26 16:32:49 +00:00
bert-e 1c6c159423 Merge branches 'w/8.1/feature/UTAPI-69/Add_async_task_metrics' and 'q/1239/7.10/feature/UTAPI-69/Add_async_task_metrics' into tmp/octopus/q/8.1 2022-05-26 16:32:49 +00:00
bert-e 22805fe7e7 Merge branch 'feature/UTAPI-69/Add_async_task_metrics' into tmp/octopus/w/8.1/feature/UTAPI-69/Add_async_task_metrics 2022-05-26 16:23:04 +00:00
Taylor McKinnon fbc7f3f442 ft(UTAPI-69): Add metrics for async tasks 2022-05-26 09:22:42 -07:00
bert-e 9e1761b0a4 Merge branch 'feature/UTAPI-67/Add_base_prometheus_framework' into q/7.10 2022-05-24 17:46:30 +00:00
bert-e ca82189fd7 Merge branches 'w/8.1/feature/UTAPI-67/Add_base_prometheus_framework' and 'q/1235/7.10/feature/UTAPI-67/Add_base_prometheus_framework' into tmp/octopus/q/8.1 2022-05-24 17:46:30 +00:00
bert-e 2f26d380f6 Merge branch 'feature/UTAPI-67/Add_base_prometheus_framework' into tmp/octopus/w/8.1/feature/UTAPI-67/Add_base_prometheus_framework 2022-05-24 17:12:06 +00:00
Taylor McKinnon 50a3ba2f18 ft(UTAPI-67): Add metrics framework to BaseTask 2022-05-24 10:07:48 -07:00
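The UTAPI-67/69/70 commits above add a prom-client based metrics framework to tasks and the HTTP server; a minimal sketch of that pattern, with illustrative metric and label names rather than Utapi's actual ones:

```js
const promClient = require('prom-client');

const taskDuration = new promClient.Histogram({
    name: 'utapi_task_duration_seconds', // hypothetical metric name
    help: 'Duration of async task executions',
    labelNames: ['task', 'status'],
});

// Wrap an async task so every run is timed and labelled by outcome.
async function runWithMetrics(taskName, fn) {
    const end = taskDuration.startTimer({ task: taskName });
    try {
        const result = await fn();
        end({ status: 'success' }); // records the duration with its labels
        return result;
    } catch (err) {
        end({ status: 'error' });
        throw err;
    }
}
```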
Taylor McKinnon 9f1552488c impr(UTAPI-66): Update Dockerfile with --network-concurrency 2022-05-18 10:06:08 -07:00
bert-e bf366e9472 Merge branch 'improvement/UTAPI-66/migrate_to_arsenal_7_10_18' into tmp/octopus/w/8.1/improvement/UTAPI-66/migrate_to_arsenal_7_10_18 2022-05-18 16:34:32 +00:00
Taylor McKinnon 5352f8467d remove unused require 2022-05-18 09:34:26 -07:00
bert-e 002a7ad1ca Merge branch 'improvement/UTAPI-66/migrate_to_arsenal_7_10_18' into tmp/octopus/w/8.1/improvement/UTAPI-66/migrate_to_arsenal_7_10_18 2022-05-18 16:33:37 +00:00
Taylor McKinnon a8f54966bc f 2022-05-18 09:33:32 -07:00
bert-e c2bff35bc6 Merge branch 'improvement/UTAPI-66/migrate_to_arsenal_7_10_18' into tmp/octopus/w/8.1/improvement/UTAPI-66/migrate_to_arsenal_7_10_18 2022-05-18 16:30:07 +00:00
Taylor McKinnon b92102eb65 Apply suggestions from code review
Co-authored-by: Jonathan Gramain <jonathan.gramain@scality.com>
2022-05-18 09:30:02 -07:00
Taylor McKinnon 280c4bae3a Merge remote-tracking branch 'origin/improvement/UTAPI-66/migrate_to_arsenal_7_10_18' into w/8.1/improvement/UTAPI-66/migrate_to_arsenal_7_10_18 2022-05-17 13:39:36 -07:00
Taylor McKinnon d9901609ae impr(UTAPI-66): Convert v2 code 2022-05-17 11:03:23 -07:00
Taylor McKinnon 4448f79088 impr(UTAPI-66): Convert v1 code 2022-05-17 11:01:30 -07:00
Taylor McKinnon 40fa94f0d7 impr(UTAPI-66): Update arsenal to 7.10.24 2022-05-17 10:56:15 -07:00
bert-e 3c09767315 Merge branch 'bugfix/UTAPI-72/add_missing_await_to_pushMetric' into tmp/octopus/w/8.1/bugfix/UTAPI-72/add_missing_await_to_pushMetric 2022-05-06 20:13:26 +00:00
Taylor McKinnon 43ca83cab7 bf(UTAPI-72): Fix CacheClient.pushMetric() to `await` this._cacheBackend.addToShard() 2022-05-06 12:46:35 -07:00
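The UTAPI-72 fix is the classic missing-`await` bug: without it, pushMetric() resolves before the write lands and errors from addToShard() are silently dropped. A sketch of the bug class, with the class and method names taken from the commit message but the signatures assumed:

```js
class CacheClient {
    constructor(cacheBackend) {
        this._cacheBackend = cacheBackend;
    }

    async pushMetric(metric) {
        // Before: this._cacheBackend.addToShard(metric);  (promise ignored)
        // After: awaiting propagates write failures to the caller.
        return await this._cacheBackend.addToShard(metric);
    }
}
```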
Erwan Bernard 2c1d25a50e Merge remote-tracking branch 'origin/w/7.10/feature/RELENG-5645/patch-usage-of-action-gh-release' into w/8.1/feature/RELENG-5645/patch-usage-of-action-gh-release 2022-04-01 17:01:05 +02:00
bert-e 7dd49ca418 Merge branch 'feature/RELENG-5645/patch-usage-of-action-gh-release' into tmp/octopus/w/7.10/feature/RELENG-5645/patch-usage-of-action-gh-release 2022-04-01 14:58:01 +00:00
Erwan Bernard 87cba51d75 [RELENG-5645] Patch usage of actions gh release 2022-04-01 15:56:31 +02:00
Xin LI 3eed7b295d bugfix: UTAPI-64 update vaultclient, bucketclient, oas-tools to fix critical vulnerabilities 2022-03-31 19:45:00 +02:00
bert-e c359ddee7e Merge branch 'w/7.10/bugfix/UTAPI-63/fix_arsenal_require_for_dhparam' into tmp/octopus/w/8.1/bugfix/UTAPI-63/fix_arsenal_require_for_dhparam 2022-03-11 18:09:51 +00:00
Taylor McKinnon f6215a1b08 bf(UTAPI-63): Fix dhparam require 2022-03-11 10:05:34 -08:00
Naren 7d7b46bc5e feature: UTAPI-59 update yarn.lock 2022-02-07 16:18:03 -08:00
Naren b82bed39db Merge remote-tracking branch 'origin/w/7.10/feature/UTAPI-59-update-version-and-deps' into w/8.1/feature/UTAPI-59-update-version-and-deps 2022-02-07 16:15:18 -08:00
Naren d9838f4198 feature: UTAPI-59 update eslintrc rules 2022-02-07 15:39:37 -08:00
Naren a6bd7e348c Merge remote-tracking branch 'origin/feature/UTAPI-59-update-version-and-deps' into w/7.10/feature/UTAPI-59-update-version-and-deps 2022-02-07 14:39:43 -08:00
Naren 5683075f59 feature: UTAPI-59 update version and deps 2022-02-07 14:31:45 -08:00
Naren cc3bceebcf Merge remote-tracking branch 'origin/w/7.10/feature/UTAPI-59/UpgradeToNode16' into w/8.1/feature/UTAPI-59/UpgradeToNode16 2022-02-07 14:22:20 -08:00
Naren 5ad53486c3 Merge remote-tracking branch 'origin/feature/UTAPI-59/UpgradeToNode16' into w/7.10/feature/UTAPI-59/UpgradeToNode16 2022-02-07 14:08:27 -08:00
Nicolas Humbert 20479f0dfa Merge remote-tracking branch 'origin/development/8.1' into w/8.1/feature/UTAPI-59/UpgradeToNode16 2022-02-04 16:56:05 +01:00
Nicolas Humbert da04548b2a Merge remote-tracking branch 'origin/development/7.10' into w/7.10/feature/UTAPI-59/UpgradeToNode16 2022-02-04 16:42:26 +01:00
Nicolas Humbert e86423f88c Merge remote-tracking branch 'origin/development/7.4' into feature/UTAPI-59/UpgradeToNode16 2022-02-04 16:31:58 +01:00
bert-e 7179917edc Merge branches 'w/8.1/feature/UTAPI-44-migrate-github-actions' and 'q/1173/7.10/feature/UTAPI-44-migrate-github-actions' into tmp/octopus/q/8.1 2022-02-03 22:18:40 +00:00
bert-e fb8be0601e Merge branch 'w/7.10/feature/UTAPI-44-migrate-github-actions' into tmp/octopus/q/7.10 2022-02-03 22:18:39 +00:00
Naren 92adb8c320 Merge remote-tracking branch 'origin/improvement/UTAPI-61-lock-bucket-client-version' into w/8.1/improvement/UTAPI-61-lock-bucket-client-version 2022-02-02 16:14:42 -08:00
Naren 2182593c4c improvement: UTAPI-61 lock bucketclient to a version 2022-02-02 15:40:00 -08:00
Thomas Carmet b770331a12 Merge remote-tracking branch 'origin/w/7.10/feature/UTAPI-44-migrate-github-actions' into w/8.1/feature/UTAPI-44-migrate-github-actions 2022-02-02 11:09:09 -08:00
Thomas Carmet 664d7fba55 Add healthcheck to all services 2022-02-02 11:08:21 -08:00
Thomas Carmet e5843723f6 Wait for warp10 to boot before starting pipeline 2022-02-02 11:08:21 -08:00
Taylor McKinnon 0285e4002b pass path to subprocess 2022-01-28 16:51:11 -08:00
bert-e 765b149cbf Merge branch 'w/7.10/feature/UTAPI-59/UpgradeToNode16' into tmp/octopus/w/8.1/feature/UTAPI-59/UpgradeToNode16 2022-01-26 23:38:35 +00:00
Nicolas Humbert 7dbbfc5ee1 Update engines node to 16 2022-01-26 18:38:21 -05:00
Nicolas Humbert 1fc6c29864 update Docker node image 2022-01-26 15:50:19 -05:00
Nicolas Humbert 135581fa63 Merge remote-tracking branch 'origin/w/7.10/feature/UTAPI-59/UpgradeToNode16' into w/8.1/feature/UTAPI-59/UpgradeToNode16 2022-01-26 15:47:41 -05:00
Nicolas Humbert 1640b641a9 Merge remote-tracking branch 'origin/feature/UTAPI-59/UpgradeToNode16' into w/7.10/feature/UTAPI-59/UpgradeToNode16 2022-01-26 15:41:45 -05:00
Nicolas Humbert a018192741 UTAPI-59 upgrade to Node 16 2022-01-26 15:24:21 -05:00
Artem Bakalov c27442cc89 S3C-5397 - adds exponential backoff to metadata requests to prevent failures during leader elections 2022-01-26 11:02:26 -08:00
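A generic sketch of the exponential backoff S3C-5397 describes around metadata requests (retry while a leader election settles); the wrapper name, retry counts, and delays are illustrative, not Utapi's values:

```js
// Retry fn() with exponentially growing, capped delays between attempts.
async function withBackoff(fn, { retries = 5, baseMs = 100, maxMs = 5000 } = {}) {
    for (let attempt = 0; ; attempt++) {
        try {
            return await fn();
        } catch (err) {
            if (attempt >= retries) throw err; // give up after the last retry
            const delayMs = Math.min(baseMs * 2 ** attempt, maxMs);
            await new Promise(resolve => setTimeout(resolve, delayMs));
        }
    }
}

// Usage sketch (hypothetical client call):
// const listing = await withBackoff(() => bucketd.listObjects(bucketName));
```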
Thomas Carmet 6642681b58 Merge remote-tracking branch 'origin/feature/UTAPI-44-migrate-github-actions' into w/7.10/feature/UTAPI-44-migrate-github-actions 2022-01-18 15:35:27 -08:00
Thomas Carmet bffd1d2a32 ability to override python interpreter 2022-01-17 11:17:01 -08:00
Thomas Carmet 3b13638b27 UTAPI-44 migrate to github actions 2022-01-17 11:17:01 -08:00
bert-e 89bb6c6e5d Merge branch 'improvement/UTAPI-58/limit_max_size_of_snapshots' into tmp/octopus/w/8.1/improvement/UTAPI-58/limit_max_size_of_snapshots 2022-01-07 19:45:00 +00:00
Taylor McKinnon 5cebcedead (f) remove extra line 2022-01-07 11:44:42 -08:00
bert-e 792580c6d6 Merge branch 'improvement/UTAPI-58/limit_max_size_of_snapshots' into tmp/octopus/w/8.1/improvement/UTAPI-58/limit_max_size_of_snapshots 2022-01-06 20:00:26 +00:00
Taylor McKinnon 41f9cc7c7d impr(UTAPI-58): Limit maximum size of snapshots 2022-01-06 11:53:00 -08:00
bert-e 29475f1b9a Merge branches 'w/8.1/improvement/UTAPI-55/warp10_request_error_logging' and 'q/1202/7.10/improvement/UTAPI-55/warp10_request_error_logging' into tmp/octopus/q/8.1 2021-12-06 21:47:28 +00:00
bert-e dec503fda3 Merge branch 'improvement/UTAPI-55/warp10_request_error_logging' into q/7.10 2021-12-06 21:47:28 +00:00
bert-e 255f428b84 Merge branch 'improvement/UTAPI-55/warp10_request_error_logging' into tmp/octopus/w/8.1/improvement/UTAPI-55/warp10_request_error_logging 2021-12-06 20:51:32 +00:00
Taylor McKinnon 2d7274c559 impr(UTAPI-55): Improve warp 10 request error logging 2021-12-06 12:49:28 -08:00
bert-e 202dc39eb5 Merge branch 'feature/UTAPI-56/expose_warp10_request_timeouts_in_config' into tmp/octopus/w/8.1/feature/UTAPI-56/expose_warp10_request_timeouts_in_config 2021-12-03 19:07:23 +00:00
Taylor McKinnon c96bc06a4b ft(UTAPI-56): Expose warp 10 request timeouts in config 2021-12-03 11:05:09 -08:00
bert-e 25bd285d35 Merge branch 'bugfix/UTAPI-54/fix_service_user_test' into tmp/octopus/w/8.1/bugfix/UTAPI-54/fix_service_user_test 2021-11-29 20:44:52 +00:00
Taylor McKinnon 19b1974e18 bf(UTAPI-54): create service user using prefix during test setup 2021-11-29 12:44:23 -08:00
Taylor McKinnon 5a4ba9f72a bf: add yarn.lock to image 2021-11-23 10:19:41 -08:00
bert-e 494381beec Merge branch 'bugfix/UTAPI-53/handle_missing_content_length' into tmp/octopus/w/8.1/bugfix/UTAPI-53/handle_missing_content_length 2021-11-23 17:58:18 +00:00
Taylor McKinnon 6c1a2c87fb bf(UTAPI-53): skip objects without a content-length during reindex 2021-11-23 09:56:52 -08:00
Taylor McKinnon be10ca2ba8 Merge remote-tracking branch 'origin/feature/UTAPI-50/bump_version_to_7.10.5' into w/8.1/feature/UTAPI-50/bump_version_to_7.10.5 2021-11-18 10:05:37 -08:00
Taylor McKinnon 5e68e14d02 ft(UTAPI-50): Bump version to 7.10.5 2021-11-18 09:58:58 -08:00
bert-e 1800407606 Merge branch 'bugfix/UTAPI-49/fix_config_file_schema_event_filter' into tmp/octopus/w/8.1/bugfix/UTAPI-49/fix_config_file_schema_event_filter 2021-11-17 22:39:27 +00:00
Taylor McKinnon 622026e0c6 bf(UTAPI-49): Fix event filter config schema 2021-11-17 14:36:49 -08:00
Taylor McKinnon 029fe17019 Merge remote-tracking branch 'origin/feature/UTAPI-48/bump_version_to_7.10.4' into w/8.1/feature/UTAPI-48/bump_version_to_7.10.4 2021-11-17 09:46:16 -08:00
Taylor McKinnon 9600a1ce59 ft(UTAPI-48): Bump version to 7.10.4 2021-11-17 09:34:11 -08:00
bert-e ada9e2bf55 Merge branches 'w/8.1/bugfix/UTAPI-46-redisv2-backoff' and 'q/1180/7.10/bugfix/UTAPI-46-redisv2-backoff' into tmp/octopus/q/8.1 2021-11-16 23:27:56 +00:00
bert-e d217850863 Merge branch 'bugfix/UTAPI-46-redisv2-backoff' into q/7.10 2021-11-16 23:27:56 +00:00
bert-e 260d2f83ef Merge branch 'bugfix/UTAPI-46-redisv2-backoff' into tmp/octopus/w/8.1/bugfix/UTAPI-46-redisv2-backoff 2021-11-16 22:55:55 +00:00
Rached Ben Mustapha 27a36b3c51 bugfix: pin python redis client for tests 2021-11-16 14:08:03 -08:00
Rached Ben Mustapha 34ed98b7fb bugfix: support redis retry params in v2
(cherry picked from commit 5908d15cd6bb7da551bd7392c17675e07bef3456)
2021-11-16 12:16:38 -08:00
bert-e bbb6764aa7 Merge branches 'w/8.1/feature/UTAPI-43/event_allow_deny_filter' and 'q/1174/7.10/feature/UTAPI-43/event_allow_deny_filter' into tmp/octopus/q/8.1 2021-11-16 17:50:25 +00:00
bert-e 1f672d343b Merge branch 'feature/UTAPI-43/event_allow_deny_filter' into q/7.10 2021-11-16 17:50:25 +00:00
bert-e e63f9c9009 Merge branch 'feature/UTAPI-43/event_allow_deny_filter' into tmp/octopus/w/8.1/feature/UTAPI-43/event_allow_deny_filter 2021-11-16 17:35:45 +00:00
Taylor McKinnon 6c53e19ce2 ft(UTAPI-43): Add allow/deny filter for events 2021-11-16 09:35:00 -08:00
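A sketch of an allow/deny event filter of the kind UTAPI-43 adds; the config shape, field names, and deny-wins precedence here are assumptions, not Utapi's actual schema:

```js
// deny wins over allow; with no allow-list configured, unmentioned events pass.
function makeEventFilter({ allow, deny } = {}) {
    return event => {
        if (deny && deny.has(event.operationId)) return false;
        if (allow) return allow.has(event.operationId);
        return true;
    };
}

// Usage sketch: keep only object PUTs and GETs.
const keepPutsAndGets = makeEventFilter({
    allow: new Set(['putObject', 'getObject']),
});
```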
Rached Ben Mustapha 5650b072ce Merge remote-tracking branch 'origin/bugfix/UTAPI-38-wait-for-redis-ready-main' into w/8.1/bugfix/UTAPI-38-wait-for-redis-ready-main 2021-11-08 18:25:43 +00:00
Rached Ben Mustapha 8358eb7166 chore: bump version 2021-11-08 17:55:05 +00:00
Rached Ben Mustapha 1ef954975e Merge remote-tracking branch 'origin/bugfix/UTAPI-38-wait-for-redis-ready-main' into w/8.1/bugfix/UTAPI-38-wait-for-redis-ready-main 2021-11-05 02:47:51 +00:00
Rached Ben Mustapha 5afbaca3df bugfix: try and recover from redis connection errors 2021-11-05 02:25:28 +00:00
Rached Ben Mustapha c7c4fcf8dc improvement: install backo 2021-11-05 02:25:28 +00:00
Rached Ben Mustapha d01f348867 improvement: upgrade ioredis 2021-11-05 02:25:25 +00:00
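The UTAPI-38 commits above recover from Redis connection errors with backoff (via the backo package). ioredis itself also exposes a retryStrategy hook; a sketch with illustrative delay values:

```js
const Redis = require('ioredis');

const client = new Redis({
    host: '127.0.0.1',
    port: 6379,
    // Called with the number of reconnect attempts so far; returning a
    // number schedules the next reconnect after that many milliseconds.
    retryStrategy(times) {
        return Math.min(50 * 2 ** times, 2000); // exponential, capped at 2s
    },
});
```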
bert-e 1ae4abee19 Merge branch 'feature/UTAPI-41/release_7_10_2' into q/7.10 2021-11-01 22:13:17 +00:00
bert-e 989715af95 Merge branches 'w/8.1/feature/UTAPI-41/release_7_10_2' and 'q/1169/7.10/feature/UTAPI-41/release_7_10_2' into tmp/octopus/q/8.1 2021-11-01 22:13:17 +00:00
Taylor McKinnon 87887e57a0 Merge remote-tracking branch 'origin/feature/UTAPI-41/release_7_10_2' into w/8.1/feature/UTAPI-41/release_7_10_2 2021-11-01 12:39:30 -07:00
Taylor McKinnon 7f03b7759c v7.10.2 2021-11-01 12:34:19 -07:00
bert-e e36d376dae Merge branch 'bugfix/UTAPI-42/change_upstream_warp10_repo' into q/7.10 2021-11-01 19:31:00 +00:00
bert-e a8bccfd261 Merge branches 'w/8.1/bugfix/UTAPI-42/change_upstream_warp10_repo' and 'q/1167/7.10/bugfix/UTAPI-42/change_upstream_warp10_repo' into tmp/octopus/q/8.1 2021-11-01 19:31:00 +00:00
bert-e 89bd9751e4 Merge branch 'bugfix/UTAPI-42/change_upstream_warp10_repo' into tmp/octopus/w/8.1/bugfix/UTAPI-42/change_upstream_warp10_repo 2021-11-01 19:24:49 +00:00
Taylor McKinnon 7344a0801c bf(UTAPI-42): Update upstream warp 10 image repo 2021-11-01 12:22:45 -07:00
bert-e b1d55217a8 Merge branches 'w/8.1/bugfix/UTAPI-39/add_crr_metrics_for_v2' and 'q/1159/7.10/bugfix/UTAPI-39/add_crr_metrics_for_v2' into tmp/octopus/q/8.1 2021-11-01 19:17:43 +00:00
bert-e 67c6806e99 Merge branch 'bugfix/UTAPI-39/add_crr_metrics_for_v2' into q/7.10 2021-11-01 19:17:42 +00:00
bert-e 0bb15ae1c6 Merge branch 'bugfix/UTAPI-39/add_crr_metrics_for_v2' into tmp/octopus/w/8.1/bugfix/UTAPI-39/add_crr_metrics_for_v2 2021-11-01 19:10:24 +00:00
Taylor McKinnon 0001b23c32 bf(UTAPI-39): Add crr metric operations for Utapiv2 2021-11-01 12:09:38 -07:00
bert-e cb9e79be48 Merge branch 'w/7.10/feature/UTAPI-40/release_7_4_11' into tmp/octopus/w/8.1/feature/UTAPI-40/release_7_4_11 2021-10-29 23:04:25 +00:00
Taylor McKinnon 3390c910e3 Merge remote-tracking branch 'origin/feature/UTAPI-40/release_7_4_11' into w/7.10/feature/UTAPI-40/release_7_4_11 2021-10-29 16:03:58 -07:00
Taylor McKinnon fa4c353b67 ft(UTAPI-40): bump version to 7.4.11 2021-10-29 15:58:22 -07:00
bert-e c528f75d98 Merge branches 'w/8.1/bugfix/UTAPI-34-implementCRRActions' and 'q/1154/7.10/bugfix/UTAPI-34-implementCRRActions' into tmp/octopus/q/8.1 2021-10-29 18:40:12 +00:00
bert-e 38fb9fb139 Merge branches 'w/7.10/bugfix/UTAPI-34-implementCRRActions' and 'q/1154/7.4/bugfix/UTAPI-34-implementCRRActions' into tmp/octopus/q/7.10 2021-10-29 18:40:12 +00:00
bert-e 215376910c Merge branch 'bugfix/UTAPI-34-implementCRRActions' into q/7.4 2021-10-29 18:40:12 +00:00
bert-e 14779a24ec Merge branch 'w/7.10/bugfix/UTAPI-34-implementCRRActions' into tmp/octopus/w/8.1/bugfix/UTAPI-34-implementCRRActions 2021-10-28 23:08:57 +00:00
Taylor McKinnon d8aafb5b90 Merge remote-tracking branch 'origin/bugfix/UTAPI-34-implementCRRActions' into w/7.10/bugfix/UTAPI-34-implementCRRActions 2021-10-28 16:08:38 -07:00
Jonathan Gramain 67a8a9b94d bf(UTAPI-34): Add metric types for replication 2021-10-28 16:05:00 -07:00
bert-e d5947cd548 Merge branch 'improvement/UTAPI-36/bump_vault_cpu_req' into q/7.10 2021-10-22 16:04:50 +00:00
bert-e df79f65abf Merge branches 'w/8.1/improvement/UTAPI-36/bump_vault_cpu_req' and 'q/1152/7.10/improvement/UTAPI-36/bump_vault_cpu_req' into tmp/octopus/q/8.1 2021-10-22 16:04:50 +00:00
bert-e be1be375f3 Merge branch 'improvement/UTAPI-36/bump_vault_cpu_req' into tmp/octopus/w/8.1/improvement/UTAPI-36/bump_vault_cpu_req 2021-10-21 22:00:49 +00:00
Taylor McKinnon 5668d16c2e impr(UTAPI-36): Bump vault cpu request and limit 2021-10-21 14:59:06 -07:00
bert-e df29658a2f Merge branch 'bugfix/UTAPI-28/catch_all_listing_errors' into tmp/octopus/w/7.10/bugfix/UTAPI-28/catch_all_listing_errors 2021-10-20 23:41:11 +00:00
bert-e 3a5d379510 Merge branch 'w/7.10/bugfix/UTAPI-28/catch_all_listing_errors' into tmp/octopus/w/8.1/bugfix/UTAPI-28/catch_all_listing_errors 2021-10-20 23:41:11 +00:00
bert-e 3a80fb708e Merge branch 'w/7.10/bugfix/UTAPI-35/backport_fixes_for_74' into tmp/octopus/w/8.1/bugfix/UTAPI-35/backport_fixes_for_74 2021-10-19 20:46:32 +00:00
bert-e 5d173d3bc9 Merge branch 'bugfix/UTAPI-35/backport_fixes_for_74' into tmp/octopus/w/7.10/bugfix/UTAPI-35/backport_fixes_for_74 2021-10-19 20:46:32 +00:00
Taylor McKinnon a29fbf4bb1 bf(UTAPI-28): Catch all listing errors and reraise as InvalidListing 2021-10-19 13:42:38 -07:00
Taylor McKinnon 05ff10a343 bf(UTAPI-27): Fail bucketd request after repeated errors
(cherry picked from commit 8da7c90691)
2021-10-19 13:30:02 -07:00
Taylor McKinnon b2d725e2b8 bf(UTAPI-21): convert --workers flag to int
(cherry picked from commit d44b60ec0e)
2021-10-19 13:29:53 -07:00
scality-gelbart 4463a7172a Update s3_bucketd.py
(cherry picked from commit ba3dbb0100)
2021-10-19 13:29:45 -07:00
scality-gelbart 6d9b47cb79 Update s3_bucketd.py
(cherry picked from commit 588ccf7443)
2021-10-19 13:29:38 -07:00
Taylor McKinnon ec7d68075b bf(S3C-3505): Add support for --bucket flag to s3_reindex.py
(cherry picked from commit 1e4b7bd9f2)
2021-10-19 13:28:59 -07:00
Taylor McKinnon 945aa9665f Merge remote-tracking branch 'origin/feature/UTAPI-33/add_ensure_service_user' into w/8.1/feature/UTAPI-33/add_ensure_service_user 2021-10-15 13:20:12 -07:00
Taylor McKinnon 2d876c17cf ft(UTAPI-33): Add ensureServiceUser script 2021-10-15 13:12:42 -07:00
bert-e 3b2d4a18d4 Merge branch 'improvement/UTAPI-32/change_service_user_arnPrefix_to_full_arn' into tmp/octopus/w/8.1/improvement/UTAPI-32/change_service_user_arnPrefix_to_full_arn 2021-10-12 19:27:41 +00:00
Taylor McKinnon e8519ceebb impr(UTAPI-32): Change service user arnPrefix to full arn 2021-10-12 12:25:58 -07:00
Thomas Carmet fa3fb82e5c Merge branch 'w/7.10/feature/UTAPI-30-align-package-version' into w/8.1/feature/UTAPI-30-align-package-version 2021-10-07 10:36:17 -07:00
Thomas Carmet 5938c8be5c Merge branch 'feature/UTAPI-30-align-package-version' into w/7.10/feature/UTAPI-30-align-package-version 2021-10-07 10:35:08 -07:00
Thomas Carmet a979a40260 UTAPI-30 set package.json version accordingly 2021-10-07 10:33:31 -07:00
bert-e 0001d2218a Merge branch 'bugfix/UTAPI-29/fix_bucketd_tls_config' into tmp/octopus/w/8.1/bugfix/UTAPI-29/fix_bucketd_tls_config 2021-10-06 00:30:05 +00:00
Taylor McKinnon cf7b302414 bf(UTAPI-29): Fix bucketd tls config 2021-10-05 17:28:42 -07:00
bert-e b38000c771 Merge branch 'bugfix/UTAPI-27/max_retries_for_bucketd_requests' into tmp/octopus/w/8.1/bugfix/UTAPI-27/max_retries_for_bucketd_requests 2021-10-04 20:41:14 +00:00
Taylor McKinnon 8da7c90691 bf(UTAPI-27): Fail bucketd request after repeated errors 2021-10-04 13:40:39 -07:00
bert-e 34d38fb2b7 Merge branches 'w/8.1/feature/UTAPI-26/add_service_user' and 'q/1141/7.10/feature/UTAPI-26/add_service_user' into tmp/octopus/q/8.1 2021-10-01 17:43:00 +00:00
bert-e 6d6c455de4 Merge branch 'feature/UTAPI-26/add_service_user' into q/7.10 2021-10-01 17:43:00 +00:00
bert-e 6d99ac3dae Merge branch 'feature/UTAPI-26/add_service_user' into tmp/octopus/w/8.1/feature/UTAPI-26/add_service_user 2021-10-01 17:34:30 +00:00
bert-e 84a9485af6 Merge branch 'feature/UTAPI-24/limit_user_credentials_via_filtering' into q/7.10 2021-10-01 17:24:19 +00:00
bert-e 7755a3fa3d Merge branches 'w/8.1/feature/UTAPI-24/limit_user_credentials_via_filtering' and 'q/1138/7.10/feature/UTAPI-24/limit_user_credentials_via_filtering' into tmp/octopus/q/8.1 2021-10-01 17:24:19 +00:00
bert-e 9bca308db7 Merge branch 'feature/UTAPI-24/limit_user_credentials_via_filtering' into tmp/octopus/w/8.1/feature/UTAPI-24/limit_user_credentials_via_filtering 2021-10-01 17:18:46 +00:00
bert-e 3a2b345ada Merge branch 'feature/UTAPI-23/limit_account_keys_via_filtering' into q/7.10 2021-10-01 16:56:45 +00:00
bert-e 0159387ba9 Merge branch 'q/1137/7.10/feature/UTAPI-23/limit_account_keys_via_filtering' into tmp/normal/q/8.1 2021-10-01 16:56:45 +00:00
Taylor McKinnon 05017af754 Merge remote-tracking branch 'origin/development/8.1' into w/8.1/feature/UTAPI-23/limit_account_keys_via_filtering 2021-10-01 09:48:47 -07:00
Taylor McKinnon 5bef96367c ft(UTAPI-26): Add authorization for service user 2021-10-01 09:08:04 -07:00
Taylor McKinnon 774aaef0dd ft(UTAPI-24): Limit user credentials 2021-10-01 09:07:12 -07:00
bert-e 6544f118c0 Merge branch 'feature/UTAPI-23/limit_account_keys_via_filtering' into tmp/octopus/w/8.1/feature/UTAPI-23/limit_account_keys_via_filtering 2021-10-01 16:04:37 +00:00
Taylor McKinnon 0e947f255b ft(UTAPI-23): Limit account level credentials 2021-10-01 09:04:12 -07:00
bert-e 42d92e68ac Merge branch 'bugfix/UTAPI-25_add-bucket-lifecycle-operations' into q/7.10 2021-09-29 22:22:37 +00:00
bert-e b970847ca3 Merge branches 'w/8.1/bugfix/UTAPI-25_add-bucket-lifecycle-operations' and 'q/1140/7.10/bugfix/UTAPI-25_add-bucket-lifecycle-operations' into tmp/octopus/q/8.1 2021-09-29 22:22:37 +00:00
bert-e f19f6c7ab6 Merge branch 'bugfix/UTAPI-25_add-bucket-lifecycle-operations' into tmp/octopus/w/8.1/bugfix/UTAPI-25_add-bucket-lifecycle-operations 2021-09-29 16:31:47 +00:00
Ilke ba85f3e2a7 bf(S3C-4872): Support bucket lifecycle operations 2021-09-29 09:18:14 -07:00
bert-e 54362134de Merge branches 'w/8.1/feature/UTAPI-7-pin-arsenal' and 'q/1122/7.10/feature/UTAPI-7-pin-arsenal' into tmp/octopus/q/8.1 2021-09-14 20:03:52 +00:00
bert-e f8e85ee7cc Merge branch 'w/7.10/feature/UTAPI-7-pin-arsenal' into tmp/octopus/q/7.10 2021-09-14 20:03:51 +00:00
bert-e 959a26cc62 Merge branch 'bugfix/UTAPI-21__convert_reindex_workers_flag_to_int' into tmp/octopus/w/8.1/bugfix/UTAPI-21__convert_reindex_workers_flag_to_int 2021-09-13 16:54:30 +00:00
Taylor McKinnon d44b60ec0e bf(UTAPI-21): convert --workers flag to int 2021-09-13 09:47:28 -07:00
bert-e ea3fff30b8 Merge branch 'bugfix/S3C-4784_redis-connection-build-up' into q/7.10 2021-09-02 17:22:21 +00:00
bert-e 96e2b2c731 Merge branches 'w/8.1/bugfix/S3C-4784_redis-connection-build-up' and 'q/1128/7.10/bugfix/S3C-4784_redis-connection-build-up' into tmp/octopus/q/8.1 2021-09-02 17:22:21 +00:00
bert-e 85182216e4 Merge branch 'bugfix/S3C-4784_redis-connection-build-up' into tmp/octopus/w/8.1/bugfix/S3C-4784_redis-connection-build-up 2021-09-02 17:15:50 +00:00
= bc8c791170 disconnect stuck connection on failover before retry 2021-09-01 19:49:22 -07:00
Thomas Carmet 9532aec058 Merge branch 'w/7.10/feature/UTAPI-7-pin-arsenal' into w/8.1/feature/UTAPI-7-pin-arsenal 2021-09-01 14:08:24 -07:00
Thomas Carmet 37fa1e184f Merge branch 'feature/UTAPI-7-pin-arsenal' into w/7.10/feature/UTAPI-7-pin-arsenal 2021-09-01 14:07:23 -07:00
Thomas Carmet 6ea61e4c49 UTAPI-7 pin arsenal version 2021-09-01 14:06:17 -07:00
bert-e b815471663 Merge branch 'bugfix/S3C-4784_redis-connection-buildup-stabilization' into tmp/octopus/w/7.10/bugfix/S3C-4784_redis-connection-buildup-stabilization 2021-08-27 16:27:36 +00:00
bert-e 214bf4189f Merge branch 'w/7.10/bugfix/S3C-4784_redis-connection-buildup-stabilization' into tmp/octopus/w/8.1/bugfix/S3C-4784_redis-connection-buildup-stabilization 2021-08-27 16:27:36 +00:00
= 44fc07ade9 disconnect stuck connection on failover before retry 2021-08-27 09:11:06 -07:00
bert-e 89c5ae0560 Merge branch 'bugfix/UTAPI-6_warp10_leak_fix_jmx' into tmp/octopus/w/7.10/bugfix/UTAPI-6_warp10_leak_fix_jmx 2021-08-16 17:22:02 +00:00
bert-e 2e4c2c66d5 Merge branch 'w/7.10/bugfix/UTAPI-6_warp10_leak_fix_jmx' into tmp/octopus/w/8.1/bugfix/UTAPI-6_warp10_leak_fix_jmx 2021-08-16 17:22:02 +00:00
Taylor McKinnon c5f24d619a bf(UTAPI-6): Update to fixed version and add jmx exporter 2021-08-16 10:21:32 -07:00
bert-e b2e4683c5d Merge branch 'w/7.10/feature/UTAPI-5-bump-werelogs' into tmp/octopus/w/8.1/feature/UTAPI-5-bump-werelogs 2021-08-12 17:25:13 +00:00
Thomas Carmet bae63a036c Merge remote-tracking branch 'origin/feature/UTAPI-5-bump-werelogs' into w/7.10/feature/UTAPI-5-bump-werelogs 2021-08-12 10:23:24 -07:00
Thomas Carmet 2fc2531b97 UTAPI-5 update werelogs to tagged version 2021-08-12 10:19:07 -07:00
bert-e f96bc66c5e Merge branch 'feature/UTAPI-1_prometheus_metrics' into q/7.10 2021-07-31 00:40:12 +00:00
bert-e 5487001cee Merge branches 'w/8.1/feature/UTAPI-1_prometheus_metrics' and 'q/1093/7.10/feature/UTAPI-1_prometheus_metrics' into tmp/octopus/q/8.1 2021-07-31 00:40:12 +00:00
bert-e 3779c8c144 Merge branch 'feature/UTAPI-1_prometheus_metrics' into tmp/octopus/w/8.1/feature/UTAPI-1_prometheus_metrics 2021-07-31 00:30:42 +00:00
= 12a900f436 Prometheus Exporters for Nodejs, Redis, Warp10 2021-07-30 17:25:28 -07:00
bert-e ff23d1d5cd Merge branch 'bugfix/S3C-4550_avoid_reindex_diff_flapping' into q/7.10 2021-07-01 22:18:18 +00:00
bert-e 7f3f6bb753 Merge branches 'w/8.1/bugfix/S3C-4550_avoid_reindex_diff_flapping' and 'q/1082/7.10/bugfix/S3C-4550_avoid_reindex_diff_flapping' into tmp/octopus/q/8.1 2021-07-01 22:18:18 +00:00
bert-e 8715b0d096 Merge branch 'feature/S3C-4439_bucket-encryption-api-operations-to-utapi-v2' into tmp/octopus/w/8.1/feature/S3C-4439_bucket-encryption-api-operations-to-utapi-v2 2021-06-29 02:23:39 +00:00
artem bakalov db69b03879 add Encryption api funcs to metrics 2021-06-28 18:51:52 -07:00
bert-e 7e38de823a Merge branch 'bugfix/S3C-4550_avoid_reindex_diff_flapping' into tmp/octopus/w/8.1/bugfix/S3C-4550_avoid_reindex_diff_flapping 2021-06-24 19:47:21 +00:00
Taylor McKinnon f5b15573fe bf(S3C-4550): Do not take previous reindex diffs into account when calculating its own 2021-06-24 12:46:25 -07:00
bert-e 1695a01f9e Merge branch 'feature/S3C-4240_specific_bucxket_reindex_flag' into tmp/octopus/w/8.1/feature/S3C-4240_specific_bucxket_reindex_flag 2021-06-24 18:28:13 +00:00
Taylor McKinnon 1f6e2642d0 ft(S3C-4240): Allow reindexing a specific bucket 2021-06-24 11:27:52 -07:00
bert-e 77042591b6 Merge branch 'bugfix/S3C-4429_null_sizeD_during_reindex' into tmp/octopus/w/8.1/bugfix/S3C-4429_null_sizeD_during_reindex 2021-05-26 19:28:25 +00:00
Taylor McKinnon 13351ddcd8 bf(S3C-4429): null sizeD calculated during reindex 2021-05-26 12:23:04 -07:00
bert-e a2d1c47451 Merge branches 'w/8.1/bugfix/S3C-3692_fix_accountid_support_in_list_metrics_js' and 'q/1068/7.10/bugfix/S3C-3692_fix_accountid_support_in_list_metrics_js' into tmp/octopus/q/8.1 2021-05-25 19:10:04 +00:00
bert-e 3b6e7cecd0 Merge branch 'bugfix/S3C-3692_fix_accountid_support_in_list_metrics_js' into q/7.10 2021-05-25 19:10:04 +00:00
bert-e 0adc018e10 Merge branch 'bugfix/S3C-3692_fix_accountid_support_in_list_metrics_js' into tmp/octopus/w/8.1/bugfix/S3C-3692_fix_accountid_support_in_list_metrics_js 2021-05-25 17:15:57 +00:00
Taylor McKinnon 1357840566 ft(S3C-3692): Add accountId -> canonicalId conversion for ListRecentMetrics 2021-05-25 10:15:13 -07:00
bert-e 3af6c176b3 Merge branch 'improvement/S3C-4388_adjust_reindex_log_levels' into tmp/octopus/w/8.1/improvement/S3C-4388_adjust_reindex_log_levels 2021-05-25 00:14:37 +00:00
Taylor McKinnon 8ac1d1b212 impr(S3C-4388): Adjust reindex task log levels 2021-05-24 17:13:05 -07:00
Taylor McKinnon edbae18a62 Merge remote-tracking branch 'origin/bugfix/S3C-4424_switch_protobuf_ext_to_git_lfs' into w/8.1/bugfix/S3C-4424_switch_protobuf_ext_to_git_lfs 2021-05-24 16:32:17 -07:00
Taylor McKinnon 89fc600b81 bf(S3C-4424): Switch protobuf extension to use git lfs 2021-05-24 15:25:17 -07:00
bert-e 16c3782ca4 Merge branch 'bugfix/S3C-4151_fix_user_support' into tmp/octopus/w/8.1/bugfix/S3C-4151_fix_user_support 2021-04-15 21:12:11 +00:00
Taylor McKinnon db4424eb59 bf(S3C-4151): Correctly pass api method to Vault request context 2021-04-15 14:09:47 -07:00
bert-e d5967bcee1 Merge branch 'w/7.10/bugfix/S3C-3996-backport-7.4' into tmp/octopus/w/8.1/bugfix/S3C-3996-backport-7.4 2021-04-01 17:08:52 +00:00
bert-e 91a16f9a19 Merge branch 'w/7.9/bugfix/S3C-3996-backport-7.4' into tmp/octopus/w/7.10/bugfix/S3C-3996-backport-7.4 2021-04-01 17:08:52 +00:00
bert-e b7dce7b85c Merge branch 'bugfix/S3C-3996-backport-7.4' into tmp/octopus/w/7.9/bugfix/S3C-3996-backport-7.4 2021-04-01 17:08:52 +00:00
Gregoire Doumergue acbf8880f6 S3C-3996: Reduce logging amount from s3_bucketd.py 2021-04-01 19:03:13 +02:00
bert-e e0d816a759 Merge branch 'bugfix/S3C-3996/reduce-reindex-logging' into tmp/octopus/w/8.1/bugfix/S3C-3996/reduce-reindex-logging 2021-04-01 07:21:29 +00:00
Gregoire Doumergue 67545ef783 S3C-3996: Reduce logging amount from s3_bucketd.py 2021-03-31 08:53:11 +02:00
Alexander Chan 3d0b92f319 bugfix: ZENKO-3300 fix incrby call 2021-03-18 15:18:38 -07:00
bert-e 9e85797380 Merge branches 'w/8.1/bugfix/S3C-4061-missing-content-length-workaround' and 'q/1030/7.10/bugfix/S3C-4061-missing-content-length-workaround' into tmp/octopus/q/8.1 2021-03-17 21:30:15 +00:00
bert-e 0591e5a0ef Merge branches 'w/7.9/bugfix/S3C-4061-missing-content-length-workaround' and 'q/1030/7.9.0/bugfix/S3C-4061-missing-content-length-workaround' into tmp/octopus/q/7.9 2021-03-17 21:30:14 +00:00
bert-e 75e09d1a82 Merge branches 'w/7.10/bugfix/S3C-4061-missing-content-length-workaround' and 'q/1030/7.9/bugfix/S3C-4061-missing-content-length-workaround' into tmp/octopus/q/7.10 2021-03-17 21:30:14 +00:00
bert-e 498044f2f6 Merge branch 'bugfix/S3C-4061-missing-content-length-workaround' into q/7.9.0 2021-03-17 21:30:14 +00:00
bert-e 2cce109bd6 Merge branch 'w/7.10/bugfix/S3C-4061-missing-content-length-workaround' into tmp/octopus/w/8.1/bugfix/S3C-4061-missing-content-length-workaround 2021-03-17 21:18:52 +00:00
bert-e 31edc21241 Merge branch 'w/7.9/bugfix/S3C-4061-missing-content-length-workaround' into tmp/octopus/w/7.10/bugfix/S3C-4061-missing-content-length-workaround 2021-03-17 21:18:52 +00:00
bert-e fd55afdfd8 Merge branch 'bugfix/S3C-4061-missing-content-length-workaround' into tmp/octopus/w/7.9/bugfix/S3C-4061-missing-content-length-workaround 2021-03-17 21:18:51 +00:00
bert-e 7eba6d84a7 Merge branch 'w/7.10/bugfix/S3C-4119-stale-bucket-workaround' into tmp/octopus/w/8.1/bugfix/S3C-4119-stale-bucket-workaround 2021-03-17 21:17:29 +00:00
bert-e 11b972b21c Merge branch 'bugfix/S3C-4119-stale-bucket-workaround' into tmp/octopus/w/7.9/bugfix/S3C-4119-stale-bucket-workaround 2021-03-17 21:17:29 +00:00
bert-e 41a8236fb6 Merge branch 'w/7.9/bugfix/S3C-4119-stale-bucket-workaround' into tmp/octopus/w/7.10/bugfix/S3C-4119-stale-bucket-workaround 2021-03-17 21:17:29 +00:00
scality-gelbart ba3dbb0100 Update s3_bucketd.py 2021-03-17 14:16:52 -07:00
scality-gelbart 588ccf7443 Update s3_bucketd.py 2021-03-17 14:16:17 -07:00
bert-e cecc13d63e Merge branch 'w/7.10/bugfix/S3C-4167_fix_typo_in_error_message' into tmp/octopus/w/8.1/bugfix/S3C-4167_fix_typo_in_error_message 2021-03-17 19:05:49 +00:00
bert-e 246ee6dcd0 Merge branch 'w/7.9/bugfix/S3C-4167_fix_typo_in_error_message' into tmp/octopus/w/7.10/bugfix/S3C-4167_fix_typo_in_error_message 2021-03-17 19:05:49 +00:00
bert-e 163d9bc9a1 Merge branch 'bugfix/S3C-4167_fix_typo_in_error_message' into tmp/octopus/w/7.9/bugfix/S3C-4167_fix_typo_in_error_message 2021-03-17 19:05:48 +00:00
Taylor McKinnon 0f489a88df bf(S3C-4167): Fix typo in error description 2021-03-17 12:04:47 -07:00
bert-e 361bc24c79 Merge branches 'w/8.1/bugfix/S3C-4145_fix_error_responses' and 'q/1023/7.10/bugfix/S3C-4145_fix_error_responses' into tmp/octopus/q/8.1 2021-03-16 03:43:59 +00:00
bert-e 2d61b470d7 Merge branches 'w/7.10/bugfix/S3C-4145_fix_error_responses' and 'q/1023/7.9/bugfix/S3C-4145_fix_error_responses' into tmp/octopus/q/7.10 2021-03-16 03:43:59 +00:00
bert-e daaa8d7391 Merge branches 'w/7.9/bugfix/S3C-4145_fix_error_responses' and 'q/1023/7.9.0/bugfix/S3C-4145_fix_error_responses' into tmp/octopus/q/7.9 2021-03-16 03:43:59 +00:00
bert-e 8f6d8ea7e6 Merge branch 'bugfix/S3C-4145_fix_error_responses' into q/7.9.0 2021-03-16 03:43:59 +00:00
bert-e 6e66608d0e Merge branches 'w/8.1/feature/S3C-4100_manual_adjustment_task' and 'q/998/7.10/feature/S3C-4100_manual_adjustment_task' into tmp/octopus/q/8.1 2021-03-16 03:43:45 +00:00
bert-e 67ec88259c Merge branches 'w/7.10/feature/S3C-4100_manual_adjustment_task' and 'q/998/7.9/feature/S3C-4100_manual_adjustment_task' into tmp/octopus/q/7.10 2021-03-16 03:43:45 +00:00
bert-e 645510460f Merge branches 'w/7.9/feature/S3C-4100_manual_adjustment_task' and 'q/998/7.9.0/feature/S3C-4100_manual_adjustment_task' into tmp/octopus/q/7.9 2021-03-16 03:43:44 +00:00
bert-e d456965c9b Merge branch 'feature/S3C-4100_manual_adjustment_task' into q/7.9.0 2021-03-16 03:43:44 +00:00
bert-e 00a3475bf6 Merge branch 'w/7.10/bugfix/S3C-4145_fix_error_responses' into tmp/octopus/w/8.1/bugfix/S3C-4145_fix_error_responses 2021-03-16 01:53:05 +00:00
bert-e 679cad1397 Merge branch 'w/7.9/bugfix/S3C-4145_fix_error_responses' into tmp/octopus/w/7.10/bugfix/S3C-4145_fix_error_responses 2021-03-16 01:53:05 +00:00
bert-e 0efe45930c Merge branch 'bugfix/S3C-4145_fix_error_responses' into tmp/octopus/w/7.9/bugfix/S3C-4145_fix_error_responses 2021-03-16 01:53:05 +00:00
Taylor McKinnon 30be7afac4 bf(S3C-4145): Fix error response correctly 2021-03-15 18:52:36 -07:00
bert-e 3fb8078677 Merge branches 'development/8.1' and 'w/7.10/feature/S3C-4100_manual_adjustment_task' into tmp/octopus/w/8.1/feature/S3C-4100_manual_adjustment_task 2021-03-16 01:33:13 +00:00
bert-e f6654a266f Merge branches 'development/7.10' and 'w/7.9/feature/S3C-4100_manual_adjustment_task' into tmp/octopus/w/7.10/feature/S3C-4100_manual_adjustment_task 2021-03-16 01:33:13 +00:00
bert-e ff57316fc7 Merge branches 'development/7.9' and 'feature/S3C-4100_manual_adjustment_task' into tmp/octopus/w/7.9/feature/S3C-4100_manual_adjustment_task 2021-03-16 01:33:12 +00:00
Taylor McKinnon 42eba88653 remove unneeded assert 2021-03-15 18:33:03 -07:00
bert-e 9f6c56d682 Merge branches 'w/8.1/bugfix/S3C-4139_allow_only_start_timestamp_in_req' and 'q/1013/7.10/bugfix/S3C-4139_allow_only_start_timestamp_in_req' into tmp/octopus/q/8.1 2021-03-16 00:14:59 +00:00
bert-e c4e978b8ba Merge branches 'w/7.10/bugfix/S3C-4139_allow_only_start_timestamp_in_req' and 'q/1013/7.9/bugfix/S3C-4139_allow_only_start_timestamp_in_req' into tmp/octopus/q/7.10 2021-03-16 00:14:59 +00:00
bert-e 560c2e011b Merge branch 'bugfix/S3C-4139_allow_only_start_timestamp_in_req' into q/7.9.0 2021-03-16 00:14:58 +00:00
bert-e 0619a0316c Merge branches 'w/7.9/bugfix/S3C-4139_allow_only_start_timestamp_in_req' and 'q/1013/7.9.0/bugfix/S3C-4139_allow_only_start_timestamp_in_req' into tmp/octopus/q/7.9 2021-03-16 00:14:58 +00:00
bert-e 28618ac3d8 Merge branches 'w/8.1/bugfix/S3C-4137_add_opId_translation_to_ingest_route' and 'q/1002/7.10/bugfix/S3C-4137_add_opId_translation_to_ingest_route' into tmp/octopus/q/8.1 2021-03-16 00:14:27 +00:00
bert-e a183877a6a Merge branches 'w/7.10/bugfix/S3C-4137_add_opId_translation_to_ingest_route' and 'q/1002/7.9/bugfix/S3C-4137_add_opId_translation_to_ingest_route' into tmp/octopus/q/7.10 2021-03-16 00:14:26 +00:00
bert-e e995b6fc45 Merge branch 'bugfix/S3C-4137_add_opId_translation_to_ingest_route' into q/7.9.0 2021-03-16 00:14:26 +00:00
bert-e 90cda62c3e Merge branches 'w/7.9/bugfix/S3C-4137_add_opId_translation_to_ingest_route' and 'q/1002/7.9.0/bugfix/S3C-4137_add_opId_translation_to_ingest_route' into tmp/octopus/q/7.9 2021-03-16 00:14:26 +00:00
bert-e d467389474 Merge branch 'w/7.10/bugfix/S3C-4139_allow_only_start_timestamp_in_req' into tmp/octopus/w/8.1/bugfix/S3C-4139_allow_only_start_timestamp_in_req 2021-03-15 19:03:04 +00:00
bert-e fcee5e30ed Merge branch 'w/7.9/bugfix/S3C-4139_allow_only_start_timestamp_in_req' into tmp/octopus/w/7.10/bugfix/S3C-4139_allow_only_start_timestamp_in_req 2021-03-15 19:03:04 +00:00
bert-e 078fc12930 Merge branch 'bugfix/S3C-4139_allow_only_start_timestamp_in_req' into tmp/octopus/w/7.9/bugfix/S3C-4139_allow_only_start_timestamp_in_req 2021-03-15 19:03:04 +00:00
Taylor McKinnon 360c5b33d7 linting 2021-03-15 12:02:49 -07:00
bert-e 167c5e36fe Merge branch 'w/7.10/bugfix/S3C-4145_fix_error_responses' into tmp/octopus/w/8.1/bugfix/S3C-4145_fix_error_responses 2021-03-15 18:54:40 +00:00
bert-e cbf1ba99d8 Merge branch 'w/7.9/bugfix/S3C-4145_fix_error_responses' into tmp/octopus/w/7.10/bugfix/S3C-4145_fix_error_responses 2021-03-15 18:54:40 +00:00
bert-e 87d082fb71 Merge branch 'bugfix/S3C-4145_fix_error_responses' into tmp/octopus/w/7.9/bugfix/S3C-4145_fix_error_responses 2021-03-15 18:54:39 +00:00
Taylor McKinnon 35d1daba78 bf(S3C-4145): Fix error response 2021-03-15 11:54:15 -07:00
bert-e 8bd5e56ee9 Merge branch 'w/7.10/bugfix/S3C-4139_allow_only_start_timestamp_in_req' into tmp/octopus/w/8.1/bugfix/S3C-4139_allow_only_start_timestamp_in_req 2021-03-15 18:51:35 +00:00
bert-e 18445deff0 Merge branch 'w/7.9/bugfix/S3C-4139_allow_only_start_timestamp_in_req' into tmp/octopus/w/7.10/bugfix/S3C-4139_allow_only_start_timestamp_in_req 2021-03-15 18:51:35 +00:00
bert-e ed117c1090 Merge branch 'bugfix/S3C-4139_allow_only_start_timestamp_in_req' into tmp/octopus/w/7.9/bugfix/S3C-4139_allow_only_start_timestamp_in_req 2021-03-15 18:51:35 +00:00
Taylor McKinnon 88babe3060 bf(S3C-4139): Allow only start timestamp in listMetrics request 2021-03-15 11:51:02 -07:00
bert-e bd82e9ec8c Merge branch 'w/7.10/bugfix/S3C-4137_add_opId_translation_to_ingest_route' into tmp/octopus/w/8.1/bugfix/S3C-4137_add_opId_translation_to_ingest_route 2021-03-15 17:12:57 +00:00
bert-e 97dabd52dc Merge branch 'w/7.9/bugfix/S3C-4137_add_opId_translation_to_ingest_route' into tmp/octopus/w/7.10/bugfix/S3C-4137_add_opId_translation_to_ingest_route 2021-03-15 17:12:57 +00:00
bert-e 53c45f5318 Merge branch 'bugfix/S3C-4137_add_opId_translation_to_ingest_route' into tmp/octopus/w/7.9/bugfix/S3C-4137_add_opId_translation_to_ingest_route 2021-03-15 17:12:57 +00:00
Taylor McKinnon 10f081e689 bf(S3C-4137): Add operationId translation to ingestion route for v1 compat 2021-03-15 10:12:30 -07:00
bert-e 037b0f5d17 Merge branch 'w/7.10/feature/S3C-4100_manual_adjustment_task' into tmp/octopus/w/8.1/feature/S3C-4100_manual_adjustment_task 2021-03-13 00:55:12 +00:00
bert-e a71d76e85b Merge branch 'w/7.9/feature/S3C-4100_manual_adjustment_task' into tmp/octopus/w/7.10/feature/S3C-4100_manual_adjustment_task 2021-03-13 00:55:12 +00:00
bert-e 76e1a7e0c8 Merge branch 'feature/S3C-4100_manual_adjustment_task' into tmp/octopus/w/7.9/feature/S3C-4100_manual_adjustment_task 2021-03-13 00:55:11 +00:00
Taylor McKinnon 6cf59e2744 ft(S3C-4100): Add task for manual metric adjustment 2021-03-12 16:53:35 -08:00
bert-e a1002dc126 Merge branches 'w/8.1/bugfix/S3C-4085_handle_unauthorized' and 'q/985/7.10/bugfix/S3C-4085_handle_unauthorized' into tmp/octopus/q/8.1 2021-03-09 18:47:12 +00:00
bert-e 2b15684c6c Merge branches 'w/7.10/bugfix/S3C-4085_handle_unauthorized' and 'q/985/7.9/bugfix/S3C-4085_handle_unauthorized' into tmp/octopus/q/7.10 2021-03-09 18:47:12 +00:00
bert-e df40c01b20 Merge branches 'w/7.9/bugfix/S3C-4085_handle_unauthorized' and 'q/985/7.9.0/bugfix/S3C-4085_handle_unauthorized' into tmp/octopus/q/7.9 2021-03-09 18:47:11 +00:00
bert-e 11868de367 Merge branch 'bugfix/S3C-4085_handle_unauthorized' into q/7.9.0 2021-03-09 18:47:11 +00:00
bert-e 6c02f3e109 Merge branches 'w/8.1/bugfix/S3C-4022-bump-warp10' and 'q/989/7.10/bugfix/S3C-4022-bump-warp10' into tmp/octopus/q/8.1 2021-03-09 16:21:55 +00:00
bert-e 06055fa26e Merge branches 'w/7.10/bugfix/S3C-4022-bump-warp10' and 'q/989/7.9/bugfix/S3C-4022-bump-warp10' into tmp/octopus/q/7.10 2021-03-09 16:21:54 +00:00
bert-e 6d26034612 Merge branches 'w/7.9/bugfix/S3C-4022-bump-warp10' and 'q/989/7.9.0/bugfix/S3C-4022-bump-warp10' into tmp/octopus/q/7.9 2021-03-09 16:21:54 +00:00
bert-e 6fe22616d4 Merge branch 'bugfix/S3C-4022-bump-warp10' into q/7.9.0 2021-03-09 16:21:54 +00:00
bert-e 594b34472f Merge branch 'w/7.10/bugfix/S3C-4085_handle_unauthorized' into tmp/octopus/w/8.1/bugfix/S3C-4085_handle_unauthorized 2021-03-09 01:51:19 +00:00
bert-e 98aac303b0 Merge branch 'w/7.9/bugfix/S3C-4085_handle_unauthorized' into tmp/octopus/w/7.10/bugfix/S3C-4085_handle_unauthorized 2021-03-09 01:51:19 +00:00
bert-e 848e74c746 Merge branch 'bugfix/S3C-4085_handle_unauthorized' into tmp/octopus/w/7.9/bugfix/S3C-4085_handle_unauthorized 2021-03-09 01:51:19 +00:00
Taylor McKinnon f7652d58f4 bf(S3C-4085): Don't try to translate resources if auth has failed 2021-03-08 17:50:47 -08:00
bert-e dbfac82feb Merge branch 'w/7.10/bugfix/S3C-4022-bump-warp10' into tmp/octopus/w/8.1/bugfix/S3C-4022-bump-warp10 2021-03-09 01:12:39 +00:00
bert-e 4556bfa4e1 Merge branch 'w/7.9/bugfix/S3C-4022-bump-warp10' into tmp/octopus/w/7.10/bugfix/S3C-4022-bump-warp10 2021-03-09 01:12:39 +00:00
bert-e b1c003ca5e Merge branch 'bugfix/S3C-4022-bump-warp10' into tmp/octopus/w/7.9/bugfix/S3C-4022-bump-warp10 2021-03-09 01:12:39 +00:00
Rahul Padigela 9e1aaf482e bugfix: S3C-4022 bump warp10 2021-03-08 17:12:15 -08:00
bert-e a03af1f05f Merge branches 'w/8.1/bugfix/S3C-4067_handle_multiple_gts_during_checkpoint_creation' and 'q/978/7.10/bugfix/S3C-4067_handle_multiple_gts_during_checkpoint_creation' into tmp/octopus/q/8.1 2021-03-05 19:43:38 +00:00
bert-e 6c22bf8fd9 Merge branch 'bugfix/S3C-4067_handle_multiple_gts_during_checkpoint_creation' into q/7.9.0 2021-03-05 19:43:37 +00:00
bert-e 8861b798e4 Merge branches 'w/7.10/bugfix/S3C-4067_handle_multiple_gts_during_checkpoint_creation' and 'q/978/7.9/bugfix/S3C-4067_handle_multiple_gts_during_checkpoint_creation' into tmp/octopus/q/7.10 2021-03-05 19:43:37 +00:00
bert-e 97129fc407 Merge branches 'w/7.9/bugfix/S3C-4067_handle_multiple_gts_during_checkpoint_creation' and 'q/978/7.9.0/bugfix/S3C-4067_handle_multiple_gts_during_checkpoint_creation' into tmp/octopus/q/7.9 2021-03-05 19:43:37 +00:00
bert-e 387f0f9a9b Merge branch 'w/7.10/bugfix/S3C-4067_handle_multiple_gts_during_checkpoint_creation' into tmp/octopus/w/8.1/bugfix/S3C-4067_handle_multiple_gts_during_checkpoint_creation 2021-03-05 00:37:08 +00:00
bert-e 0af492ce15 Merge branch 'w/7.9/bugfix/S3C-4067_handle_multiple_gts_during_checkpoint_creation' into tmp/octopus/w/7.10/bugfix/S3C-4067_handle_multiple_gts_during_checkpoint_creation 2021-03-05 00:37:08 +00:00
bert-e 62b326f735 Merge branch 'bugfix/S3C-4067_handle_multiple_gts_during_checkpoint_creation' into tmp/octopus/w/7.9/bugfix/S3C-4067_handle_multiple_gts_during_checkpoint_creation 2021-03-05 00:37:08 +00:00
Taylor McKinnon f0ea18b697 bf(S3C-4067): Handle multiple GTS during checkpoint creation 2021-03-04 16:35:59 -08:00
bert-e ec250c4df2 Merge branches 'w/8.1/bugfix/S3C-4049_call_delete_in_slices_rather_than_once' and 'q/968/7.10/bugfix/S3C-4049_call_delete_in_slices_rather_than_once' into tmp/octopus/q/8.1 2021-03-05 00:10:12 +00:00
bert-e 2fb0767235 Merge branch 'bugfix/S3C-4049_call_delete_in_slices_rather_than_once' into q/7.9.0 2021-03-05 00:10:11 +00:00
bert-e 1d3e51f2ac Merge branches 'w/7.10/bugfix/S3C-4049_call_delete_in_slices_rather_than_once' and 'q/968/7.9/bugfix/S3C-4049_call_delete_in_slices_rather_than_once' into tmp/octopus/q/7.10 2021-03-05 00:10:11 +00:00
bert-e ea271f77cb Merge branches 'w/7.9/bugfix/S3C-4049_call_delete_in_slices_rather_than_once' and 'q/968/7.9.0/bugfix/S3C-4049_call_delete_in_slices_rather_than_once' into tmp/octopus/q/7.9 2021-03-05 00:10:11 +00:00
bert-e 5a28fc992e Merge branch 'w/7.10/bugfix/S3C-4049_call_delete_in_slices_rather_than_once' into tmp/octopus/w/8.1/bugfix/S3C-4049_call_delete_in_slices_rather_than_once 2021-03-04 19:12:19 +00:00
bert-e f2ee7345bc Merge branch 'w/7.9/bugfix/S3C-4049_call_delete_in_slices_rather_than_once' into tmp/octopus/w/7.10/bugfix/S3C-4049_call_delete_in_slices_rather_than_once 2021-03-04 19:12:19 +00:00
bert-e 3852b996ee Merge branch 'bugfix/S3C-4049_call_delete_in_slices_rather_than_once' into tmp/octopus/w/7.9/bugfix/S3C-4049_call_delete_in_slices_rather_than_once 2021-03-04 19:12:19 +00:00
Taylor McKinnon d90899e4b4 bf(S3C-4049): Call delete in slices rather than once 2021-03-04 11:11:47 -08:00
bert-e b5a5f70d1c Merge branch 'w/7.10/bugfix/S3C-4057_handle_timestamp_conflicts' into tmp/octopus/w/8.1/bugfix/S3C-4057_handle_timestamp_conflicts 2021-03-03 19:21:01 +00:00
bert-e 5ef784023a Merge branch 'w/7.9/bugfix/S3C-4057_handle_timestamp_conflicts' into tmp/octopus/w/7.10/bugfix/S3C-4057_handle_timestamp_conflicts 2021-03-03 19:21:00 +00:00
bert-e 4cf58762a9 Merge branch 'bugfix/S3C-4057_handle_timestamp_conflicts' into tmp/octopus/w/7.9/bugfix/S3C-4057_handle_timestamp_conflicts 2021-03-03 19:21:00 +00:00
Taylor McKinnon 4c8a08903d bf(S3C-4057): Handle timestamp conflicts during ingestion 2021-03-03 11:20:25 -08:00
bert-e ee814772a7 Merge branch 'w/7.10/improvement/S3C-4034_simplify_soft_limit' into tmp/octopus/w/8.1/improvement/S3C-4034_simplify_soft_limit 2021-02-23 23:21:36 +00:00
bert-e 1c8403552d Merge branch 'w/7.9/improvement/S3C-4034_simplify_soft_limit' into tmp/octopus/w/7.10/improvement/S3C-4034_simplify_soft_limit 2021-02-23 23:21:36 +00:00
bert-e c0d6f9e686 Merge branch 'improvement/S3C-4034_simplify_soft_limit' into tmp/octopus/w/7.9/improvement/S3C-4034_simplify_soft_limit 2021-02-23 23:21:35 +00:00
Taylor McKinnon 72a96ee24e impr(S3C-4034): Simplify soft limit 2021-02-23 15:20:30 -08:00
bert-e 4c016f2838 Merge branch 'w/7.10/bugfix/S3C-4035-increasePodLimits' into tmp/octopus/w/8.1/bugfix/S3C-4035-increasePodLimits 2021-02-23 18:45:10 +00:00
bert-e 1bfb460a72 Merge branch 'bugfix/S3C-4035-increasePodLimits' into tmp/octopus/w/7.10/bugfix/S3C-4035-increasePodLimits 2021-02-23 18:45:10 +00:00
Jonathan Gramain 99574e7305 bugfix: S3C-4035 increase CI worker mem request
Increase the memory request of CI worker pods from 1G to 3G (limit is
still 3G)
2021-02-22 17:43:27 -08:00
bert-e 875b1fed30 Merge branch 'w/7.10/bugfix/S3C-3620-avoidCrashOnRedisError' into tmp/octopus/w/8.1/bugfix/S3C-3620-avoidCrashOnRedisError 2021-02-23 00:18:05 +00:00
bert-e de7f3cc14a Merge branch 'w/7.9/bugfix/S3C-3620-avoidCrashOnRedisError' into tmp/octopus/w/7.10/bugfix/S3C-3620-avoidCrashOnRedisError 2021-02-23 00:18:04 +00:00
Jonathan Gramain e009ad00ed bugfix: S3C-3620 don't raise exception on ioredis client error
When ioredis emits an error (e.g. connection issue), instead of
unconditionally raising it in the RedisClient wrapper, only raise it
if there is at least one listener for the 'error' event.

Some users of RedisClient (v2 cache and reindex tasks) do not set a
listener on 'error' events, resulting in assertions being raised and a
process crash.
2021-02-22 16:17:15 -08:00
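
A minimal sketch of the guard this fix describes, assuming a hypothetical RedisClient wrapper around ioredis (names are illustrative, not the actual utapi source):

    const EventEmitter = require('events');
    const IORedis = require('ioredis');

    class RedisClient extends EventEmitter {
        constructor(config) {
            super();
            this._redis = new IORedis(config);
            this._redis.on('error', err => {
                // An 'error' event with no listener makes Node throw and
                // crash the process, so only re-emit when someone listens.
                if (this.listenerCount('error') > 0) {
                    this.emit('error', err);
                }
            });
        }
    }

Callers that handle failures (e.g. the server process) attach an 'error' listener as before; users that attach none, like the v2 cache and reindex tasks described above, now have the error dropped instead of crashing.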
Jonathan Gramain 7639b3f90a bugfix: S3C-3620 ioredis client error test
Add a unit test that fails when the ioredis client emits an error that
is not caught.
2021-02-22 16:17:15 -08:00
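
A sketch of the kind of regression test described here, assuming a mocha-style runner and the RedisClient sketch above (lazyConnect keeps ioredis from opening a real connection):

    const assert = require('assert');

    describe('RedisClient error handling', () => {
        it('does not crash on an unhandled ioredis error', () => {
            const client = new RedisClient({ port: 6379, lazyConnect: true });
            // Simulate a connection error from the underlying ioredis
            // client; without the listenerCount guard this would throw.
            assert.doesNotThrow(() => {
                client._redis.emit('error', new Error('connection refused'));
            });
        });
    });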
bert-e bc58f13b8d Merge branches 'w/8.1/feature/S3C-4033-bump-version' and 'q/938/7.10/feature/S3C-4033-bump-version' into tmp/octopus/q/8.1 2021-02-23 00:02:08 +00:00
bert-e c0419505de Merge branch 'feature/S3C-4033-bump-version' into q/7.10 2021-02-23 00:02:08 +00:00
Thomas Carmet cae286b4ac Merge remote-tracking branch 'origin/feature/S3C-4033-bump-version' into w/8.1/feature/S3C-4033-bump-version 2021-02-22 15:52:39 -08:00
Thomas Carmet b193423801 S3C-4033 bump version on package.json file 2021-02-22 15:51:26 -08:00
bert-e d862258cfd Merge branches 'w/8.1/bugfix/S3C-4023_add_missing_property' and 'q/918/7.10/bugfix/S3C-4023_add_missing_property' into tmp/octopus/q/8.1 2021-02-22 21:39:22 +00:00
bert-e 892cf53f20 Merge branch 'bugfix/S3C-4023_add_missing_property' into q/7.9.0 2021-02-22 21:39:21 +00:00
bert-e 0fa9b0081c Merge branch 'w/7.10/bugfix/S3C-4030_fix_flaky_testGetStorage' into tmp/octopus/w/8.1/bugfix/S3C-4030_fix_flaky_testGetStorage 2021-02-22 19:29:00 +00:00
Taylor McKinnon a0c3595a72 bf(S3C-4030): Fix flaky test 2021-02-22 11:27:44 -08:00
bert-e b257883e4b Merge branch 'w/7.10/bugfix/S3C-4023_add_missing_property' into tmp/octopus/w/8.1/bugfix/S3C-4023_add_missing_property 2021-02-22 17:53:59 +00:00
Taylor McKinnon 13ba6d65e7 bf(S3C-4023): Add missing property in constructor 2021-02-22 09:46:48 -08:00
bert-e 459464a97f Merge branch 'w/7.10/bugfix/S3C-3970_reduce_scanning_of_event_during_query' into tmp/octopus/w/8.1/bugfix/S3C-3970_reduce_scanning_of_event_during_query 2021-02-19 06:44:01 +00:00
Taylor McKinnon b1904e50a0 bf(S3C-3970): Reduce scanning of unrelated events during metric calculation 2021-02-18 22:42:59 -08:00
bert-e 357f8fe1e4 Merge branch 'w/7.10/improvement/S3C-3971_reduce_event_footprint' into tmp/octopus/w/8.1/improvement/S3C-3971_reduce_event_footprint 2021-02-11 00:17:20 +00:00
Taylor McKinnon 1af6532d83 impr(S3C-3971): Allow filtering of pushed event fields 2021-02-10 16:16:36 -08:00
bert-e 2dee952d57 Merge branch 'w/7.9/bugfix/S3C-3940_prevent_negative_metrics' into tmp/octopus/w/8.1/bugfix/S3C-3940_prevent_negative_metrics 2021-02-09 23:02:18 +00:00
Taylor McKinnon 884669b694 bf(S3C-3940): Prevent reporting of negative metrics 2021-02-09 15:01:45 -08:00
bert-e 71dac22a13 Merge branch 'feature/ZENKO-3110-add-release-stage' into q/8.1 2021-02-06 00:07:12 +00:00
Thomas Carmet 740951dc4b Switching to yarn instead of npm to run utapi 2021-02-05 15:12:20 -08:00
Thomas Carmet 5ef95cbc1e Upgrading nodejs to 10.22 2021-02-05 15:08:00 -08:00
Thomas Carmet 72463f72df ZENKO-3110 setting up release stage for utapi 2021-02-05 14:41:42 -08:00
bert-e bbde7b7644 Merge branch 'bugfix/S3C-3937_handle_phd_in_reindex' into q/7.9 2021-02-05 22:03:02 +00:00
bert-e e424a00bc6 Merge branches 'w/8.1/bugfix/S3C-3937_handle_phd_in_reindex' and 'q/898/7.9/bugfix/S3C-3937_handle_phd_in_reindex' into tmp/octopus/q/8.1 2021-02-05 22:03:02 +00:00
bert-e 0afb78e7ae Merge branch 'bugfix/S3C-3937_handle_phd_in_reindex' into tmp/octopus/w/8.1/bugfix/S3C-3937_handle_phd_in_reindex 2021-02-05 21:01:35 +00:00
Taylor McKinnon f70f7e1a74 bf(S3C-3937): Handle PHD objects returned by bucketd 2021-02-05 12:59:48 -08:00
bert-e df5e11eb9d Merge branch 'bugfix/S3C-3830-fetch-warp10-results' into tmp/octopus/w/8.1/bugfix/S3C-3830-fetch-warp10-results 2021-02-02 23:28:05 +00:00
Taylor McKinnon 947dcad5fb update method for seed generation, remove unused initial token gen 2021-02-02 15:27:53 -08:00
bert-e 97406a81a2 Merge branch 'bugfix/S3C-3830-fetch-warp10-results' into tmp/octopus/w/8.1/bugfix/S3C-3830-fetch-warp10-results 2021-02-02 20:33:41 +00:00
Rahul Padigela f033a58c1b bugfix: S3C-3830 bump warp10 2021-02-02 12:32:00 -08:00
bert-e 684c3389e9 Merge branch 'improvement/S3C-3800_rework_failover' into tmp/octopus/w/8.1/improvement/S3C-3800_rework_failover 2021-01-30 00:27:10 +00:00
Taylor McKinnon 0d2b21038e impr(S3C-3800): Refactor warp 10 failover 2021-01-29 16:26:12 -08:00
bert-e 9b1f55ec76 Merge branch 'feature/S3C-3609_hard_disk_limit' into tmp/octopus/w/8.1/feature/S3C-3609_hard_disk_limit 2021-01-29 20:49:10 +00:00
Taylor McKinnon a74f0dbb9b ft(S3C-3609): Add hard limit 2021-01-29 12:48:42 -08:00
bert-e 56a368d9a6 Merge branch 'feature/S3C-3707_soft_disk_limit' into tmp/octopus/w/8.1/feature/S3C-3707_soft_disk_limit 2021-01-28 22:14:07 +00:00
Taylor McKinnon 25ef0c9d0d ft(S3C-3707): Add disk usage soft limit task 2021-01-28 14:13:00 -08:00
bert-e 32e14b5099 Merge branch 'feature/S3C-3707_fix_tests' into tmp/octopus/w/8.1/feature/S3C-3707_fix_tests 2021-01-27 22:28:15 +00:00
Taylor McKinnon ab523ff579 ft(S3C-3707): Fix functional tests 2021-01-27 14:27:45 -08:00
bert-e 2349cf3791 Merge branches 'w/8.1/feature/S3C-3812_warp10_deletion' and 'q/874/7.9/feature/S3C-3812_warp10_deletion' into tmp/octopus/q/8.1 2021-01-15 23:19:15 +00:00
bert-e a674fab416 Merge branch 'feature/S3C-3812_warp10_deletion' into q/7.9 2021-01-15 23:19:15 +00:00
bert-e 3ca3e7fd32 Merge branch 'feature/S3C-3812_warp10_deletion' into tmp/octopus/w/8.1/feature/S3C-3812_warp10_deletion 2021-01-15 23:15:51 +00:00
Taylor McKinnon 8317d7422e ft(S3C-3812): Add support for deletion in warp 10 client 2021-01-15 15:15:22 -08:00
bert-e bb559f08c8 Merge branch 'bugfix/S3C-3764_upgrade_@senx/warp10' into tmp/octopus/w/8.1/bugfix/S3C-3764_upgrade_@senx/warp10 2021-01-13 23:38:57 +00:00
Taylor McKinnon ffd58af6aa bf(S3C-3764): Update @senx/warp10 2021-01-13 15:38:23 -08:00
bert-e e6eb18cc4c Merge branch 'feature/S3C-3721_monitor_disk_usage_task' into q/7.9 2021-01-12 18:36:40 +00:00
bert-e 0e83b9db55 Merge branches 'w/8.1/feature/S3C-3721_monitor_disk_usage_task' and 'q/871/7.9/feature/S3C-3721_monitor_disk_usage_task' into tmp/octopus/q/8.1 2021-01-12 18:36:40 +00:00
bert-e 58ff50b734 Merge branch 'bugfix/S3C-3725_translate_listBucketMulitpartUploads_in_migrate' into q/7.9 2021-01-12 17:38:45 +00:00
bert-e 3ff4fc9cc1 Merge branches 'w/8.1/bugfix/S3C-3725_translate_listBucketMulitpartUploads_in_migrate' and 'q/869/7.9/bugfix/S3C-3725_translate_listBucketMulitpartUploads_in_migrate' into tmp/octopus/q/8.1 2021-01-12 17:38:45 +00:00
bert-e 22cead971d Merge branch 'feature/S3C-3721_monitor_disk_usage_task' into tmp/octopus/w/8.1/feature/S3C-3721_monitor_disk_usage_task 2021-01-12 01:12:02 +00:00
Taylor McKinnon f7972f1ea1 ft(S3C-3721): Add generic disk usage monitor task 2021-01-11 17:11:30 -08:00
bert-e 98f528cf62 Merge branch 'bugfix/S3C-3725_translate_listBucketMulitpartUploads_in_migrate' into tmp/octopus/w/8.1/bugfix/S3C-3725_translate_listBucketMulitpartUploads_in_migrate 2021-01-11 19:41:28 +00:00
bert-e daf0b2c8d9 Merge branch 'feature/S3C-3524_update_warp10' into tmp/octopus/w/8.1/feature/S3C-3524_update_warp10 2021-01-07 22:13:22 +00:00
Taylor McKinnon 1d8183cf23 ft(S3C-3524): upgrade warp10 to 2.7.2 2021-01-07 14:07:29 -08:00
Taylor McKinnon a8491a3241 Merge remote-tracking branch 'origin/feature/S3C-3771_add_client_tls_support' into w/8.1/feature/S3C-3771_add_client_tls_support 2021-01-05 09:54:52 -08:00
Taylor McKinnon 9182e60d0e ft(S3C-3771): Add support for Utapiv2 client to communicate over TLS 2021-01-05 09:52:45 -08:00
bert-e c8a36e45bb Merge branch 'bugfix/S3C-3767_fix_internal_tls_config' into tmp/octopus/w/8.1/bugfix/S3C-3767_fix_internal_tls_config 2020-12-30 23:09:19 +00:00
Taylor McKinnon c5c4c79afb bf(S3C-3767): Fix internal TLS config for Vault 2020-12-30 15:07:35 -08:00
bert-e 97784eaa70 Merge branch 'bugfix/S3C-3763_batch_ingestion_of_shards' into tmp/octopus/w/8.1/bugfix/S3C-3763_batch_ingestion_of_shards 2020-12-28 19:22:40 +00:00
Taylor McKinnon 0fb3b79e71 bf(S3C-3763): Batch shard ingestion 2020-12-28 11:18:54 -08:00
Taylor McKinnon b2dfe765c8 bf(S3C-3725): translate listBucketMultipartUploads during migration 2020-12-16 13:02:31 -08:00
bert-e cd88e9f4bf Merge branches 'w/8.1/bugfix/S3C-3505_support_bucket_option_in_v1_reindex' and 'q/853/7.9/bugfix/S3C-3505_support_bucket_option_in_v1_reindex' into tmp/octopus/q/8.1 2020-12-15 19:06:43 +00:00
bert-e 558840c55c Merge branch 'bugfix/S3C-3505_support_bucket_option_in_v1_reindex' into q/7.9 2020-12-15 19:06:43 +00:00
bert-e c784f09bc0 Merge branch 'bugfix/S3C-3505_support_bucket_option_in_v1_reindex' into tmp/octopus/w/8.1/bugfix/S3C-3505_support_bucket_option_in_v1_reindex 2020-12-15 18:51:54 +00:00
Taylor McKinnon 1e4b7bd9f2 bf(S3C-3505): Add support for --bucket flag to s3_reindex.py 2020-12-15 10:49:26 -08:00
bert-e d61f9997c8 Merge branch 'bugfix/S3C-3680_fix_mpu_edgecase_in_migration' into tmp/octopus/w/8.1/bugfix/S3C-3680_fix_mpu_edgecase_in_migration 2020-12-11 21:58:07 +00:00
Taylor McKinnon e61655baea bf(S3C-3680): Handle pending MPU edgecase in migration 2020-12-11 13:57:29 -08:00
bert-e d911aadd15 Merge branch 'bugfix/S3C-3696_bump_default_java_max_heap' into q/7.9 2020-12-11 21:41:28 +00:00
bert-e d9555e0038 Merge branches 'w/8.1/bugfix/S3C-3696_bump_default_java_max_heap' and 'q/849/7.9/bugfix/S3C-3696_bump_default_java_max_heap' into tmp/octopus/q/8.1 2020-12-11 21:41:28 +00:00
bert-e e5a814aa13 Merge branch 'bugfix/S3C-3696_bump_default_java_max_heap' into tmp/octopus/w/8.1/bugfix/S3C-3696_bump_default_java_max_heap 2020-12-11 20:15:43 +00:00
Taylor McKinnon adf9ee325f bf(S3C-3696): Bump default java max heap to 4GiB 2020-12-11 12:15:08 -08:00
bert-e acf5bc273c Merge branch 'bugfix/S3C-3689_fix_incorrect_Date.now' into q/7.9 2020-12-11 20:08:14 +00:00
bert-e 1ed1b901c2 Merge branches 'w/8.1/bugfix/S3C-3689_fix_incorrect_Date.now' and 'q/845/7.9/bugfix/S3C-3689_fix_incorrect_Date.now' into tmp/octopus/q/8.1 2020-12-11 20:08:14 +00:00
bert-e 8ebb10c051 Merge branch 'bugfix/S3C-3679_cleanup_closed_redis_client' into q/7.9 2020-12-11 19:53:54 +00:00
bert-e 04a8021fe5 Merge branches 'w/8.1/bugfix/S3C-3679_cleanup_closed_redis_client' and 'q/841/7.9/bugfix/S3C-3679_cleanup_closed_redis_client' into tmp/octopus/q/8.1 2020-12-11 19:53:54 +00:00
bert-e d80bd66387 Merge branch 'bugfix/S3C-3679_cleanup_closed_redis_client' into tmp/octopus/w/8.1/bugfix/S3C-3679_cleanup_closed_redis_client 2020-12-11 19:49:20 +00:00
Taylor McKinnon 1caa33cf9e remove usage of -ci variant of warp 10 2020-12-09 14:21:41 -08:00
bert-e 4678fdae05 Merge branch 'bugfix/S3C-3689_fix_incorrect_Date.now' into tmp/octopus/w/8.1/bugfix/S3C-3689_fix_incorrect_Date.now 2020-12-08 19:46:35 +00:00
Taylor McKinnon 304bca04c7 bf(S3C-3689): Fix InterpolatedClock 2020-12-08 11:46:01 -08:00
Taylor McKinnon c05d2dbaf6 bf(S3C-3679): cleanup redis client on disconnect 2020-12-06 10:31:08 -08:00
bert-e 0814b59fda Merge branch 'bugfix/S3C-3553_improve_redis_reconnection_logic' into tmp/octopus/w/7.9/bugfix/S3C-3553_improve_redis_reconnection_logic 2020-11-18 23:53:16 +00:00
bert-e 3f7ea3e121 Merge branch 'w/7.9/bugfix/S3C-3553_improve_redis_reconnection_logic' into tmp/octopus/w/8.1/bugfix/S3C-3553_improve_redis_reconnection_logic 2020-11-18 23:53:16 +00:00
Taylor McKinnon d17e5545b3 bf(S3C-3553): Prevent leaking timeout event handlers 2020-11-18 11:46:30 -08:00
bert-e 3d9d949b05 Merge branch 'bugfix/S3C-3516_fix_authz_for_account_lvl_metrics' into q/7.9 2020-11-12 18:16:20 +00:00
bert-e f136d7d994 Merge branches 'w/8.1/bugfix/S3C-3516_fix_authz_for_account_lvl_metrics' and 'q/823/7.9/bugfix/S3C-3516_fix_authz_for_account_lvl_metrics' into tmp/octopus/q/8.1 2020-11-12 18:16:20 +00:00
bert-e d36af35db5 Merge branch 'improvement/S3C-3520_drop_hex_encoding_for_warp10_data' into q/7.9 2020-11-11 23:50:04 +00:00
bert-e a1e6c4d11a Merge branches 'w/8.1/improvement/S3C-3520_drop_hex_encoding_for_warp10_data' and 'q/828/7.9/improvement/S3C-3520_drop_hex_encoding_for_warp10_data' into tmp/octopus/q/8.1 2020-11-11 23:50:04 +00:00
bert-e 97014cf67b Merge branch 'bugfix/S3C-3516_fix_authz_for_account_lvl_metrics' into tmp/octopus/w/8.1/bugfix/S3C-3516_fix_authz_for_account_lvl_metrics 2020-11-11 23:49:42 +00:00
Taylor McKinnon 61cc5de8b5 bf(S3C-3516): Rework authz and accountId conversion 2020-11-11 15:49:12 -08:00
bert-e c8ac4cf688 Merge branch 'improvement/S3C-3520_drop_hex_encoding_for_warp10_data' into tmp/octopus/w/8.1/improvement/S3C-3520_drop_hex_encoding_for_warp10_data 2020-11-11 22:08:42 +00:00
Taylor McKinnon eb785bf3b3 impr(S3C-3520): Remove intermediate hex encoding for warp10 datapoints 2020-11-11 14:03:12 -08:00
bert-e 4554828a52 Merge branch 'bugfix/S3C-3514_add_missing_logline' into tmp/octopus/w/8.1/bugfix/S3C-3514_add_missing_logline 2020-11-09 23:46:53 +00:00
Taylor McKinnon 64c2f7307a bf(S3C-3514): Add missing call to responseLoggerMiddleware in errorMiddleware 2020-11-09 15:46:21 -08:00
bert-e 4caa7f5641 Merge branch 'w/7.9/feature/S3C-3484_extend_warp10_image' into tmp/octopus/w/8.1/feature/S3C-3484_extend_warp10_image 2020-10-28 22:06:32 +00:00
bert-e 9b800a062f Merge branch 'feature/S3C-3484_extend_warp10_image' into tmp/octopus/w/7.8/feature/S3C-3484_extend_warp10_image 2020-10-28 22:06:31 +00:00
Taylor McKinnon 99d3f7e2b8 Update images/warp10/s6/cont-init.d/20-install-config
Co-authored-by: miniscruff <halfpint1170@gmail.com>
2020-10-28 15:06:26 -07:00
bert-e 92ffbcc3d7 Merge branch 'w/7.9/feature/S3C-3484_extend_warp10_image' into tmp/octopus/w/8.1/feature/S3C-3484_extend_warp10_image 2020-10-28 19:10:21 +00:00
bert-e 77d311b01d Merge branch 'feature/S3C-3484_extend_warp10_image' into tmp/octopus/w/7.8/feature/S3C-3484_extend_warp10_image 2020-10-28 19:10:21 +00:00
Taylor McKinnon 90ac30d288 ft(S3C-3484): Add Sensision and extend warp10 tunables 2020-10-28 12:09:49 -07:00
bert-e 1197733b17 Merge branch 'w/7.9/bugfix/S3C-3485_fix_listMetrics_handler' into tmp/octopus/w/8.1/bugfix/S3C-3485_fix_listMetrics_handler 2020-10-27 23:39:12 +00:00
bert-e 72383df256 Merge branch 'bugfix/S3C-3485_fix_listMetrics_handler' into tmp/octopus/w/7.8/bugfix/S3C-3485_fix_listMetrics_handler 2020-10-27 23:39:12 +00:00
Taylor McKinnon 7e8dedcc83 bf(S3C-3485): fix listMetrics Handler 2020-10-27 16:20:53 -07:00
bert-e 78bc6290b2 Merge branch 'w/7.9/bugfix/S3C-3483_adjust_default_schedules' into tmp/octopus/w/8.1/bugfix/S3C-3483_adjust_default_schedules 2020-10-27 20:21:53 +00:00
bert-e 2073211a60 Merge branch 'bugfix/S3C-3483_adjust_default_schedules' into tmp/octopus/w/7.8/bugfix/S3C-3483_adjust_default_schedules 2020-10-27 20:21:53 +00:00
Taylor McKinnon 1fd8dbbc6e bf(S3C-3483): Adjust default schedules to specify a specific second 2020-10-27 13:19:50 -07:00
bert-e 124744b562 Merge branches 'w/8.1/bugfix/S3C-3426_fix_user_creds_support' and 'q/785/7.9/bugfix/S3C-3426_fix_user_creds_support' into tmp/octopus/q/8.1 2020-10-23 05:47:28 +00:00
bert-e 478d5d69db Merge branches 'w/7.8/bugfix/S3C-3426_fix_user_creds_support' and 'q/785/7.8.0/bugfix/S3C-3426_fix_user_creds_support' into tmp/octopus/q/7.8 2020-10-23 05:47:27 +00:00
bert-e 206348c5eb Merge branch 'bugfix/S3C-3426_fix_user_creds_support' into q/7.8.0 2020-10-23 05:47:27 +00:00
bert-e 4a806da678 Merge branches 'w/8.1/bugfix/S3C-3446_convert_account_to_canonical_id' and 'q/794/7.9/bugfix/S3C-3446_convert_account_to_canonical_id' into tmp/octopus/q/8.1 2020-10-23 05:33:40 +00:00
bert-e b198553bf1 Merge branches 'w/7.8/bugfix/S3C-3446_convert_account_to_canonical_id' and 'q/794/7.8.0/bugfix/S3C-3446_convert_account_to_canonical_id' into tmp/octopus/q/7.8 2020-10-23 05:33:39 +00:00
bert-e 3413fbf977 Merge branch 'bugfix/S3C-3446_convert_account_to_canonical_id' into q/7.8.0 2020-10-23 05:33:39 +00:00
bert-e 3f4f34976c Merge branch 'w/7.9/bugfix/S3C-3446_convert_account_to_canonical_id' into tmp/octopus/w/8.1/bugfix/S3C-3446_convert_account_to_canonical_id 2020-10-23 00:37:32 +00:00
bert-e 8a934e1b51 Merge branch 'bugfix/S3C-3446_convert_account_to_canonical_id' into tmp/octopus/w/7.8/bugfix/S3C-3446_convert_account_to_canonical_id 2020-10-23 00:37:31 +00:00
bert-e cf17b56976 Merge branch 'feature/S3C-3010_add_migrate_task' into q/7.8.0 2020-10-23 00:23:34 +00:00
bert-e 74b8c91244 Merge branches 'w/8.1/feature/S3C-3010_add_migrate_task' and 'q/769/7.9/feature/S3C-3010_add_migrate_task' into tmp/octopus/q/8.1 2020-10-23 00:23:34 +00:00
bert-e 97b65aaa5a Merge branches 'w/7.8/feature/S3C-3010_add_migrate_task' and 'q/769/7.8.0/feature/S3C-3010_add_migrate_task' into tmp/octopus/q/7.8 2020-10-23 00:23:34 +00:00
Taylor McKinnon 047a82f17c bf(S3C-3446): Convert accountId to canonicalId 2020-10-21 16:47:01 -07:00
bert-e df4e96132c Merge branch 'w/7.9/feature/S3C-3010_add_migrate_task' into tmp/octopus/w/8.1/feature/S3C-3010_add_migrate_task 2020-10-21 23:45:37 +00:00
bert-e a76cc118e9 Merge branch 'feature/S3C-3010_add_migrate_task' into tmp/octopus/w/7.8/feature/S3C-3010_add_migrate_task 2020-10-21 23:45:36 +00:00
Taylor McKinnon 79e0dbbc0d ft(S3C-3010): Add Migrate task 2020-10-21 16:44:49 -07:00
bert-e ffe3ece284 Merge branch 'w/7.9/bugfix/S3C-3426_fix_user_creds_support' into tmp/octopus/w/8.1/bugfix/S3C-3426_fix_user_creds_support 2020-10-21 23:42:51 +00:00
bert-e 979a8cc804 Merge branch 'bugfix/S3C-3426_fix_user_creds_support' into tmp/octopus/w/7.8/bugfix/S3C-3426_fix_user_creds_support 2020-10-21 23:42:50 +00:00
Taylor McKinnon d060e82755 bf(S3C-3426): Fix user credential support 2020-10-21 16:40:51 -07:00
bert-e d6d53eed8a Merge branch 'w/7.9/bugfix/S3C-3447_switch_to_node_schedule' into tmp/octopus/w/8.1/bugfix/S3C-3447_switch_to_node_schedule 2020-10-21 21:15:29 +00:00
bert-e bb3017e3b5 Merge branch 'bugfix/S3C-3447_switch_to_node_schedule' into tmp/octopus/w/7.8/bugfix/S3C-3447_switch_to_node_schedule 2020-10-21 21:15:28 +00:00
Taylor McKinnon c8076fa810 bf(S3C-3447): Switch to node-schedule 2020-10-21 14:04:58 -07:00
bert-e e3a6844fc5 Merge branch 'w/7.9/bugfix/S3C-3438_add_missing_reindex_schema' into tmp/octopus/w/8.1/bugfix/S3C-3438_add_missing_reindex_schema 2020-10-19 22:30:06 +00:00
bert-e 1f010bebde Merge branch 'bugfix/S3C-3438_add_missing_reindex_schema' into tmp/octopus/w/7.8/bugfix/S3C-3438_add_missing_reindex_schema 2020-10-19 22:30:06 +00:00
Taylor McKinnon d9737e74ed bf(S3C-3438): Add missing reindexSchedule schema entry 2020-10-19 15:27:51 -07:00
bert-e 58c73db7c3 Merge branch 'w/7.9/bugfix/S3C-2576-update-vaultclient' into tmp/octopus/w/8.1/bugfix/S3C-2576-update-vaultclient 2020-10-13 14:48:06 +00:00
Anurag Mittal d98bac3a75 bugfix: S3C-2576-updated-vaultclient 2020-10-13 16:46:48 +02:00
bert-e 9a7ea1e564 Merge branch 'w/7.9/bugfix/S3C-3322_bump_vaultclient' into tmp/octopus/w/8.1/bugfix/S3C-3322_bump_vaultclient 2020-10-12 20:56:10 +00:00
bert-e 117f2a37cd Merge branch 'bugfix/S3C-3322_bump_vaultclient' into tmp/octopus/w/7.8/bugfix/S3C-3322_bump_vaultclient 2020-10-12 20:56:09 +00:00
Taylor McKinnon 44f5279796 bf(S3C-3322): Bump vaultclient 2020-10-12 13:55:05 -07:00
bert-e 3a6e0a4c40 Merge branch 'w/7.9/bugfix/S3C-3424_remove_creds_from_client' into tmp/octopus/w/8.1/bugfix/S3C-3424_remove_creds_from_client 2020-10-08 23:17:10 +00:00
bert-e 641dbdc9f8 Merge branch 'bugfix/S3C-3424_remove_creds_from_client' into tmp/octopus/w/7.8/bugfix/S3C-3424_remove_creds_from_client 2020-10-08 23:17:09 +00:00
Taylor McKinnon f80b4e230c bf(S3C-3424): Remove creds from UtapiClient 2020-10-08 14:34:01 -07:00
bert-e 62498fa330 Merge branches 'w/8.1/feature/S3C-3423_add_client_ip_limiting_middleware' and 'q/741/7.9/feature/S3C-3423_add_client_ip_limiting_middleware' into tmp/octopus/q/8.1 2020-10-08 20:54:35 +00:00
bert-e ad31123c0f Merge branches 'w/7.8/feature/S3C-3423_add_client_ip_limiting_middleware' and 'q/741/7.8.0/feature/S3C-3423_add_client_ip_limiting_middleware' into tmp/octopus/q/7.8 2020-10-08 20:54:35 +00:00
bert-e 3401f9f89c Merge branch 'feature/S3C-3423_add_client_ip_limiting_middleware' into q/7.8.0 2020-10-08 20:54:35 +00:00
bert-e e9a252f3c4 Merge branch 'w/7.9/feature/S3C-3423_add_client_ip_limiting_middleware' into tmp/octopus/w/8.1/feature/S3C-3423_add_client_ip_limiting_middleware 2020-10-08 20:50:22 +00:00
bert-e c382a6647c Merge branch 'feature/S3C-3423_add_client_ip_limiting_middleware' into tmp/octopus/w/7.8/feature/S3C-3423_add_client_ip_limiting_middleware 2020-10-08 20:50:22 +00:00
Taylor McKinnon 3f0f7360d7 ft(S3C-3423): Add client ip limiting middleware and openapi spec support 2020-10-08 13:49:00 -07:00
bert-e 1b1d1ce35c Merge branch 'w/7.9/feature/S3C-3418_change_on_wire_ingestion_format_json' into tmp/octopus/w/8.1/feature/S3C-3418_change_on_wire_ingestion_format_json 2020-10-07 22:32:37 +00:00
bert-e dfbe4d61ab Merge branch 'feature/S3C-3418_change_on_wire_ingestion_format_json' into tmp/octopus/w/7.8/feature/S3C-3418_change_on_wire_ingestion_format_json 2020-10-07 22:32:37 +00:00
Taylor McKinnon 6e26153dfa ft(S3C-3418): Change wire format to json for warp10 ingestion 2020-10-07 15:32:04 -07:00
bert-e 0b308adf07 Merge branches 'w/8.1/bugfix/S3C-3307_handle_deleted_buckets_in_reindex_port' and 'q/726/7.9/bugfix/S3C-3307_handle_deleted_buckets_in_reindex_port' into tmp/octopus/q/8.1 2020-10-06 22:55:41 +00:00
bert-e f24da024c9 Merge branch 'bugfix/S3C-3307_handle_deleted_buckets_in_reindex_port' into q/7.8.0 2020-10-06 22:55:40 +00:00
bert-e c718de352a Merge branches 'w/7.8/bugfix/S3C-3307_handle_deleted_buckets_in_reindex_port' and 'q/726/7.8.0/bugfix/S3C-3307_handle_deleted_buckets_in_reindex_port' into tmp/octopus/q/7.8 2020-10-06 22:55:40 +00:00
bert-e 73735f0f27 Merge branch 'w/7.9/bugfix/S3C-3307_handle_deleted_buckets_in_reindex_port' into tmp/octopus/w/8.1/bugfix/S3C-3307_handle_deleted_buckets_in_reindex_port 2020-10-06 21:19:03 +00:00
bert-e af4937019c Merge branch 'bugfix/S3C-3307_handle_deleted_buckets_in_reindex_port' into tmp/octopus/w/7.8/bugfix/S3C-3307_handle_deleted_buckets_in_reindex_port 2020-10-06 21:19:03 +00:00
Taylor McKinnon 739e61ff7a bf(S3C-3307): Handle deleted buckets in reindex 2020-10-06 14:18:41 -07:00
bert-e 7b1ed984e8 Merge branches 'w/8.1/bugfix/S3C-3307_handle_deleted_buckets_in_reindex' and 'q/695/7.9/bugfix/S3C-3307_handle_deleted_buckets_in_reindex' into tmp/octopus/q/8.1 2020-10-06 19:08:10 +00:00
bert-e 700df82aa4 Merge branch 'w/7.8/bugfix/S3C-3307_handle_deleted_buckets_in_reindex' into tmp/octopus/q/7.8 2020-10-06 19:08:09 +00:00
bert-e 0adb775a1c Merge branch 'w/7.9/bugfix/S3C-3308_fix_mpuShadowBucket_counters_for_v2' into tmp/octopus/w/8.1/bugfix/S3C-3308_fix_mpuShadowBucket_counters_for_v2 2020-10-05 17:42:07 +00:00
Taylor McKinnon e7b8d0c7ce bf(S3C-3308): Fix mpuShadowBucket counter in reindex v2 2020-10-05 10:40:36 -07:00
bert-e a64b25c26e Merge branch 'w/7.9/bugfix/S3C-3307_handle_deleted_buckets_in_reindex' into tmp/octopus/w/8.1/bugfix/S3C-3307_handle_deleted_buckets_in_reindex 2020-10-05 17:36:57 +00:00
bert-e 8b827052dd Merge branch 'bugfix/S3C-3307_handle_deleted_buckets_in_reindex' into tmp/octopus/w/7.8/bugfix/S3C-3307_handle_deleted_buckets_in_reindex 2020-10-05 17:36:57 +00:00
bert-e 392636df76 Merge branch 'w/7.8/bugfix/S3C-3376_remove_requirement_for_access_key' into tmp/octopus/w/8.1/bugfix/S3C-3376_remove_requirement_for_access_key 2020-10-01 18:14:21 +00:00
Taylor McKinnon 2a10880f22 bf(S3C-3376): Remove requirement for keys from client 2020-10-01 11:13:54 -07:00
bert-e 32920a5a97 Merge branches 'w/8.1/feature/S3C-3382_add_configuration_for_ingestion_speed' and 'q/698/7.8/feature/S3C-3382_add_configuration_for_ingestion_speed' into tmp/octopus/q/8.1 2020-10-01 17:25:48 +00:00
bert-e ac40a004cd Merge branch 'feature/S3C-3382_add_configuration_for_ingestion_speed' into q/7.8 2020-10-01 17:25:47 +00:00
bert-e f95244cf29 Merge branch 'bugfix/S3C-3361_bump_v2_toggle_timeout' into q/7.8 2020-09-30 00:11:03 +00:00
bert-e b875c72451 Merge branches 'w/8.1/bugfix/S3C-3361_bump_v2_toggle_timeout' and 'q/702/7.8/bugfix/S3C-3361_bump_v2_toggle_timeout' into tmp/octopus/q/8.1 2020-09-30 00:11:03 +00:00
bert-e abbbad9f5f Merge branch 'bugfix/S3C-3361_bump_v2_toggle_timeout' into tmp/octopus/w/8.1/bugfix/S3C-3361_bump_v2_toggle_timeout 2020-09-29 21:43:20 +00:00
Taylor McKinnon c02f52bcbc bf(S3C-3361): Bump timeout for shim test 2020-09-29 14:37:53 -07:00
bert-e 4028b265f3 Merge branch 'feature/S3C-3382_add_configuration_for_ingestion_speed' into tmp/octopus/w/8.1/feature/S3C-3382_add_configuration_for_ingestion_speed 2020-09-29 21:33:15 +00:00
Taylor McKinnon 7dd8a1a585 ft(S3C-3382): Make ingestion lag and shard size configurable 2020-09-29 14:19:28 -07:00
Taylor McKinnon c2254443dc print -> _log.info 2020-09-29 12:28:52 -07:00
Taylor McKinnon dd48282e9c bf(S3C-3307): Handle deleted buckets in reindex 2020-09-29 12:08:02 -07:00
bert-e 9724619746 Merge branch 'bugfix/S3C-3308_fix_mpuShadowBucket_counters' into tmp/octopus/w/7.8/bugfix/S3C-3308_fix_mpuShadowBucket_counters 2020-09-28 20:05:41 +00:00
bert-e 6051cada33 Merge branch 'w/7.8/bugfix/S3C-3308_fix_mpuShadowBucket_counters' into tmp/octopus/w/8.1/bugfix/S3C-3308_fix_mpuShadowBucket_counters 2020-09-28 20:05:41 +00:00
Taylor McKinnon 10896e10b2 bf(S3C-3308): Fix mpuShadowBucket counter in reindex 2020-09-28 13:05:06 -07:00
bert-e dd1ef6860e Merge branch 'bugfix/S3C-3363_add_missing_logger_for_replay' into tmp/octopus/w/8.1/bugfix/S3C-3363_add_missing_logger_for_replay 2020-09-24 20:54:47 +00:00
Taylor McKinnon c437ece6ab bf(S3C-3363): Add missing logger for replay 2020-09-24 13:54:04 -07:00
bert-e 33c1af303d Merge branches 'w/8.1/bugfix/S3C-3362_fix_reindex_default_schedule' and 'q/683/7.8/bugfix/S3C-3362_fix_reindex_default_schedule' into tmp/octopus/q/8.1 2020-09-24 02:53:34 +00:00
bert-e fd25a14689 Merge branch 'bugfix/S3C-3362_fix_reindex_default_schedule' into q/7.8 2020-09-24 02:53:34 +00:00
bert-e 3d79444672 Merge branches 'w/8.1/feature/S3C-3324_redis_backed_routes' and 'q/668/7.8/feature/S3C-3324_redis_backed_routes' into tmp/octopus/q/8.1 2020-09-24 02:53:08 +00:00
bert-e d8a75fbca0 Merge branch 'feature/S3C-3324_redis_backed_routes' into q/7.8 2020-09-24 02:53:07 +00:00
bert-e 1bd45ffd68 Merge branch 'bugfix/S3C-3362_fix_reindex_default_schedule' into tmp/octopus/w/8.1/bugfix/S3C-3362_fix_reindex_default_schedule 2020-09-24 01:10:25 +00:00
Taylor McKinnon a6d24952e7 bf(S3C-3362): fix default reindex schedule 2020-09-23 18:09:33 -07:00
bert-e f19018f9e7 Merge branch 'feature/S3C-3324_redis_backed_routes' into tmp/octopus/w/8.1/feature/S3C-3324_redis_backed_routes 2020-09-24 00:48:00 +00:00
Taylor McKinnon 949da08ccd Fix missing auth header log 2020-09-23 17:47:16 -07:00
Taylor McKinnon 495c72b364 ft(S3C-3324): Add getStorage route 2020-09-23 17:47:10 -07:00
bert-e 9a5e0f4987 Merge branch 'bugfix/S3C-1997_fix_ioredis_failover' into q/7.8 2020-09-24 00:07:29 +00:00
bert-e 87658b4351 Merge branches 'w/8.1/bugfix/S3C-1997_fix_ioredis_failover' and 'q/671/7.8/bugfix/S3C-1997_fix_ioredis_failover' into tmp/octopus/q/8.1 2020-09-24 00:07:29 +00:00
bert-e 91eed09651 Merge branch 'bugfix/S3C-1997_fix_ioredis_failover' into tmp/octopus/w/8.1/bugfix/S3C-1997_fix_ioredis_failover 2020-09-24 00:03:21 +00:00
Taylor McKinnon 4af996c637 bf(S3C-1997): Fix ioredis failover for sentinels 2020-09-23 17:02:30 -07:00
bert-e fef6abfe76 Merge branch 'bugfix/S3C-3358_fix_metrics_resp_for_single_resource' into q/7.8 2020-09-23 18:20:27 +00:00
bert-e 728d54501b Merge branches 'w/8.1/bugfix/S3C-3358_fix_metrics_resp_for_single_resource' and 'q/674/7.8/bugfix/S3C-3358_fix_metrics_resp_for_single_resource' into tmp/octopus/q/8.1 2020-09-23 18:20:27 +00:00
bert-e 911cfb3c36 Merge branch 'bugfix/S3C-3358_fix_metrics_resp_for_single_resource' into tmp/octopus/w/8.1/bugfix/S3C-3358_fix_metrics_resp_for_single_resource 2020-09-23 18:06:30 +00:00
Taylor McKinnon 5b056e9690 bf(S3C-3358): Fix metrics response for single resource 2020-09-23 11:05:58 -07:00
bert-e ba047b40f8 Merge branch 'w/7.8/feature/S3C-3351_cache_node_deps_backport' into tmp/octopus/q/7.8 2020-09-23 17:23:14 +00:00
bert-e 5d7ed8520f Merge branches 'w/8.1/feature/S3C-3351_cache_node_deps_backport' and 'q/659/7.8/feature/S3C-3351_cache_node_deps_backport' into tmp/octopus/q/8.1 2020-09-23 17:23:14 +00:00
Taylor McKinnon dd118e4a44 Merge remote-tracking branch 'origin/feature/S3C-3286_add_reindex_task' into w/8.1/feature/S3C-3286_add_reindex_task 2020-09-17 11:57:37 -07:00
bert-e 3076eaf115 Merge branch 'w/7.8/feature/S3C-3351_cache_node_deps_backport' into tmp/octopus/w/8.1/feature/S3C-3351_cache_node_deps_backport 2020-09-17 18:47:55 +00:00
Taylor McKinnon c8c50131b6 Merge remote-tracking branch 'origin/feature/S3C-3351_cache_node_deps_backport' into w/7.8/feature/S3C-3351_cache_node_deps_backport 2020-09-17 11:47:31 -07:00
Taylor McKinnon c17dcc2535 ft(S3C-3351): Cache js dependencies in buildbot worker 2020-09-17 10:45:13 -07:00
Taylor McKinnon 6be66f59e5 ft(S3C-3286): Add reindex task 2020-09-17 10:37:04 -07:00
bert-e fcd74b5707 Merge branches 'w/8.1/feature/S3C-3351_cache_node_deps' and 'q/656/7.8/feature/S3C-3351_cache_node_deps' into tmp/octopus/q/8.1 2020-09-17 17:20:16 +00:00
bert-e 44b62c92fc Merge branch 'feature/S3C-3351_cache_node_deps' into q/7.8 2020-09-17 17:20:16 +00:00
bert-e ad52015f73 Merge branch 'feature/S3C-3351_cache_node_deps' into tmp/octopus/w/8.1/feature/S3C-3351_cache_node_deps 2020-09-17 17:11:39 +00:00
Taylor McKinnon 9664ec2e47 ft(S3C-3351): Cache js dependencies in buildbot worker 2020-09-17 10:10:58 -07:00
bert-e 60d0dc794d Merge branches 'w/8.1/feature/S3C-3324_add_counter_backend_to_cache' and 'q/651/7.8/feature/S3C-3324_add_counter_backend_to_cache' into tmp/octopus/q/8.1 2020-09-16 17:21:13 +00:00
bert-e 00035d09b0 Merge branch 'feature/S3C-3324_add_counter_backend_to_cache' into q/7.8 2020-09-16 17:21:13 +00:00
bert-e 14db2c93ce Merge branch 'feature/S3C-3324_add_counter_backend_to_cache' into tmp/octopus/w/8.1/feature/S3C-3324_add_counter_backend_to_cache 2020-09-11 22:28:44 +00:00
Taylor McKinnon 17225ad8c7 ft(S3C-3324): Add counter backend to cache 2020-09-11 13:48:55 -07:00
bert-e 4d4906dc94 Merge branch 'improvement/S3C-3325-support-bucket-notification-apis-utapi' into tmp/octopus/w/8.1/improvement/S3C-3325-support-bucket-notification-apis-utapi 2020-09-09 18:59:27 +00:00
naren-scality 4b69d78eae improvement: S3C-3325 support bucket notification
Adds support for the bucket notification operations `getBucketNotification`
and `putBucketNotification`.
2020-09-09 11:57:19 -07:00
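
For context, a hedged sketch of what such support typically means for callers: recording these operation names through utapi's pushMetric interface. The call sites, config shape, and parameter values below are assumptions for illustration, not the actual patch:

    const { UtapiClient } = require('utapi');

    // Config shape is an assumption; see utapi's docs for real options.
    const utapi = new UtapiClient({ redis: { host: 'localhost', port: 6379 } });

    // Hypothetical call sites after the corresponding S3 requests succeed:
    utapi.pushMetric('getBucketNotification', 'req-uid-1', { bucket: 'my-bucket' });
    utapi.pushMetric('putBucketNotification', 'req-uid-2', { bucket: 'my-bucket' });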
bert-e 824c584556 Merge branch 'feature/S3C-3269_add_tls_support' into q/7.8 2020-09-04 22:24:54 +00:00
bert-e 7a9ce9bf2c Merge branches 'w/8.1/feature/S3C-3269_add_tls_support' and 'q/642/7.8/feature/S3C-3269_add_tls_support' into tmp/octopus/q/8.1 2020-09-04 22:24:54 +00:00
bert-e d203a94367 Merge branch 'feature/S3C-3269_add_tls_support' into tmp/octopus/w/8.1/feature/S3C-3269_add_tls_support 2020-09-04 21:52:16 +00:00
Taylor McKinnon 3a24815fc6 ft(S3C-3269): Add TLS support 2020-09-04 14:51:43 -07:00
Taylor McKinnon bea0a4607a Merge remote-tracking branch 'origin/bugfix/S3C-3087_bump_version' into w/8.1/bugfix/S3C-3087_bump_version 2020-08-24 12:05:39 -07:00
Taylor McKinnon 633d296c38 bf(S3C-3087): Bump version 2020-08-24 12:04:11 -07:00
bert-e 047aa4f8eb Merge branch 'feature/S3C-3265_warp10_failover' into tmp/octopus/w/8.1/feature/S3C-3265_warp10_failover 2020-08-15 02:37:51 +00:00
Taylor McKinnon 018a1fcdb5 ft(S3C-3265): Warp10 Failover 2020-08-14 19:37:21 -07:00
bert-e 4aae68208c Merge branch 'bugfix/S3C-3264_fix_sentinel_parsing' into tmp/octopus/w/8.1/bugfix/S3C-3264_fix_sentinel_parsing 2020-08-14 18:46:21 +00:00
Taylor McKinnon 7129a8906a bf(S3C-3264): Fix sentinel parsing 2020-08-14 11:43:43 -07:00
bert-e 0b7781d3b8 Merge branches 'w/8.1/bugfix/S3C-3255_ensure_ingest_timestamp_precision' and 'q/624/7.8/bugfix/S3C-3255_ensure_ingest_timestamp_precision' into tmp/octopus/q/8.1 2020-08-13 21:46:34 +00:00
bert-e 6d59f01dec Merge branch 'bugfix/S3C-3255_ensure_ingest_timestamp_precision' into q/7.8 2020-08-13 21:46:33 +00:00
bert-e 2a2f818745 Merge branch 'bugfix/S3C-3255_ensure_ingest_timestamp_precision' into tmp/octopus/w/8.1/bugfix/S3C-3255_ensure_ingest_timestamp_precision 2020-08-13 20:57:20 +00:00
Taylor McKinnon c97f210b0e bf(S3C-3255): Ensure ingested events timestamp precision 2020-08-13 13:56:47 -07:00
bert-e 8064180984 Merge branch 'feature/S3C-3003_add_repair_process' into tmp/octopus/w/8.1/feature/S3C-3003_add_repair_process 2020-08-13 19:55:43 +00:00
Taylor McKinnon d4a2880a6d ft(S3C-3003): Add repair task and macros 2020-08-13 12:55:07 -07:00
bert-e 530e5ed056 Merge branch 'bugfix/S3C-3242_rework_redis_config' into q/7.8 2020-08-11 17:30:55 +00:00
bert-e 6e096c9d39 Merge branches 'w/8.1/bugfix/S3C-3242_rework_redis_config' and 'q/614/7.8/bugfix/S3C-3242_rework_redis_config' into tmp/octopus/q/8.1 2020-08-11 17:30:55 +00:00
bert-e 74a0256c10 Merge branch 'w/7.8/bugfix/S3C-3240_fix_tasks_exports' into tmp/octopus/q/7.8 2020-08-10 23:43:30 +00:00
bert-e 86285d1e45 Merge branches 'w/8.1/bugfix/S3C-3240_fix_tasks_exports' and 'q/611/7.8/bugfix/S3C-3240_fix_tasks_exports' into tmp/octopus/q/8.1 2020-08-10 23:43:30 +00:00
bert-e 92b9659510 Merge branch 'bugfix/S3C-3243_fix_warp10_token_config' into q/7.8 2020-08-10 23:29:27 +00:00
bert-e 8ea9a5dc0a Merge branches 'w/8.1/bugfix/S3C-3243_fix_warp10_token_config' and 'q/609/7.8/bugfix/S3C-3243_fix_warp10_token_config' into tmp/octopus/q/8.1 2020-08-10 23:29:27 +00:00
bert-e 390f1bb3c1 Merge branch 'w/7.8/bugfix/S3C-3240_fix_tasks_exports' into tmp/octopus/w/8.1/bugfix/S3C-3240_fix_tasks_exports 2020-08-10 23:23:15 +00:00
Taylor McKinnon 8b85fb555c bf(S3C-3240): Fix tasks export 2020-08-10 16:22:21 -07:00
bert-e 3b2fc1b045 Merge branch 'w/7.8/bugfix/S3C-3242_rework_redis_config' into tmp/octopus/w/8.1/bugfix/S3C-3242_rework_redis_config 2020-08-10 23:18:17 +00:00
Taylor McKinnon 8873a82d72 bf(S3C-3242): Rework redis config loading 2020-08-10 16:14:53 -07:00
bert-e 58f20049f3 Merge branch 'bugfix/S3C-3243_fix_warp10_token_config' into tmp/octopus/w/8.1/bugfix/S3C-3243_fix_warp10_token_config 2020-08-10 23:13:09 +00:00
bert-e db05aaf2a3 Merge branch 'bugfix/S3C-3241_remove_warp10_workaround' into tmp/octopus/w/8.1/bugfix/S3C-3241_remove_warp10_workaround 2020-08-10 23:11:48 +00:00
Taylor McKinnon 64c88ef62d bf(S3C-3243): Load warp 10 tokens from config 2020-08-10 15:19:44 -07:00
Taylor McKinnon 47de463afe bf(S3C-3241): Remove warp 10 packaging work around 2020-08-10 15:08:01 -07:00
bert-e 16c43c202b Merge branch 'feature/S3C-3235_fix_getMetricsAt_with_no_data_81' into q/8.1 2020-08-07 19:08:16 +00:00
bert-e 90beac2fa7 Merge branch 'w/7.8/feature/S3C-3230_add_authv4_support' into tmp/octopus/w/8.1/feature/S3C-3230_add_authv4_support 2020-08-05 23:03:09 +00:00
Taylor McKinnon 76a2091106 ft(S3C-3230): Add authentication support 2020-08-05 16:02:32 -07:00
bert-e 14446a10c2 Merge branch 'w/7.8/bugfix/S3C-3235_fix_getMetricsAt_with_no_data' into tmp/octopus/w/8.1/bugfix/S3C-3235_fix_getMetricsAt_with_no_data 2020-08-04 20:04:49 +00:00
Taylor McKinnon 9b1fefeef8 bf(S3C-3235): Fix Exception when calling getMetricsAt with an empty DB 2020-08-04 12:58:59 -07:00
bert-e 8e1417ad6b Merge branch 'w/7.8/feature/S3C-3020_add_functional_test_for_snapshot_task' into tmp/octopus/w/8.1/feature/S3C-3020_add_functional_test_for_snapshot_task 2020-08-03 21:49:18 +00:00
Taylor McKinnon 9c3179dd1c ft(S3C-3020): Add tests for CreateSnapshot 2020-08-03 14:47:47 -07:00
bert-e ae29a7d346 Merge branch 'w/7.8/feature/S3C-3020_add_functional_test_for_checkpoint_task' into tmp/octopus/w/8.1/feature/S3C-3020_add_functional_test_for_checkpoint_task 2020-08-03 21:42:48 +00:00
Taylor McKinnon 604598b95a ft(S3C-3020): Add functional tests for CreateCheckpoint 2020-08-03 14:42:13 -07:00
bert-e 646f921ded Merge branch 'w/7.8/feature/S3C-3020_add_functional_test_ingest_task' into tmp/octopus/w/8.1/feature/S3C-3020_add_functional_test_ingest_task 2020-08-03 19:39:54 +00:00
Taylor McKinnon cd46d79acb remove comment, fix assert 2020-08-03 12:39:40 -07:00
Taylor McKinnon 36a01a54c9 ft(S3C-3020): Add functional tests for IngestShards 2020-08-03 12:39:40 -07:00
bert-e 31ff2aa63b Merge branch 'w/7.8/feature/S3C-3007_Add_listMetrics_handler' into tmp/octopus/w/8.1/feature/S3C-3007_Add_listMetrics_handler 2020-08-03 19:30:40 +00:00
Taylor McKinnon c468ab2b8c ft(S3C-3007): Add listMetrics handler 2020-08-03 12:30:11 -07:00
bert-e 37f6b4ddc5 Merge branch 'w/7.8/feature/S3C-3132-utapi-v2-push-metric' into tmp/octopus/w/8.1/feature/S3C-3132-utapi-v2-push-metric 2020-07-31 20:37:59 +00:00
Dora Korpar eef4623a1e ft: S3C-3132 export utapi v2 client 2020-07-30 20:55:34 -07:00
bert-e d043d8dcae Merge branch 'w/7.8/feature/S3C-3006_add_metric_calculation_macro' into tmp/octopus/w/8.1/feature/S3C-3006_add_metric_calculation_macro 2020-07-29 21:52:13 +00:00
Taylor McKinnon 2f056004dc ft(S3C-3006): Add metric calculation macro 2020-07-29 14:51:46 -07:00
bert-e 6dbc500fa9 Merge branch 'w/7.8/feature/S3C-3196-update-node' into tmp/octopus/w/8.1/feature/S3C-3196-update-node 2020-07-27 21:55:34 +00:00
Dora Korpar 34fbdd5ac6 fix linter 2020-07-27 14:55:13 -07:00
bert-e 8e1550d61a Merge branch 'w/7.8/feature/S3C-3196-update-node' into tmp/octopus/w/8.1/feature/S3C-3196-update-node 2020-07-27 07:40:11 +00:00
bert-e 088be116a7 Merge branch 'w/7.6/feature/S3C-3196-update-node' into tmp/octopus/w/7.7/feature/S3C-3196-update-node 2020-07-27 07:40:10 +00:00
bert-e ef9b7e59dd Merge branch 'w/7.5/feature/S3C-3196-update-node' into tmp/octopus/w/7.6/feature/S3C-3196-update-node 2020-07-27 07:40:10 +00:00
Dora Korpar 07b869b51a fix linter 2020-07-27 00:39:51 -07:00
bert-e 4a811d7e86 Merge branch 'w/7.8/feature/S3C-3196-update-node' into tmp/octopus/w/8.1/feature/S3C-3196-update-node 2020-07-25 03:28:19 +00:00
bert-e 75aa3c85db Merge branch 'w/7.6/feature/S3C-3196-update-node' into tmp/octopus/w/7.7/feature/S3C-3196-update-node 2020-07-25 03:28:19 +00:00
Dora Korpar 11504d6240 Merge remote-tracking branch 'origin/w/7.5/feature/S3C-3196-update-node' into w/7.6/feature/S3C-3196-update-node 2020-07-24 20:25:48 -07:00
Dora Korpar 0022d3aab1 Merge remote-tracking branch 'origin/feature/S3C-3196-update-node' into w/7.5/feature/S3C-3196-update-node 2020-07-24 20:03:22 -07:00
Dora Korpar e12aaced08 ft: S3C-3196 update node 2020-07-24 16:11:29 -07:00
bert-e 1225f45805 Merge branch 'w/7.8/feature/S3C-3020_add_lag_flag_to_task' into tmp/octopus/w/8.1/feature/S3C-3020_add_lag_flag_to_task 2020-07-20 21:40:30 +00:00
Taylor McKinnon c5259b3e71 ft(S3C-3020): Fix warp10 config and move lag calc to BaseTask 2020-07-20 14:39:53 -07:00
bert-e 81e5c9e98c Merge branch 'w/7.8/feature/S3C-3020_add_snapshot_creation_task' into tmp/octopus/w/8.1/feature/S3C-3020_add_snapshot_creation_task 2020-07-20 20:21:06 +00:00
Taylor McKinnon 6a7de4d84d bump shim test timeout 2020-07-20 13:19:58 -07:00
Taylor McKinnon bd271349ab ft(ZENKO-3020): Add snapshot creation task 2020-07-20 12:56:41 -07:00
bert-e cdb9ef06d1 Merge branch 'w/7.8/feature/S3C-3002_add_checkpoint_creation_task' into tmp/octopus/w/8.1/feature/S3C-3002_add_checkpoint_creation_task 2020-07-20 19:21:38 +00:00
Taylor McKinnon 097cbe270a ft(S3C-3002): Add checkpoint creation 2020-07-20 12:15:32 -07:00
bert-e 186807e798 Merge branch 'w/7.8/feature/S3C-3001_add_redis_to_warp10_task' into tmp/octopus/w/8.1/feature/S3C-3001_add_redis_to_warp10_task 2020-07-16 17:21:16 +00:00
Taylor McKinnon 42bea22e3a address comments 2020-07-14 19:08:59 -07:00
Taylor McKinnon 1c52ca2c73 fix tests 2020-07-14 11:59:59 -07:00
Taylor McKinnon aa0d410760 revert Dockerfile changes for eve 2020-07-14 11:49:08 -07:00
bert-e 25ffbe3bbc Merge branch 'w/7.8/feature/S3C-3001_Add_shard_tracking_to_redis_backend' into tmp/octopus/w/8.1/feature/S3C-3001_Add_shard_tracking_to_redis_backend 2020-07-13 19:06:58 +00:00
Taylor McKinnon 94b6267673 Add IngestShard task 2020-07-13 10:30:57 -07:00
Taylor McKinnon cd9bc15a05 add entrypoint 2020-07-12 17:41:49 -07:00
Taylor McKinnon 995545a458 Add config options and constant 2020-07-12 17:41:49 -07:00
bert-e 77f8fc4b11 Merge branch 'w/7.8/feature/S3C-3001_usec_resolution_for_shards' into tmp/octopus/w/8.1/feature/S3C-3001_usec_resolution_for_shards 2020-07-13 00:25:33 +00:00
Taylor McKinnon dcf50d4875 Add shard tracking to redis backend and expose through cache client 2020-07-12 17:21:04 -07:00
Taylor McKinnon 30b3f771ab add dependencies 2020-07-12 17:21:04 -07:00
Taylor McKinnon 6971dd8462 Add Protobuf and MacroEncoder extensions 2020-07-12 17:21:04 -07:00
Taylor McKinnon 2a630aeaee add microresolution support for util 2020-07-12 17:19:31 -07:00
bert-e a02fd4830c Merge branch 'w/7.8/feature/S3C-3005_add_ingest_api' into tmp/octopus/w/8.1/feature/S3C-3005_add_ingest_api 2020-07-07 17:54:27 +00:00
Taylor McKinnon bea8a4dd82 ft(S3C-3005): Ingest API 2020-07-07 10:53:43 -07:00
bert-e aa314c5ed9 Merge branch 'w/7.8/feature/S3C-3008_Add_cloudserver_client' into tmp/octopus/w/8.1/feature/S3C-3008_Add_cloudserver_client 2020-07-01 18:58:57 +00:00
Taylor McKinnon 2cefa20860 ft(S3C-3008): Add Utapi client 2020-07-01 11:58:35 -07:00
bert-e 51858fe41a Merge branch 'w/7.8/feature/S3C-3004_Add_http_server' into tmp/octopus/w/8.1/feature/S3C-3004_Add_http_server 2020-06-29 19:45:46 +00:00
Taylor McKinnon 92b22e8de8 use assert.rejects validation function 2020-06-29 12:45:34 -07:00
Taylor McKinnon 70a79537fe Merge remote-tracking branch 'origin/w/7.8/feature/S3C-3004_Add_http_server' into w/8.1/feature/S3C-3004_Add_http_server 2020-06-28 21:10:12 -07:00
Taylor McKinnon 9e56839700 Apply suggestions from code review
typos and spelling

Co-authored-by: miniscruff <halfpint1170@gmail.com>
2020-06-28 21:08:00 -07:00
Taylor McKinnon ad7ec79565 reviews 2020-06-26 17:59:58 -07:00
Taylor McKinnon 888c129be1 fix eve 2020-06-26 13:45:32 -07:00
Taylor McKinnon efe2551d85 add tests 2020-06-25 13:48:23 -07:00
Taylor McKinnon 951411bdb3 small fixes 2020-06-25 13:45:36 -07:00
Taylor McKinnon 6bb90551cf Add API controller and healthcheck handler 2020-06-21 21:48:21 -07:00
Taylor McKinnon 6b712829d9 Add server class and middleware 2020-06-21 21:48:21 -07:00
Taylor McKinnon e46b04697d Add request and response containers 2020-06-21 21:48:21 -07:00
Taylor McKinnon 2b9c88b9cc Add error definitions 2020-06-21 21:48:21 -07:00
Taylor McKinnon 97f63e2e6d Small changes to logger 2020-06-21 21:48:21 -07:00
Taylor McKinnon 9d8b5f181d Modify cache client and config to add cacheBackend setting 2020-06-21 21:48:21 -07:00
bert-e b335675a36 Merge branch 'w/7.8/feature/S3C-3004_Add_process_absraction' into tmp/octopus/w/8.1/feature/S3C-3004_Add_process_absraction 2020-06-19 19:36:52 +00:00
Taylor McKinnon 8e39560359 ft(S3C-3004): Add process abstraction 2020-06-18 16:49:12 -07:00
bert-e 707620acf7 Merge branch 'w/7.8/feature/S3C-3004_Add_config_loading' into tmp/octopus/w/8.1/feature/S3C-3004_Add_config_loading 2020-06-18 23:47:50 +00:00
Taylor McKinnon ea9e6285e2 ft(S3C-3004): Add config loading 2020-06-18 16:45:29 -07:00
bert-e 30306f3dce Merge branch 'w/7.8/feature/S3C-3004_Add_stub_openapi' into tmp/octopus/w/8.1/feature/S3C-3004_Add_stub_openapi 2020-06-18 22:26:29 +00:00
Taylor McKinnon 18520244b8 ft(S3C-3004): Add stub OpenAPI spec 2020-06-18 15:25:43 -07:00
bert-e d82623014d Merge branch 'w/7.8/feature/S3C-3000_Add_warp10_client' into tmp/octopus/w/8.1/feature/S3C-3000_Add_warp10_client 2020-06-18 20:31:03 +00:00
Taylor McKinnon 8614845b28 ft(S3C-3000): Add Warp 10 client 2020-06-18 13:30:26 -07:00
bert-e 1c5c011699 Merge branch 'w/7.8/feature/S3C-2999_add_redis_cache_client' into tmp/octopus/w/8.1/feature/S3C-2999_add_redis_cache_client 2020-06-18 20:26:15 +00:00
Taylor McKinnon 875bd09413 ft(S3C-2999): Add Redis and Memory cache client 2020-06-18 13:25:46 -07:00
bert-e 282a55c724 Merge branch 'w/7.8/feature/S3C-2960-object-lock-metrics' into tmp/octopus/w/8.1/feature/S3C-2960-object-lock-metrics 2020-06-16 17:33:22 +00:00
Dora Korpar 89ede12a60 ft: S3C-2960 object lock metrics 2020-06-16 10:21:45 -07:00
Taylor McKinnon 90c8f49222 Merge remote-tracking branch 'origin/w/7.8/feature/S3C-3041_Add_v2_toggle' into w/8.1/feature/S3C-3041_Add_v2_toggle 2020-06-10 17:40:37 -07:00
Taylor McKinnon c97ab239fc ft(S3C-3041): Add v2 Toggle based on environment variable 2020-06-10 17:38:28 -07:00
Taylor McKinnon 2d0c104cc2 Merge remote-tracking branch 'origin/w/7.8/bugfix/S3C-3043_bump_scality_guidelines' into w/8.1/bugfix/S3C-3043_bump_scality_guidelines 2020-06-10 15:10:04 -07:00
Taylor McKinnon 77f2a37488 Fix new linting errors 2020-06-10 14:48:53 -07:00
Taylor McKinnon e6142a0ad5 bf(S3C-3043): Bump scality/Guidelines to HEAD 2020-06-09 14:40:09 -07:00
bert-e 72bda734bf Merge branch 'w/7.8/bugfix/S3C-3023_bump_warp10_version' into tmp/octopus/w/8.1/bugfix/S3C-3023_bump_warp10_version 2020-06-02 23:09:36 +00:00
Taylor McKinnon eaf08595c7 bf(S3C-3023): Bump Warp 10 Version 2020-06-02 16:08:36 -07:00
bert-e a2e8fe51b4 Merge branch 'feature/S3C-2878_Add_warp10_Dockerfile' into tmp/octopus/w/8.1/feature/S3C-2878_Add_warp10_Dockerfile 2020-05-26 19:49:42 +00:00
Taylor McKinnon d5182078cc ft(S3C-2878): Add Warp10 Dockerfile 2020-05-26 12:49:01 -07:00
bert-e b494f1e85c Merge branch 'w/7.7/bugfix/S3C-2408/mpu-overwrite' into tmp/octopus/w/8.1/bugfix/S3C-2408/mpu-overwrite 2020-04-22 02:20:17 +00:00
bert-e 2bc5d07f85 Merge branch 'w/7.5/bugfix/S3C-2408/mpu-overwrite' into tmp/octopus/w/7.6/bugfix/S3C-2408/mpu-overwrite 2020-04-22 02:07:32 +00:00
Rahul Padigela 2291dbb352 Merge remote-tracking branch 'origin/bugfix/S3C-2408/mpu-overwrite' into w/7.5/bugfix/S3C-2408/mpu-overwrite 2020-04-21 19:05:00 -07:00
bert-e af1a01b692 Merge branch 'w/8.0/improvement/S3C-2808-clean-upstream' into tmp/octopus/w/8.1/improvement/S3C-2808-clean-upstream 2020-04-21 23:59:47 +00:00
Rahul Padigela 1f0f7d91ff Merge remote-tracking branch 'origin/w/7.7/improvement/S3C-2808-clean-upstream' into w/8.0/improvement/S3C-2808-clean-upstream 2020-04-21 16:59:04 -07:00
bert-e 34b2a327fd Merge branch 'improvement/S3C-2808-clean-upstream' into tmp/octopus/w/7.6/improvement/S3C-2808-clean-upstream 2020-04-21 23:57:25 +00:00
Rahul Padigela c79091a4ed improvement: S3C-2808 bump version 2020-04-21 16:54:28 -07:00
bbuchanan9 e759f6c29e improvement S3C-234 Operation counters config
(cherry picked from commit 71a3bc9bbb)
2020-04-21 16:52:02 -07:00
bennettbuchanan 45d7e26afb bugfix: S3C-2408 MPU overwrite 2020-04-21 15:51:20 -07:00
Taylor McKinnon 5c6386e33d Merge remote-tracking branch 'origin/w/8.0/bugfix/S3C-2603_Update_utapi_reindex' into w/8.1/bugfix/S3C-2603_Update_utapi_reindex 2020-04-06 12:34:17 -07:00
Taylor McKinnon 0bf2a533f5 Merge remote-tracking branch 'origin/w/7.7/bugfix/S3C-2603_Update_utapi_reindex' into w/8.0/bugfix/S3C-2603_Update_utapi_reindex 2020-04-06 12:33:13 -07:00
Taylor McKinnon 4eb69776b6 Merge remote-tracking branch 'origin/w/7.5/bugfix/S3C-2603_Update_utapi_reindex' into w/7.6/bugfix/S3C-2603_Update_utapi_reindex 2020-04-06 12:31:30 -07:00
Taylor McKinnon cb3a3163db Merge remote-tracking branch 'origin/bugfix/S3C-2603_Update_utapi_reindex' into w/7.5/bugfix/S3C-2603_Update_utapi_reindex 2020-04-06 12:30:35 -07:00
Taylor McKinnon a38035b2ad remove unused is_bucket_v7 2020-04-03 10:57:55 -07:00
Taylor McKinnon 13de16ebcf feat(S3C-2603): Update utapi 2020-04-02 14:44:45 -07:00
Taylor McKinnon 8a0d6496fd feat(S3C-2603): Add updated s3_bucketd.py script 2020-03-26 15:45:38 -07:00
bert-e c59323952d Merge branch 'w/8.0/bugfix/S3C-2604-listMultipleBucketMetrics' into tmp/octopus/w/8.1/bugfix/S3C-2604-listMultipleBucketMetrics 2020-02-26 09:30:18 +00:00
bert-e b05d8c5528 Merge branch 'w/7.7/bugfix/S3C-2604-listMultipleBucketMetrics' into tmp/octopus/w/8.0/bugfix/S3C-2604-listMultipleBucketMetrics 2020-02-26 09:30:17 +00:00
bert-e 5b9f8e2688 Merge branch 'bugfix/S3C-2604-listMultipleBucketMetrics' into tmp/octopus/w/7.6/bugfix/S3C-2604-listMultipleBucketMetrics 2020-02-26 09:30:17 +00:00
Anurag Mittal 9be5c2c40b bugfix: S3C-2604-list-multiple-bucket-metrics 2020-02-26 10:29:11 +01:00
bert-e a5430ba8a8 Merge branch 'w/8.0/bugfix/S3C-2604-list-multiple-bucket-metrics' into tmp/octopus/w/8.1/bugfix/S3C-2604-list-multiple-bucket-metrics 2020-02-25 19:24:23 +00:00
bert-e cc0087c3ba Merge branch 'w/7.7/bugfix/S3C-2604-list-multiple-bucket-metrics' into tmp/octopus/w/8.0/bugfix/S3C-2604-list-multiple-bucket-metrics 2020-02-25 19:24:23 +00:00
bert-e fb5341ed34 Merge branch 'bugfix/S3C-2604-list-multiple-bucket-metrics' into tmp/octopus/w/7.5/bugfix/S3C-2604-list-multiple-bucket-metrics 2020-02-25 19:24:22 +00:00
bert-e d03e22f585 Merge branch 'w/7.5/bugfix/S3C-2604-list-multiple-bucket-metrics' into tmp/octopus/w/7.6/bugfix/S3C-2604-list-multiple-bucket-metrics 2020-02-25 19:24:22 +00:00
Anurag Mittal a33f1f6b86 bugfix: S3C-2604-list-multiple-bucket-metrics 2020-02-25 20:23:22 +01:00
bert-e 6a45a13ab4 Merge branch 'w/8.0/bugfix/S3C-2475/utapi_response_correction' into tmp/octopus/w/8.1/bugfix/S3C-2475/utapi_response_correction 2020-02-05 19:34:07 +00:00
bert-e 3dd760835f Merge branch 'w/7.7/bugfix/S3C-2475/utapi_response_correction' into tmp/octopus/w/8.0/bugfix/S3C-2475/utapi_response_correction 2020-02-05 19:34:07 +00:00
bert-e 09c770585d Merge branch 'w/7.5/bugfix/S3C-2475/utapi_response_correction' into tmp/octopus/w/7.6/bugfix/S3C-2475/utapi_response_correction 2020-02-05 19:34:07 +00:00
bert-e 8cd05d9659 Merge branch 'bugfix/S3C-2475/utapi_response_correction' into tmp/octopus/w/7.5/bugfix/S3C-2475/utapi_response_correction 2020-02-05 19:34:06 +00:00
naren-scality ef0cae81d3 bugfix: 2475 utapi response corrections 2020-02-06 00:57:45 +05:30
Flavien Lebarbé 2720bdb096 Merge branch 'development/8.0' into development/8.1 2019-11-11 19:15:36 +01:00
Flavien Lebarbé 54390f82ba Merge branch 'development/7.6' into development/8.0 2019-11-11 19:15:09 +01:00
Flavien Lebarbé 369bb97d77 Merge branch 'development/7.5' into development/7.6 2019-11-11 19:14:41 +01:00
Flavien Lebarbé 7fc1f383f1 Merge branch 'development/7.4' into development/7.5 2019-11-11 19:13:45 +01:00
Katherine Laue 5ccb8d03be Update yarn.lock 2019-09-11 15:41:50 -07:00
Katherine Laue 9fdad30ca0 improvement/S3C-2365 update vaultclient dep 2019-09-11 15:41:27 -07:00
Katherine Laue 3180aa2d02 update yarn.lock 2019-09-11 15:40:50 -07:00
Katherine Laue 60e4ed7880 remove yarn.lock 2019-09-11 15:40:50 -07:00
Katherine Laue d77d15c7dd update yarn.lock 2019-09-11 15:40:50 -07:00
Katherine Laue dc34912298 improvement/S3C-2364 install yarn frozen lockfile 2019-09-11 15:40:50 -07:00
Katherine Laue 4a845e80cd improvement/S3C-2364 migrate package manager to yarn 2019-09-11 15:40:50 -07:00
bbuchanan9 b56405f031 improvement S3C-234 Operation counters config 2019-09-11 15:40:50 -07:00
bbuchanan9 4d6fd39693 bugfix: S3C-2342 BucketD listing functional tests 2019-09-11 15:38:11 -07:00
bbuchanan9 b3a3383289 bugfix: S3C-2317 Add uuid module as a dependency 2019-09-11 15:38:11 -07:00
bbuchanan9 196acf9fc8 bugfix: S3C-2342 Add bucket listing pagination 2019-09-11 15:38:11 -07:00
bbuchanan9 347ac8faf1 bugfix: S3C-2315 Support versioning with reindex 2019-09-11 15:38:11 -07:00
bbuchanan9 a62c22f06d improvement: S3C-2337 Parallelize tests 2019-09-11 15:38:11 -07:00
bbuchanan9 d65b9a65ee bugfix: S3C-2317 Append UUID to sorted set members 2019-09-11 15:38:11 -07:00
Katherine Laue 72aedba8ba Update yarn.lock 2019-08-13 15:06:30 -07:00
Katherine Laue 072cb14338 Merge remote-tracking branch 'origin/improvement/S3C-2365-update-deps' into w/7.5/improvement/S3C-2365-update-deps 2019-08-13 14:59:39 -07:00
Katherine Laue b522b3d782 improvement/S3C-2365 update vaultclient dep 2019-08-13 13:59:49 -07:00
Katherine Laue e52aa89c6a update yarn.lock 2019-08-08 12:59:42 -07:00
Katherine Laue 3ad505dcd0 remove yarn.lock 2019-08-08 12:58:47 -07:00
Katherine Laue 98cb31ecdc update yarn.lock 2019-08-08 12:43:59 -07:00
bert-e 761df6dccf Merge branch 'improvement/S3C-2364-install-yarn-frozen-lockfile' into tmp/octopus/w/7.5/improvement/S3C-2364-install-yarn-frozen-lockfile 2019-08-08 18:29:21 +00:00
Katherine Laue 233dbfd34e improvement/S3C-2364 install yarn frozen lockfile 2019-08-08 10:40:32 -07:00
bert-e 836092ffb2 Merge branches 'w/7.5/improvement/S3C-2345/allow-config-to-disable-counters' and 'q/357/7.4/improvement/S3C-2345/allow-config-to-disable-counters' into tmp/octopus/q/7.5 2019-08-06 22:12:56 +00:00
bert-e 7033fe7cd6 Merge branch 'improvement/S3C-2345/allow-config-to-disable-counters' into q/7.4 2019-08-06 22:12:56 +00:00
bert-e 49767cb9d0 Merge branch 'improvement/S3C-2345/allow-config-to-disable-counters' into tmp/octopus/w/7.5/improvement/S3C-2345/allow-config-to-disable-counters 2019-08-06 22:01:14 +00:00
bert-e 8f8ab887e0 Merge branch 'improvement/S3C-2364-install-yarn' into tmp/octopus/w/7.5/improvement/S3C-2364-install-yarn 2019-08-01 20:16:05 +00:00
Katherine Laue b7eb0b5c15 improvement/S3C-2364 migrate package manager to yarn 2019-08-01 13:08:54 -07:00
bbuchanan9 71a3bc9bbb improvement S3C-234 Operation counters config 2019-07-31 13:38:06 -07:00
bert-e fe5cee7b7e Merge branch 'bugfix/S3C-2342/bucket-listing-pagination-functional-tests' into tmp/octopus/w/7.5/bugfix/S3C-2342/bucket-listing-pagination-functional-tests 2019-07-26 23:46:32 +00:00
bbuchanan9 f514acc87c bugfix: S3C-2342 BucketD listing functional tests 2019-07-26 16:43:06 -07:00
bert-e 34c0238236 Merge branches 'w/7.5/bugfix/S3C-2317/install-uuid-as-dep' and 'q/349/7.4/bugfix/S3C-2317/install-uuid-as-dep' into tmp/octopus/q/7.5 2019-07-25 17:42:31 +00:00
bert-e 5593d68b2a Merge branch 'bugfix/S3C-2317/install-uuid-as-dep' into q/7.4 2019-07-25 17:42:31 +00:00
bert-e 1e9bb9404f Merge branch 'bugfix/S3C-2317/install-uuid-as-dep' into tmp/octopus/w/7.5/bugfix/S3C-2317/install-uuid-as-dep 2019-07-25 17:20:22 +00:00
bbuchanan9 cd716bb4c3 bugfix: S3C-2317 Add uuid module as a dependency 2019-07-25 10:17:55 -07:00
bert-e 151cbd095f Merge branch 'bugfix/S3C-2342/bucket-listing-pagination' into tmp/octopus/w/7.5/bugfix/S3C-2342/bucket-listing-pagination 2019-07-25 15:56:23 +00:00
bbuchanan9 abcadb4b38 bugfix: S3C-2342 Add bucket listing pagination 2019-07-24 17:44:37 -07:00
bert-e e1a9a9e927 Merge branch 'bugfix/S3C-2315/support-versioning-with-reindex' into q/7.4 2019-07-24 00:47:11 +00:00
bert-e 88974b687d Merge branches 'w/7.5/bugfix/S3C-2315/support-versioning-with-reindex' and 'q/341/7.4/bugfix/S3C-2315/support-versioning-with-reindex' into tmp/octopus/q/7.5 2019-07-24 00:47:11 +00:00
bert-e 548e71be7a Merge branch 'bugfix/S3C-2315/support-versioning-with-reindex' into tmp/octopus/w/7.5/bugfix/S3C-2315/support-versioning-with-reindex 2019-07-24 00:41:28 +00:00
bbuchanan9 9aa50adbeb bugfix: S3C-2315 Support versioning with reindex 2019-07-23 17:38:55 -07:00
bert-e 9533009100 Merge branch 'w/7.5/improvement/S3C-2337-parallelize-tests' into tmp/octopus/w/8.0/improvement/S3C-2337-parallelize-tests 2019-07-19 17:12:55 +00:00
bert-e f819a51db3 Merge branch 'improvement/S3C-2337-parallelize-tests' into tmp/octopus/w/7.5/improvement/S3C-2337-parallelize-tests 2019-07-19 17:12:54 +00:00
bbuchanan9 92f2dda73c improvement: S3C-2337 Parallelize tests 2019-07-19 10:11:16 -07:00
bert-e d336997813 Merge branch 'w/7.5/bugfix/S3C-2317/use-uuid' into tmp/octopus/w/8.0/bugfix/S3C-2317/use-uuid 2019-07-19 01:22:33 +00:00
bert-e bee7c1dcfa Merge branch 'bugfix/S3C-2317/use-uuid' into tmp/octopus/w/7.5/bugfix/S3C-2317/use-uuid 2019-07-19 01:22:32 +00:00
bbuchanan9 25dfba72e9 bugfix: S3C-2317 Append UUID to sorted set members 2019-07-18 17:36:42 -07:00
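A minimal sketch of the approach this commit names: suffixing each sorted-set member with a UUID so otherwise-identical members cannot collide. The key, score, and function names are illustrative assumptions, not Utapi source; it assumes the `uuid` module added as a dependency above (a release exposing `v4`) and an ioredis-style client.

```javascript
const { v4: uuidv4 } = require('uuid');

// Append a UUID so two pushes of the same value remain distinct members
// of the sorted set instead of overwriting each other.
function pushSortedSetEntry(redis, key, score, value) {
    return redis.zadd(key, score, `${value}:${uuidv4()}`);
}
```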
Katherine Laue 166d2c06cf Merge remote-tracking branch 'origin/w/7.5/improvement/S3C-2332-update-vaultclient' into w/8.0/improvement/S3C-2332-update-vaultclient 2019-07-16 13:52:39 -07:00
Katherine Laue 8cbda6d4e5 Merge remote-tracking branch 'origin/improvement/S3C-2332-update-vaultclient' into w/7.5/improvement/S3C-2332-update-vaultclient 2019-07-16 13:49:32 -07:00
Katherine Laue e95281d37f Merge remote-tracking branch 'origin/development/7.4' into HEAD 2019-07-16 13:44:28 -07:00
Katherine Laue 604335f51d update werelogs in package.json 2019-07-16 13:39:15 -07:00
Katherine Laue 885f075cb9 update vaultclient in package.json 2019-07-16 13:16:42 -07:00
bbuchanan9 9042956610 improvement: S3C-2314 Update Scality dependencies 2019-07-15 13:54:34 -07:00
bert-e 4f754e26f9 Merge branch 'improvement/S3C-2314/update-scality-dependencies' into tmp/octopus/w/8.0/improvement/S3C-2314/update-scality-dependencies 2019-07-15 20:29:07 +00:00
bbuchanan9 8d27348561 improvement: S3C-2314 Update Scality dependencies 2019-07-15 13:27:36 -07:00
bbuchanan9 dfb7a83b2a Merge remote-tracking branch 'origin/w/7.5/bugfix/S3C-2322/incorrect-expire-TTL-config-field' into w/8.0/bugfix/S3C-2322/incorrect-expire-TTL-config-field 2019-07-12 17:22:14 -07:00
bbuchanan9 e6c3ca61b0 bugfix: S3C-2322 UtapiClient configuration update 2019-07-11 17:01:12 -07:00
Katherine Laue e8ac66ff09 Merge remote-tracking branch 'origin/w/7.5/improvement/S3C-2290-upgrade-nodejs' into w/8.0/improvement/S3C-2290-upgrade-nodejs 2019-07-08 14:39:56 -07:00
Scality 3c8ef4a9a7 improvement: S3C-2290 upgrade node.js to 10.x 2019-07-08 14:33:28 -07:00
bert-e 1919808c09 Merge branch 'w/7.5/feature/S3C-2273/maintenance-testing-for-utapi-reindexer' into tmp/octopus/w/8.0/feature/S3C-2273/maintenance-testing-for-utapi-reindexer 2019-06-25 20:24:37 +00:00
bbuchanan9 933f34f837 feature: S3C-2273 Add unit and functional testing 2019-06-25 13:23:54 -07:00
bert-e 46f62388cd Merge branch 'w/7.5/feature/S3C-2260-maintenance-testing-for-utapi-reindexer' into tmp/octopus/w/8.0/feature/S3C-2260-maintenance-testing-for-utapi-reindexer 2019-06-19 17:58:13 +00:00
bbuchanan9 e23b48ac73 feature: S3C-2260 Add unit and functional testing 2019-06-19 10:56:05 -07:00
bert-e 894f37750f Merge branch 'w/7.5/bugfix/S3C-2019-reindex-script-redis-authentication' into tmp/octopus/w/8.0/bugfix/S3C-2019-reindex-script-redis-authentication 2019-06-07 04:28:53 +00:00
Jianqin Wang ebc7b3cb5b bugfix: all redis clients need auth for 7.4.4 2019-06-06 19:28:17 -07:00
bert-e a990c743af Merge branch 'w/7.5/bugfix/S3C-2019-redis-sentinel-password' into tmp/octopus/w/8.0/bugfix/S3C-2019-redis-sentinel-password 2019-06-06 05:34:54 +00:00
Jianqin Wang 750188f39b bugfix: S3C-2019 redis sentinel authentication 2019-06-05 22:33:15 -07:00
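For context on the sentinel-authentication fixes above, a hedged ioredis sketch of connecting through an authenticated sentinel. Hosts, ports, and passwords are placeholder assumptions (only the master group name `scality-s3` appears in this repository's CI and compose files), and `sentinelPassword` requires an ioredis release that supports it.

```javascript
const Redis = require('ioredis');

// Placeholder values for illustration; real settings come from Utapi's config.
const client = new Redis({
    name: 'scality-s3',                              // master group watched by sentinel
    sentinels: [{ host: '127.0.0.1', port: 16379 }],
    sentinelPassword: 'sentinel-secret',             // auth against the sentinel nodes
    password: 'redis-secret',                        // auth against master and replicas
});

client.ping().then(res => console.log('redis replied:', res));
```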
bert-e 3a3083c379 Merge branch 'w/7.5/bugfix/S3C-2076/update-default-reindex-schedule' into tmp/octopus/w/8.0/bugfix/S3C-2076/update-default-reindex-schedule 2019-06-05 18:48:28 +00:00
bbuchanan9 03dd49e3a9 bugfix: S3C-2076 Update default reindex schedule 2019-06-05 11:45:50 -07:00
bert-e 39b4b8b623 Merge branch 'w/7.5/bugfix/S3C-2076/add-utapi-reindex' into tmp/octopus/w/8.0/bugfix/S3C-2076/add-utapi-reindex 2019-06-05 04:45:30 +00:00
bbuchanan9 49d134e455 bugfix: S3C-2076 Add UtapiReindex 2019-06-04 21:36:05 -07:00
bert-e c5165a0338 Merge branches 'w/8.0/improvement/S3C-2034-bump-ioredis-version' and 'q/249/7.5/improvement/S3C-2034-bump-ioredis-version' into tmp/octopus/q/8.0 2019-05-21 17:38:37 +00:00
bert-e 8900e8d75e Merge branch 'improvement/S3C-2034-bump-ioredis-version' into q/7.4 2019-05-21 17:38:37 +00:00
bert-e ef56d39193 Merge branch 'w/7.5/improvement/S3C-2034-bump-ioredis-version' into tmp/octopus/w/8.0/improvement/S3C-2034-bump-ioredis-version 2019-05-20 21:49:40 +00:00
Jianqin Wang 178666f93d S3C-2034: bump ioredis to use redis 5.x features 2019-05-20 14:47:03 -07:00
bert-e da7144389d Merge branch 'w/7.5/bugfix/S3C-2195/upload-copy-part-metrics' into tmp/octopus/w/8.0/bugfix/S3C-2195/upload-copy-part-metrics 2019-05-20 20:44:58 +00:00
bbuchanan9 d14999e478 bugfix: S3C-2195 Add uploadPartCopy metric 2019-05-20 13:42:50 -07:00
bert-e d2020f8190 Merge branches 'w/8.0/bugfix/S3C-2155/allow-range-into-the-future' and 'q/242/7.5/bugfix/S3C-2155/allow-range-into-the-future' into tmp/octopus/q/8.0 2019-05-10 00:03:41 +00:00
bert-e cc2ebd2afb Merge branches 'w/7.4/bugfix/S3C-2155/allow-range-into-the-future' and 'q/242/6.4/bugfix/S3C-2155/allow-range-into-the-future' into tmp/octopus/q/7.4 2019-05-10 00:03:40 +00:00
bert-e f8edbb37a6 Merge branch 'bugfix/S3C-2155/allow-range-into-the-future' into q/6.4 2019-05-10 00:03:40 +00:00
bert-e 27ef9dfa33 Merge branch 'w/7.5/bugfix/S3C-2155/allow-range-into-the-future' into tmp/octopus/w/8.0/bugfix/S3C-2155/allow-range-into-the-future 2019-05-09 23:17:31 +00:00
bert-e bfa76c639d Merge branch 'bugfix/S3C-2155/allow-range-into-the-future' into tmp/octopus/w/7.4/bugfix/S3C-2155/allow-range-into-the-future 2019-05-09 23:17:31 +00:00
bert-e fae26f0933 Merge branches 'w/8.0/bugfix/S3C-2105/push-backbeat-metrics' and 'q/236/7.5/bugfix/S3C-2105/push-backbeat-metrics' into tmp/octopus/q/8.0 2019-05-09 23:09:10 +00:00
bert-e ffc450c312 Merge branch 'bugfix/S3C-2105/push-backbeat-metrics' into q/7.4 2019-05-09 23:09:10 +00:00
bbuchanan9 2f269402f5 bugfix: S3C-2155 Allow end time in the future 2019-05-09 15:59:52 -07:00
bert-e 270591bf23 Merge branch 'w/7.5/bugfix/S3C-1506/start-end-reducer-values' into tmp/octopus/w/8.0/bugfix/S3C-1506/start-end-reducer-values 2019-05-09 17:01:16 +00:00
bert-e 1755151f18 Merge branch 'bugfix/S3C-1506/start-end-reducer-values' into tmp/octopus/w/7.4/bugfix/S3C-1506/start-end-reducer-values 2019-05-09 17:01:15 +00:00
bbuchanan9 dcf404d36a bugfix: S3C-1506 Update start/end reducer 2019-05-09 10:00:19 -07:00
bert-e 12fa8b567c Merge branch 'w/7.5/bugfix/S3C-2105/push-backbeat-metrics' into tmp/octopus/w/8.0/bugfix/S3C-2105/push-backbeat-metrics 2019-05-08 23:04:06 +00:00
bbuchanan9 c316f3792d bugfix: S3C-2105 Add put data metric 2019-05-08 16:02:34 -07:00
bbuchanan9 fac88a209f Merge remote-tracking branch 'origin/w/7.5/bugfix/S3C-1506/add-long-range-request-ft-tests' into w/8.0/bugfix/S3C-1506/add-long-range-request-ft-tests 2019-05-02 15:34:43 -07:00
bbuchanan9 d81ea8e335 Merge remote-tracking branch 'origin/bugfix/S3C-1506/add-long-range-request-ft-tests' into w/7.4/bugfix/S3C-1506/add-long-range-request-ft-tests 2019-05-02 15:25:20 -07:00
bbuchanan9 024f86a350 bugfix: S3C-1506 Range requests functional tests 2019-05-02 14:47:52 -07:00
bbuchanan9 1a9310e400 bugfix: S3C-1506 Add Utapi test pod 2019-05-02 14:43:01 -07:00
bert-e ef2c350724 Merge branch 'w/7.5/bugfix/S3C-2155/time-range-validation' into tmp/octopus/w/8.0/bugfix/S3C-2155/time-range-validation 2019-05-01 23:15:42 +00:00
bbuchanan9 720100cc03 bugfix: S3C-2155 Update import paths 2019-05-01 16:15:10 -07:00
bert-e 46bb81e9f8 Merge branch 'w/7.5/bugfix/S3C-2155/time-range-validation' into tmp/octopus/w/8.0/bugfix/S3C-2155/time-range-validation 2019-05-01 23:08:25 +00:00
bert-e f6b691e1f2 Merge branch 'bugfix/S3C-2155/time-range-validation' into tmp/octopus/w/7.4/bugfix/S3C-2155/time-range-validation 2019-05-01 23:08:25 +00:00
bbuchanan9 e9ace977a1 bugfix: S3C-2155 Time range validation
* Check that both start and end times are in the
  past
* Do not allow the start time to be greater than
  the end time
2019-05-01 14:36:00 -07:00
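A minimal sketch of the two rules this commit describes, assuming millisecond UNIX epoch timestamps as used throughout Utapi; the function name and messages are illustrative, and note that S3C-2155 ("Allow end time in the future", above) later relaxes the first rule.

```javascript
// Returns null for a valid range, or a reason string for an invalid one.
function validateTimeRange(start, end) {
    const now = Date.now();
    if (start > now || end > now) {
        return 'start and end times must be in the past';
    }
    if (start > end) {
        return 'start time must not be greater than the end time';
    }
    return null;
}
```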
bert-e 829369d37b Merge branch 'w/7.5/bugfix/S3C-1506/prevent-heap-memory-issue' into tmp/octopus/w/8.0/bugfix/S3C-1506/prevent-heap-memory-issue 2019-04-30 20:07:06 +00:00
bbuchanan9 5c3f4cfce8 Merge remote-tracking branch 'origin/bugfix/S3C-1506/prevent-heap-memory-issue' into w/7.4/bugfix/S3C-1506/prevent-heap-memory-issue 2019-04-30 13:05:05 -07:00
bbuchanan9 fd835d9665 bugfix: S3C-1506 Prevent heap memory issue 2019-04-29 10:34:08 -07:00
bert-e b5def9cb54 Merge branch 'w/7.5/improvement/S3C-2140/do-not-track-dump.rbd' into tmp/octopus/w/8.0/improvement/S3C-2140/do-not-track-dump.rbd 2019-04-26 20:13:05 +00:00
bert-e a33d838939 Merge branch 'improvement/S3C-2140/do-not-track-dump.rbd' into tmp/octopus/w/7.4/improvement/S3C-2140/do-not-track-dump.rbd 2019-04-26 20:13:05 +00:00
bbuchanan9 9a2564f6c7 improvement: Fix lint warning 2019-04-26 12:41:39 -07:00
bbuchanan9 4e31ca26ba improvement: S3C-214 Do not track dump.rbd file 2019-04-26 12:41:18 -07:00
bert-e 2b514a618e Merge branch 'w/7.5/feature/S3C-2133/add-eve-support' into tmp/octopus/w/8.0/feature/S3C-2133/add-eve-support 2019-04-26 17:25:50 +00:00
bert-e a94dd88349 Merge branch 'feature/S3C-2133/add-eve-support' into tmp/octopus/w/7.4/feature/S3C-2133/add-eve-support 2019-04-26 17:25:50 +00:00
Rayene Ben Rayana adf8b9acbe ft: add eve ci support 2019-04-25 14:54:54 -07:00
bbuchanan9 4f119ea917 documentation: S3C-2070 Update README
* Remove outdated CI badge
* Update links
* Update component name
* Fix typos
* Redefine CLI input fields
2019-04-04 14:58:35 -07:00
anurag4dsb 608fddb4bd Merge remote-tracking branch 'origin/feature/S3C-1561-getStorageUsedForAccountQuotas' into w/8.0/feature/S3C-1561-getStorageUsedForAccountQuotas 2019-01-24 11:25:17 -08:00
anurag4dsb ec34c1bfb9 ft: S3C-1561 - get storage utilized by a resource 2019-01-24 11:11:41 -08:00
Rahul Padigela f2f1d0c742 improvement: reply arsenal errors to the client
Without replying Arsenal style errors, the lib breaks the contract and causes an
exception on the cloudserver
2018-08-31 16:27:33 -07:00
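As a hedged illustration of the contract this commit restores: handlers reply with Arsenal error objects rather than plain Errors, so cloudserver can interpret the code and description. The handler shape is assumed; the `errors.*.customizeDescription` pattern matches the arsenal usage visible in `bin/ensureServiceUser` below.

```javascript
const { errors } = require('arsenal');

// Hypothetical callback-style handler: failures are reported as Arsenal
// errors, which carry the code/description shape cloudserver expects.
function listMetrics(params, callback) {
    if (!params.timeRange) {
        return callback(errors.InvalidArgument
            .customizeDescription('timeRange is required'));
    }
    return callback(null, { /* metrics payload */ });
}
```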
Dora Korpar 6d0c8dd1c0 bf: ZENKO 676 - only location metrics 2018-07-06 13:17:05 -07:00
bert-e cd3324df87 Merge branch 'bugfix/dependencies' into tmp/octopus/w/8.0/bugfix/dependencies 2018-06-29 14:07:22 +00:00
David Pineau 4664ee3cca Merge remote-tracking branch 'origin/development/7.4' into development/8.0 2018-06-28 19:45:42 +02:00
David Pineau a00aa6f05f Merge remote-tracking branch 'origin/development/7.4' into development/8.0 2018-06-28 14:58:45 +02:00
bert-e 4b646285d2 Merge branch 'feature/ZENKO-142-location-quota-metric' into q/8.0 2018-06-27 17:27:55 +00:00
bert-e e77bcc8e72 Merge branch 'feature/S3C-1212-expire-metrics' into tmp/octopus/w/8.0/feature/S3C-1212-expire-metrics 2018-06-26 22:10:37 +00:00
Rahul Padigela e3511ee7ef Merge remote-tracking branch 'origin/development/7.4' into improvement/port-7.4 2018-06-26 14:55:42 -07:00
Dora Korpar fc634ee028 ft: ZENKO 142 Location quota metrics 2018-06-26 14:44:35 -07:00
Rahul Padigela 4c776b3eb5 Merge pull request #177 from scality/ft/ZENKO-465-utapi-docker-image
ft: ZENKO 465 Utapi docker image
2018-06-07 14:10:00 -07:00
Dora Korpar 33024215e3 ft: ZENKO 465 Utapi docker image 2018-06-07 10:37:20 -07:00
Dora Korpar 4965d96f5c Merge pull request #175 from scality/ft/ZENKO-386-utapi-service-accounts
ft: ZENKO 386 zenko utapi integration
2018-06-04 14:10:32 -07:00
Dora Korpar 0bfd8a66fb ft: ZENKO 386 zenko utapi integration 2018-05-31 11:59:40 -07:00
Rahul Padigela a8a8ad42ff chore: update version and dependencies 2018-05-31 11:23:56 -07:00
Rahul Padigela 8e11d15893 Merge pull request #174 from scality/fwdport/7.4-beta-master
Fwdport/7.4 beta master
2018-04-23 00:15:16 -07:00
Rahul Padigela bf1cbe4bf4 Merge remote-tracking branch 'origin/rel/7.4-beta' into fwdport/7.4-beta-master 2018-04-23 00:12:38 -07:00
Rahul Padigela a4ab00ad92 Merge pull request #173 from scality/fwdport/7.4-7.4-beta
Fwdport/7.4 7.4 beta
2018-04-19 11:04:48 -07:00
Rahul Padigela 6c4e7aedce Merge remote-tracking branch 'origin/rel/7.4' into fwdport/7.4-7.4-beta 2018-04-19 11:01:53 -07:00
Stefano Maffulli b27c57bcfc Merge pull request #172 from scality/FT/addIssueTemplate
FT: ZNC-26: add issue template
2018-04-12 15:45:21 -07:00
LaureVergeron 1fda068967 FT: ZNC-26: add issue template 2018-04-11 11:11:17 +02:00
Rahul Padigela 18bf5bb00e Merge pull request #170 from scality/fwdport/7.4-beta-master
Fwdport/7.4 beta master
2018-03-27 15:33:52 -07:00
Alexander Chan 6de529b8b4 fix dependencies 2018-03-20 08:15:38 -07:00
Alexander Chan ec3efcb9af Merge remote-tracking branch 'origin/rel/7.4-beta' into fwdport/7.4-beta-master 2018-03-19 16:05:52 -07:00
Rahul Padigela d77f8cc46c ft: update version 2018-03-14 13:30:18 -07:00
Rahul Padigela 7487555957 Merge pull request #141 from scality/ft/add-example-python-request-script
FT: Add python request example
2018-03-05 11:13:03 -08:00
Bennett Buchanan 7fbddc071b FT: Add python request example 2018-03-05 11:10:56 -08:00
ironman-machine 6d708d54d0 merge #160 2018-02-09 14:14:25 +00:00
Rayene Ben Rayana 6ab610b27f ft: add eve ci support 2018-02-01 16:48:06 -08:00
220 changed files with 18376 additions and 1063 deletions

3
.dockerignore Normal file

@ -0,0 +1,3 @@
node_modules/
**/node_modules/
.git


@ -1 +1,25 @@
{ "extends": "scality" }
{
"extends": "scality",
"env": {
"es6": true
},
"parserOptions": {
"ecmaVersion": 9
},
"rules": {
"no-underscore-dangle": "off",
"implicit-arrow-linebreak" : "off",
"import/extensions": 0,
"prefer-spread": 0,
"no-param-reassign": 0,
"array-callback-return": 0
},
"settings": {
"import/resolver": {
"node": {
"paths": ["/utapi/node_modules", "node_modules"]
}
}
}
}

87
.github/ISSUE_TEMPLATE.md vendored Normal file

@ -0,0 +1,87 @@
# General support information
GitHub Issues are **reserved** for actionable bug reports (including
documentation inaccuracies), and feature requests.
**All questions** (regarding configuration, usecases, performance, community,
events, setup and usage recommendations, among other things) should be asked on
the **[Zenko Forum](http://forum.zenko.io/)**.
> Questions opened as GitHub issues will systematically be closed, and moved to
> the [Zenko Forum](http://forum.zenko.io/).
--------------------------------------------------------------------------------
## Avoiding duplicates
When reporting a new issue/requesting a feature, make sure that we do not have
any duplicates already open:
- search the issue list for this repository (use the search bar, select
"Issues" on the left pane after searching);
- if there is a duplicate, please do not open your issue, and add a comment
to the existing issue instead.
--------------------------------------------------------------------------------
## Bug report information
(delete this section (everything between the lines) if you're not reporting a
bug but requesting a feature)
### Description
Briefly describe the problem you are having in a few paragraphs.
### Steps to reproduce the issue
Please provide steps to reproduce, including full log output
### Actual result
Describe the results you received
### Expected result
Describe the results you expected
### Additional information
- Node.js version,
- Docker version,
- npm version,
- distribution/OS,
- optional: anything else you deem helpful to us.
--------------------------------------------------------------------------------
## Feature Request
(delete this section (everything between the lines) if you're not requesting
a feature but reporting a bug)
### Proposal
Describe the feature
### Current behavior
What currently happens
### Desired behavior
What you would like to happen
### Usecase
Please provide usecases for changing the current behavior
### Additional information
- Is this request for your company? Y/N
- If Y: Company name:
- Are you using any Scality Enterprise Edition products (RING, Zenko EE)? Y/N
- Are you willing to contribute this feature yourself?
- Position/Title:
- How did you hear about us?
--------------------------------------------------------------------------------

14
.github/docker/redis-replica/Dockerfile vendored Normal file

@ -0,0 +1,14 @@
# Creating this image for the CI as GitHub Actions
# is unable to overwrite the entrypoint
ARG REDIS_IMAGE="redis:latest"
FROM ${REDIS_IMAGE}
ENV REDIS_LISTEN_PORT 6380
ENV REDIS_MASTER_HOST redis
ENV REDIS_MASTER_PORT_NUMBER 6379
ENTRYPOINT redis-server \
--port ${REDIS_LISTEN_PORT} \
--slaveof ${REDIS_MASTER_HOST} ${REDIS_MASTER_PORT_NUMBER}

7
.github/docker/vault/Dockerfile vendored Normal file

@ -0,0 +1,7 @@
FROM ghcr.io/scality/vault:c2607856
ENV VAULT_DB_BACKEND LEVELDB
RUN chmod 400 tests/utils/keyfile
ENTRYPOINT yarn start

22
.github/scripts/run_ft_tests.bash vendored Executable file

@ -0,0 +1,22 @@
#!/bin/bash
set -x
set -e -o pipefail
# port for utapi server
PORT=8100
trap killandsleep EXIT
killandsleep () {
kill -9 $(lsof -t -i:$PORT) || true
sleep 10
}
if [ -z "$SETUP_CMD" ]; then
SETUP_CMD="start"
fi
UTAPI_INTERVAL_TEST_MODE=$1 npm $SETUP_CMD 2>&1 | tee -a "setup_$2.log" &
bash tests/utils/wait_for_local_port.bash $PORT 40
UTAPI_INTERVAL_TEST_MODE=$1 npm run $2 | tee -a "test_$2.log"

65
.github/workflows/build-ci.yaml vendored Normal file

@ -0,0 +1,65 @@
name: build-ci-images
on:
workflow_call:
jobs:
warp10-ci:
uses: scality/workflows/.github/workflows/docker-build.yaml@v2
secrets:
REGISTRY_LOGIN: ${{ github.repository_owner }}
REGISTRY_PASSWORD: ${{ github.token }}
with:
name: warp10-ci
context: .
file: images/warp10/Dockerfile
lfs: true
redis-ci:
uses: scality/workflows/.github/workflows/docker-build.yaml@v2
secrets:
REGISTRY_LOGIN: ${{ github.repository_owner }}
REGISTRY_PASSWORD: ${{ github.token }}
with:
name: redis-ci
context: .
file: images/redis/Dockerfile
redis-replica-ci:
uses: scality/workflows/.github/workflows/docker-build.yaml@v2
needs:
- redis-ci
secrets:
REGISTRY_LOGIN: ${{ github.repository_owner }}
REGISTRY_PASSWORD: ${{ github.token }}
with:
name: redis-replica-ci
context: .github/docker/redis-replica
build-args: |
REDIS_IMAGE=ghcr.io/${{ github.repository }}/redis-ci:${{ github.sha }}
vault-ci:
runs-on: ubuntu-20.04
steps:
- name: Checkout
uses: actions/checkout@v4
with:
lfs: true
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to GitHub Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ github.token }}
- name: Build and push vault Image
uses: docker/build-push-action@v5
with:
push: true
context: .github/docker/vault
tags: ghcr.io/${{ github.repository }}/vault-ci:${{ github.sha }}
cache-from: type=gha,scope=vault
cache-to: type=gha,mode=max,scope=vault

16
.github/workflows/build-dev.yaml vendored Normal file

@ -0,0 +1,16 @@
name: build-dev-image
on:
push:
branches-ignore:
- 'development/**'
jobs:
build-dev:
uses: scality/workflows/.github/workflows/docker-build.yaml@v2
secrets:
REGISTRY_LOGIN: ${{ github.repository_owner }}
REGISTRY_PASSWORD: ${{ github.token }}
with:
namespace: ${{ github.repository_owner }}
name: ${{ github.event.repository.name }}

39
.github/workflows/release-warp10.yaml vendored Normal file

@ -0,0 +1,39 @@
name: release-warp10
on:
workflow_dispatch:
inputs:
tag:
type: string
description: 'Tag to be released'
required: true
create-github-release:
type: boolean
description: Create a tag and matching Github release.
required: false
default: true
jobs:
build:
uses: scality/workflows/.github/workflows/docker-build.yaml@v2
secrets: inherit
with:
name: warp10
context: .
file: images/warp10/Dockerfile
tag: ${{ github.event.inputs.tag }}
lfs: true
release:
if: ${{ inputs.create-github-release }}
runs-on: ubuntu-latest
needs: build
steps:
- uses: softprops/action-gh-release@v2
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
name: Release utapi/warp10:${{ github.event.inputs.tag }}-warp10
tag_name: ${{ github.event.inputs.tag }}-warp10
generate_release_notes: false
target_commitish: ${{ github.sha }}

45
.github/workflows/release.yaml vendored Normal file

@ -0,0 +1,45 @@
name: release
on:
workflow_dispatch:
inputs:
dockerfile:
description: Dockerfile to build image from
type: choice
options:
- images/nodesvc-base/Dockerfile
- Dockerfile
required: true
tag:
type: string
description: 'Tag to be released'
required: true
create-github-release:
type: boolean
description: Create a tag and matching Github release.
required: false
default: false
jobs:
build:
uses: scality/workflows/.github/workflows/docker-build.yaml@v2
with:
namespace: ${{ github.repository_owner }}
name: ${{ github.event.repository.name }}
context: .
file: ${{ github.event.inputs.dockerfile}}
tag: ${{ github.event.inputs.tag }}
release:
if: ${{ inputs.create-github-release }}
runs-on: ubuntu-latest
needs: build
steps:
- uses: softprops/action-gh-release@v2
env:
GITHUB_TOKEN: ${{ github.token }}
with:
name: Release ${{ github.event.inputs.tag }}
tag_name: ${{ github.event.inputs.tag }}
generate_release_notes: true
target_commitish: ${{ github.sha }}

361
.github/workflows/tests.yaml vendored Normal file

@ -0,0 +1,361 @@
---
name: tests
on:
push:
branches-ignore:
- 'development/**'
workflow_dispatch:
inputs:
debug:
description: Debug (enable the ability to SSH to runners)
type: boolean
required: false
default: 'false'
connection-timeout-m:
type: number
required: false
description: Timeout for ssh connection to worker (minutes)
default: 30
jobs:
build-ci:
uses: ./.github/workflows/build-ci.yaml
lint:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
with:
lfs: true
- uses: actions/setup-node@v4
with:
node-version: '16.13.2'
cache: yarn
- name: install dependencies
run: yarn install --frozen-lockfile --network-concurrency 1
- name: run static analysis tools on markdown
run: yarn run lint_md
- name: run static analysis tools on code
run: yarn run lint
tests-v1:
needs:
- build-ci
runs-on: ubuntu-latest
env:
REINDEX_PYTHON_INTERPRETER: python3
name: ${{ matrix.test.name }}
strategy:
fail-fast: false
matrix:
test:
- name: run unit tests
command: yarn test
env:
UTAPI_METRICS_ENABLED: 'true'
- name: run v1 client tests
command: bash ./.github/scripts/run_ft_tests.bash false ft_test:client
env: {}
- name: run v1 server tests
command: bash ./.github/scripts/run_ft_tests.bash false ft_test:server
env: {}
- name: run v1 cron tests
command: bash ./.github/scripts/run_ft_tests.bash false ft_test:cron
env: {}
- name: run v1 interval tests
command: bash ./.github/scripts/run_ft_tests.bash true ft_test:interval
env: {}
services:
redis:
image: ghcr.io/${{ github.repository }}/redis-ci:${{ github.sha }}
ports:
- 6379:6379
- 9121:9121
options: >-
--health-cmd "redis-cli ping"
--health-interval 10s
--health-timeout 5s
--health-retries 5
redis-replica:
image: ghcr.io/${{ github.repository }}/redis-replica-ci:${{ github.sha }}
ports:
- 6380:6380
options: >-
--health-cmd "redis-cli -p 6380 ping"
--health-interval 10s
--health-timeout 5s
--health-retries 5
redis-sentinel:
image: bitnami/redis-sentinel:7.2.4
env:
REDIS_MASTER_SET: scality-s3
REDIS_SENTINEL_PORT_NUMBER: '16379'
REDIS_SENTINEL_QUORUM: '1'
ports:
- 16379:16379
options: >-
--health-cmd "redis-cli -p 16379 ping"
--health-interval 10s
--health-timeout 5s
--health-retries 5
warp10:
image: ghcr.io/${{ github.repository }}/warp10-ci:${{ github.sha }}
env:
standalone.port: '4802'
warpscript.maxops: '10000000'
ENABLE_SENSISION: 't'
options: >-
--health-cmd "curl localhost:4802/api/v0/check"
--health-interval 10s
--health-timeout 5s
--health-retries 10
--health-start-period 60s
ports:
- 4802:4802
- 8082:8082
- 9718:9718
steps:
- name: Checkout
uses: actions/checkout@v4
with:
lfs: true
- uses: actions/setup-node@v4
with:
node-version: '16.13.2'
cache: yarn
- uses: actions/setup-python@v5
with:
python-version: '3.9'
cache: pip
- name: Install python deps
run: pip install -r requirements.txt
- name: install dependencies
run: yarn install --frozen-lockfile --network-concurrency 1
- name: ${{ matrix.test.name }}
run: ${{ matrix.test.command }}
env: ${{ matrix.test.env }}
tests-v2-with-vault:
needs:
- build-ci
runs-on: ubuntu-latest
env:
REINDEX_PYTHON_INTERPRETER: python3
services:
redis:
image: ghcr.io/${{ github.repository }}/redis-ci:${{ github.sha }}
ports:
- 6379:6379
- 9121:9121
options: >-
--health-cmd "redis-cli ping"
--health-interval 10s
--health-timeout 5s
--health-retries 5
redis-replica:
image: ghcr.io/${{ github.repository }}/redis-replica-ci:${{ github.sha }}
ports:
- 6380:6380
options: >-
--health-cmd "redis-cli -p 6380 ping"
--health-interval 10s
--health-timeout 5s
--health-retries 5
redis-sentinel:
image: bitnami/redis-sentinel:7.2.4
env:
REDIS_MASTER_SET: scality-s3
REDIS_SENTINEL_PORT_NUMBER: '16379'
REDIS_SENTINEL_QUORUM: '1'
ports:
- 16379:16379
options: >-
--health-cmd "redis-cli -p 16379 ping"
--health-interval 10s
--health-timeout 5s
--health-retries 5
warp10:
image: ghcr.io/${{ github.repository }}/warp10-ci:${{ github.sha }}
env:
standalone.port: '4802'
warpscript.maxops: '10000000'
ENABLE_SENSISION: 't'
ports:
- 4802:4802
- 8082:8082
- 9718:9718
options: >-
--health-cmd "curl localhost:4802/api/v0/check"
--health-interval 10s
--health-timeout 5s
--health-retries 10
--health-start-period 60s
vault:
image: ghcr.io/${{ github.repository }}/vault-ci:${{ github.sha }}
ports:
- 8500:8500
- 8600:8600
- 8700:8700
- 8800:8800
options: >-
--health-cmd "curl http://localhost:8500/_/healthcheck"
--health-interval 10s
--health-timeout 5s
--health-retries 10
steps:
- name: Checkout
uses: actions/checkout@v4
with:
lfs: true
- uses: actions/setup-node@v4
with:
node-version: '16.13.2'
cache: yarn
- uses: actions/setup-python@v5
with:
python-version: '3.9'
cache: pip
- name: Install python deps
run: pip install -r requirements.txt
- name: install dependencies
run: yarn install --frozen-lockfile --network-concurrency 1
- name: Wait for warp10 for 60 seconds
run: sleep 60
- name: run v2 functional tests
run: bash ./.github/scripts/run_ft_tests.bash true ft_test:v2
env:
UTAPI_CACHE_BACKEND: redis
UTAPI_SERVICE_USER_ENABLED: 'true'
UTAPI_LOG_LEVEL: trace
SETUP_CMD: "run start_v2:server"
- name: 'Debug: SSH to runner'
uses: scality/actions/action-ssh-to-runner@1.7.0
timeout-minutes: ${{ fromJSON(github.event.inputs.connection-timeout-m) }}
continue-on-error: true
with:
tmate-server-host: ${{ secrets.TMATE_SERVER_HOST }}
tmate-server-port: ${{ secrets.TMATE_SERVER_PORT }}
tmate-server-rsa-fingerprint: ${{ secrets.TMATE_SERVER_RSA_FINGERPRINT }}
tmate-server-ed25519-fingerprint: ${{ secrets.TMATE_SERVER_ED25519_FINGERPRINT }}
if: ${{ ( github.event.inputs.debug == true || github.event.inputs.debug == 'true' ) }}
tests-v2-without-sensision:
needs:
- build-ci
runs-on: ubuntu-latest
env:
REINDEX_PYTHON_INTERPRETER: python3
name: ${{ matrix.test.name }}
strategy:
fail-fast: false
matrix:
test:
- name: run v2 soft limit test
command: bash ./.github/scripts/run_ft_tests.bash true ft_test:softLimit
env:
UTAPI_CACHE_BACKEND: redis
UTAPI_LOG_LEVEL: trace
SETUP_CMD: "run start_v2:server"
- name: run v2 hard limit test
command: bash ./.github/scripts/run_ft_tests.bash true ft_test:hardLimit
env:
UTAPI_CACHE_BACKEND: redis
UTAPI_LOG_LEVEL: trace
SETUP_CMD: "run start_v2:server"
services:
redis:
image: ghcr.io/${{ github.repository }}/redis-ci:${{ github.sha }}
ports:
- 6379:6379
- 9121:9121
options: >-
--health-cmd "redis-cli ping"
--health-interval 10s
--health-timeout 5s
--health-retries 5
redis-replica:
image: ghcr.io/${{ github.repository }}/redis-replica-ci:${{ github.sha }}
ports:
- 6380:6380
options: >-
--health-cmd "redis-cli -p 6380 ping"
--health-interval 10s
--health-timeout 5s
--health-retries 5
redis-sentinel:
image: bitnami/redis-sentinel:7.2.4
env:
REDIS_MASTER_SET: scality-s3
REDIS_SENTINEL_PORT_NUMBER: '16379'
REDIS_SENTINEL_QUORUM: '1'
ports:
- 16379:16379
options: >-
--health-cmd "redis-cli -p 16379 ping"
--health-interval 10s
--health-timeout 5s
--health-retries 5
warp10:
image: ghcr.io/${{ github.repository }}/warp10-ci:${{ github.sha }}
env:
standalone.port: '4802'
warpscript.maxops: '10000000'
ports:
- 4802:4802
- 8082:8082
- 9718:9718
options: >-
--health-cmd "curl localhost:4802/api/v0/check"
--health-interval 10s
--health-timeout 5s
--health-retries 10
--health-start-period 60s
vault:
image: ghcr.io/${{ github.repository }}/vault-ci:${{ github.sha }}
ports:
- 8500:8500
- 8600:8600
- 8700:8700
- 8800:8800
options: >-
--health-cmd "curl http://localhost:8500/_/healthcheck"
--health-interval 10s
--health-timeout 5s
--health-retries 10
steps:
- name: Checkout
uses: actions/checkout@v4
with:
lfs: true
- uses: actions/setup-node@v4
with:
node-version: '16.13.2'
cache: yarn
- uses: actions/setup-python@v5
with:
python-version: '3.9'
cache: pip
- name: Install python deps
run: pip install -r requirements.txt
- name: install dependencies
run: yarn install --frozen-lockfile --network-concurrency 1
- name: Wait for warp10 a little bit
run: sleep 60
- name: ${{ matrix.test.name }}
run: ${{ matrix.test.command }}
env: ${{ matrix.test.env }}
- name: 'Debug: SSH to runner'
uses: scality/actions/action-ssh-to-runner@1.7.0
timeout-minutes: ${{ fromJSON(github.event.inputs.connection-timeout-m) }}
continue-on-error: true
with:
tmate-server-host: ${{ secrets.TMATE_SERVER_HOST }}
tmate-server-port: ${{ secrets.TMATE_SERVER_PORT }}
tmate-server-rsa-fingerprint: ${{ secrets.TMATE_SERVER_RSA_FINGERPRINT }}
tmate-server-ed25519-fingerprint: ${{ secrets.TMATE_SERVER_ED25519_FINGERPRINT }}
if: ${{ ( github.event.inputs.debug == true || github.event.inputs.debug == 'true' ) }}

5
.gitignore vendored

@ -1,4 +1,7 @@
dist
node_modules
node_modules/
**/node_modules/
logs
*.log
dump.rdb
.vscode/

5
.prettierrc Normal file

@ -0,0 +1,5 @@
# .prettierrc or .prettierrc.yaml
trailingComma: "all"
tabWidth: 4
arrowParens: avoid
singleQuote: true

31
Dockerfile Normal file

@ -0,0 +1,31 @@
FROM node:16.13.2-buster-slim
WORKDIR /usr/src/app
COPY package.json yarn.lock /usr/src/app/
RUN apt-get update \
&& apt-get install -y \
curl \
gnupg2
RUN curl -sS http://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - \
&& echo "deb http://dl.yarnpkg.com/debian/ stable main" | tee /etc/apt/sources.list.d/yarn.list
RUN apt-get update \
&& apt-get install -y jq git python3 build-essential yarn --no-install-recommends \
&& yarn cache clean \
&& yarn install --frozen-lockfile --production --ignore-optional --network-concurrency=1 \
&& apt-get autoremove --purge -y python3 git build-essential \
&& rm -rf /var/lib/apt/lists/* \
&& yarn cache clean \
&& rm -rf ~/.node-gyp \
&& rm -rf /tmp/yarn-*
# Keep the .git directory in order to properly report version
COPY . /usr/src/app
ENTRYPOINT ["/usr/src/app/docker-entrypoint.sh"]
CMD [ "yarn", "start" ]
EXPOSE 8100


@ -3,14 +3,21 @@
![Utapi logo](res/utapi-logo.png)
[![Circle CI][badgepub]](https://circleci.com/gh/scality/utapi)
[![Scality CI][badgepriv]](http://ci.ironmann.io/gh/scality/utapi)
Service Utilization API for tracking resource usage and metrics reporting
Service Utilization API for tracking resource usage and metrics reporting.
## Design
Please refer to the [design](/DESIGN.md) for more information.
## Server
To run the server:
```
npm start
```
## Client
The module exposes a client, named UtapiClient. Projects can use this client to
@ -80,13 +87,13 @@ Server is running.
1. Create an IAM user
```
aws iam --endpoint-url <endpoint> create-user --user-name utapiuser
aws iam --endpoint-url <endpoint> create-user --user-name <user-name>
```
2. Create access key for the user
```
aws iam --endpoint-url <endpoint> create-access-key --user-name utapiuser
aws iam --endpoint-url <endpoint> create-access-key --user-name <user-name>
```
3. Define a managed IAM policy
@ -195,12 +202,11 @@ Server is running.
5. Attach user to the managed policy
```
aws --endpoint-url <endpoint> iam attach-user-policy --user-name utapiuser
--policy-arn <policy arn>
aws --endpoint-url <endpoint> iam attach-user-policy --user-name
<user-name> --policy-arn <policy arn>
```
Now the user `utapiuser` has access to ListMetrics request in Utapi on all
buckets.
Now the user has access to ListMetrics request in Utapi on all buckets.
### Signing request with Auth V4
@ -216,16 +222,18 @@ following urls for reference.
You may also view examples making a request with Auth V4 using various languages
and AWS SDKs [here](/examples).
Alternatively, you can use a nifty command line tool available in Scality's S3.
Alternatively, you can use a nifty command line tool available in Scality's
CloudServer.
You can git clone S3 repo from here https://github.com/scality/S3.git and follow
the instructions in README to install the dependencies.
You can git clone the CloudServer repo from here
https://github.com/scality/cloudserver and follow the instructions in the README
to install the dependencies.
If you have S3 running inside a docker container you can docker exec into the S3
container as
If you have CloudServer running inside a docker container you can docker exec
into the CloudServer container as
```
docker exec -it <container id> bash
docker exec -it <container-id> bash
```
and then run the command
@ -263,7 +271,7 @@ Usage: list_metrics [options]
-v, --verbose
```
A typical call to list metrics for a bucket `demo` to Utapi in a https enabled
An example call to list metrics for a bucket `demo` to Utapi in a https enabled
deployment would be
```
@ -275,7 +283,7 @@ Both start and end times are time expressed as UNIX epoch timestamps **expressed
in milliseconds**.
Keep in mind, since Utapi metrics are normalized to the nearest 15 min.
interval, so start time and end time need to be in specific format as follows.
interval, start time and end time need to be in the specific format as follows.
#### Start time
@ -289,7 +297,7 @@ Date: Tue Oct 11 2016 17:35:25 GMT-0700 (PDT)
Unix timestamp (milliseconds): 1476232525320
Here's a typical JS method to get start timestamp
Here's an example JS method to get a start timestamp
```javascript
function getStartTimestamp(t) {
@ -309,7 +317,7 @@ seconds and milliseconds set to 59 and 999 respectively. So valid end timestamps
would look something like `09:14:59:999`, `09:29:59:999`, `09:44:59:999` and
`09:59:59:999`.
Here's a typical JS method to get end timestamp
Here's an example JS method to get an end timestamp
```javascript
function getEndTimestamp(t) {
@ -334,4 +342,3 @@ In order to contribute, please follow the
https://github.com/scality/Guidelines/blob/master/CONTRIBUTING.md).
[badgepub]: http://circleci.com/gh/scality/utapi.svg?style=svg
[badgepriv]: http://ci.ironmann.io/gh/scality/utapi.svg?style=svg
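
The README excerpt above references `getStartTimestamp` and `getEndTimestamp` helpers that this diff truncates. A hedged reconstruction consistent with the 15-minute normalization it describes, with the start floored to the interval and the end at the interval's last millisecond:

```javascript
// Floor to the enclosing 15-minute boundary, e.g. 09:05 -> 09:00:00.000.
function getStartTimestamp(t) {
    const time = new Date(t);
    const minutes = time.getMinutes();
    time.setMinutes(minutes - (minutes % 15), 0, 0);
    return time.getTime();
}

// Last millisecond of the enclosing interval, e.g. 09:05 -> 09:14:59.999.
function getEndTimestamp(t) {
    const time = new Date(t);
    const minutes = time.getMinutes();
    time.setMinutes(minutes - (minutes % 15) + 15, 0, 0);
    return time.getTime() - 1;
}
```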

15
bin/createCheckpoint.js Normal file

@ -0,0 +1,15 @@
const { tasks } = require('..');
const { LoggerContext } = require('../libV2/utils');
const { clients: warp10Clients } = require('../libV2/warp10');
const logger = new LoggerContext({
task: 'CreateCheckpoint',
});
const task = new tasks.CreateCheckpoint({ warp10: [warp10Clients[0]] });
task.setup()
.then(() => logger.info('Starting checkpoint creation'))
.then(() => task.start())
.then(() => logger.info('Checkpoint creation started'));

14
bin/createSnapshot.js Normal file

@ -0,0 +1,14 @@
const { tasks } = require('..');
const { LoggerContext } = require('../libV2/utils');
const { clients: warp10Clients } = require('../libV2/warp10');
const logger = new LoggerContext({
task: 'CreateSnapshot',
});
const task = new tasks.CreateSnapshot({ warp10: [warp10Clients[0]] });
task.setup()
.then(() => logger.info('Starting snapshot creation'))
.then(() => task.start())
.then(() => logger.info('Snapshot creation started'));

15
bin/diskUsage.js Normal file

@ -0,0 +1,15 @@
const { tasks } = require('..');
const { LoggerContext } = require('../libV2/utils');
const { clients: warp10Clients } = require('../libV2/warp10');
const logger = new LoggerContext({
task: 'MonitorDiskUsage',
});
const task = new tasks.MonitorDiskUsage({ warp10: [warp10Clients[0]] });
task.setup()
.then(() => logger.info('Starting disk usage monitor'))
.then(() => task.start())
.then(() => logger.info('Disk usage monitor started'));

276
bin/ensureServiceUser Executable file

@ -0,0 +1,276 @@
#! /usr/bin/env node
// TODO
// - deduplicate with Vault's seed script at https://github.com/scality/Vault/pull/1627
// - add permission boundaries to user when https://scality.atlassian.net/browse/VAULT-4 is implemented
const { errors } = require('arsenal');
const program = require('commander');
const werelogs = require('werelogs');
const async = require('async');
const { IAM } = require('aws-sdk');
const { version } = require('../package.json');
const systemPrefix = '/scality-internal/';
function generateUserPolicyDocument() {
return {
Version: '2012-10-17',
Statement: {
Effect: 'Allow',
Action: 'utapi:ListMetrics',
Resource: 'arn:scality:utapi:::*/*',
},
};
}
function createIAMClient(opts) {
return new IAM({
endpoint: opts.iamEndpoint,
});
}
function needsCreation(v) {
if (Array.isArray(v)) {
return !v.length;
}
return !v;
}
class BaseHandler {
constructor(serviceName, iamClient, log) {
this.serviceName = serviceName;
this.iamClient = iamClient;
this.log = log;
}
applyWaterfall(values, done) {
this.log.debug('applyWaterfall', { values, type: this.resourceType });
const v = values[this.resourceType];
if (needsCreation(v)) {
this.log.debug('creating', { v, type: this.resourceType });
return this.create(values)
.then(res =>
done(null, Object.assign(values, {
[this.resourceType]: res,
})))
.catch(done);
}
this.log.debug('conflicts check', { v, type: this.resourceType });
if (this.conflicts(v)) {
return done(errors.EntityAlreadyExists.customizeDescription(
`${this.resourceType} ${this.serviceName} already exists and conflicts with the expected value.`));
}
this.log.debug('nothing to do', { v, type: this.resourceType });
return done(null, values);
}
}
class UserHandler extends BaseHandler {
get resourceType() {
return 'user';
}
collect() {
return this.iamClient.getUser({
UserName: this.serviceName,
})
.promise()
.then(res => res.User);
}
create(allResources) {
return this.iamClient.createUser({
UserName: this.serviceName,
Path: systemPrefix,
})
.promise()
.then(res => res.User);
}
conflicts(u) {
return u.Path !== systemPrefix;
}
}
class PolicyHandler extends BaseHandler {
get resourceType() {
return 'policy';
}
collect() {
return this.iamClient.listPolicies({
MaxItems: 100,
OnlyAttached: false,
Scope: 'All',
})
.promise()
.then(res => res.Policies.find(p => p.PolicyName === this.serviceName));
}
create(allResources) {
const doc = generateUserPolicyDocument();
return this.iamClient.createPolicy({
PolicyName: this.serviceName,
PolicyDocument: JSON.stringify(doc),
Path: systemPrefix,
})
.promise()
.then(res => res.Policy);
}
conflicts(p) {
return p.Path !== systemPrefix;
}
}
class PolicyAttachmentHandler extends BaseHandler {
get resourceType() {
return 'policyAttachment';
}
collect() {
return this.iamClient.listAttachedUserPolicies({
UserName: this.serviceName,
MaxItems: 100,
})
.promise()
.then(res => res.AttachedPolicies)
}
create(allResources) {
return this.iamClient.attachUserPolicy({
PolicyArn: allResources.policy.Arn,
UserName: this.serviceName,
})
.promise();
}
conflicts(p) {
return false;
}
}
class AccessKeyHandler extends BaseHandler {
get resourceType() {
return 'accessKey';
}
collect() {
return this.iamClient.listAccessKeys({
UserName: this.serviceName,
MaxItems: 100,
})
.promise()
.then(res => res.AccessKeyMetadata)
}
create(allResources) {
return this.iamClient.createAccessKey({
UserName: this.serviceName,
})
.promise()
.then(res => res.AccessKey);
}
conflicts(a) {
return false;
}
}
function collectResource(v, done) {
v.collect()
.then(res => done(null, res))
.catch(err => {
if (err.code === 'NoSuchEntity') {
return done(null, null);
}
done(err);
});
}
function collectResourcesFromHandlers(handlers, cb) {
const tasks = handlers.reduce((acc, v) => ({
[v.resourceType]: done => collectResource(v, done),
...acc,
}), {});
async.parallel(tasks, cb);
}
function buildServiceUserHandlers(serviceName, client, log) {
return [
UserHandler,
PolicyHandler,
PolicyAttachmentHandler,
AccessKeyHandler,
].map(h => new h(serviceName, client, log));
}
function apply(client, serviceName, log, cb) {
const handlers = buildServiceUserHandlers(serviceName, client, log);
async.waterfall([
done => collectResourcesFromHandlers(handlers, done),
...handlers.map(h => h.applyWaterfall.bind(h)),
(values, done) => done(null, values.accessKey),
], cb);
}
function wrapAction(actionFunc, serviceName, options) {
werelogs.configure({
level: options.logLevel,
dump: options.logDumpLevel,
});
const log = new werelogs.Logger(process.argv[1]).newRequestLogger();
const client = createIAMClient(options);
actionFunc(client, serviceName, log, (err, data) => {
if (err) {
log.error('failed', {
data,
error: err,
});
if (err.EntityAlreadyExists) {
log.error(`run "${process.argv[1]} purge ${serviceName}" to fix.`);
}
process.exit(1);
}
log.info('success', { data });
process.exit();
});
}
program.version(version);
[
{
name: 'apply <service-name>',
actionFunc: apply,
},
].forEach(cmd => {
program
.command(cmd.name)
.option('--iam-endpoint <url>', 'IAM endpoint', 'http://localhost:8600')
.option('--log-level <level>', 'log level', 'info')
.option('--log-dump-level <level>', 'log level that triggers a dump of the debug buffer', 'error')
.action(wrapAction.bind(null, cmd.actionFunc));
});
const validCommands = program.commands.map(n => n._name);
// Is the command given invalid or are there too few arguments passed
if (!validCommands.includes(process.argv[2])) {
program.outputHelp();
process.stdout.write('\n');
process.exit(1);
} else {
program.parse(process.argv);
}

15
bin/ingestShards.js Normal file

@ -0,0 +1,15 @@
const { tasks } = require('..');
const { LoggerContext } = require('../libV2/utils');
const { clients: warp10Clients } = require('../libV2/warp10');
const logger = new LoggerContext({
task: 'IngestShard',
});
const task = new tasks.IngestShard({ warp10: warp10Clients });
task.setup()
.then(() => logger.info('Starting shard ingestion'))
.then(() => task.start())
.then(() => logger.info('Ingestion started'));

15
bin/manualAdjust.js Normal file

@ -0,0 +1,15 @@
const { tasks } = require('..');
const { LoggerContext } = require('../libV2/utils');
const { clients: warp10Clients } = require('../libV2/warp10');
const logger = new LoggerContext({
task: 'ManualAdjust',
});
const task = new tasks.ManualAdjust({ warp10: warp10Clients });
task.setup()
.then(() => logger.info('Starting manual adjustment'))
.then(() => task.start())
.then(() => logger.info('Manual adjustment started'));

14
bin/migrate.js Normal file

@ -0,0 +1,14 @@
const { tasks } = require('..');
const { LoggerContext } = require('../libV2/utils');
const { clients: warp10Clients } = require('../libV2/warp10');
const logger = new LoggerContext({
task: 'Migrate',
});
const task = new tasks.MigrateTask({ warp10: [warp10Clients[0]] });
task.setup()
.then(() => logger.info('Starting utapi v1 => v2 migration'))
.then(() => task.start())
.then(() => logger.info('Migration started'));

15
bin/reindex.js Normal file

@ -0,0 +1,15 @@
const { tasks } = require('..');
const { LoggerContext } = require('../libV2/utils');
const { clients: warp10Clients } = require('../libV2/warp10');
const logger = new LoggerContext({
task: 'Reindex',
});
const task = new tasks.ReindexTask({ warp10: [warp10Clients[0]] });
task.setup()
.then(() => logger.info('Starting Reindex daemon'))
.then(() => task.start())
.then(() => logger.info('Reindex started'));

15
bin/repair.js Normal file

@ -0,0 +1,15 @@
const { tasks } = require('..');
const { LoggerContext } = require('../libV2/utils');
const { clients: warp10Clients } = require('../libV2/warp10');
const logger = new LoggerContext({
task: 'Repair',
});
const task = new tasks.RepairTask({ warp10: [warp10Clients[0]] });
task.setup()
.then(() => logger.info('Starting Repair daemon'))
.then(() => task.start())
.then(() => logger.info('Repair started'));

9
bin/server.js Normal file

@ -0,0 +1,9 @@
const { startUtapiServer } = require('..');
const { LoggerContext } = require('../libV2/utils');
const logger = new LoggerContext({ module: 'entrypoint' });
startUtapiServer().then(
() => logger.info('utapi started'),
error => logger.error('Unhandled Error', { error }),
);

75
docker-compose.yaml Normal file

@ -0,0 +1,75 @@
version: '3.8'
x-models:
warp10: &warp10
build:
context: .
dockerfile: ./images/warp10/Dockerfile
volumes: [ $PWD/warpscript:/usr/local/share/warpscript ]
warp10_env: &warp10_env
ENABLE_WARPSTUDIO: 'true'
ENABLE_SENSISION: 'true'
warpscript.repository.refresh: 1000
warpscript.maxops: 1000000000
warpscript.maxops.hard: 1000000000
warpscript.maxfetch: 1000000000
warpscript.maxfetch.hard: 1000000000
warpscript.extension.debug: io.warp10.script.ext.debug.DebugWarpScriptExtension
warpscript.maxrecursion: 1000
warpscript.repository.directory: /usr/local/share/warpscript
warpscript.extension.logEvent: io.warp10.script.ext.logging.LoggingWarpScriptExtension
redis: &redis
build:
context: .
dockerfile: ./images/redis/Dockerfile
services:
redis-0:
image: redis:7.2.4
command: redis-server --port 6379 --slave-announce-ip "${EXTERNAL_HOST}"
ports:
- 6379:6379
environment:
- HOST_IP="${EXTERNAL_HOST}"
redis-1:
image: redis:7.2.4
command: redis-server --port 6380 --slaveof "${EXTERNAL_HOST}" 6379 --slave-announce-ip "${EXTERNAL_HOST}"
ports:
- 6380:6380
environment:
- HOST_IP="${EXTERNAL_HOST}"
redis-sentinel-0:
image: redis:7.2.4
command: |-
bash -c 'cat > /tmp/sentinel.conf <<EOF
port 16379
logfile ""
dir /tmp
sentinel announce-ip ${EXTERNAL_HOST}
sentinel announce-port 16379
sentinel monitor scality-s3 "${EXTERNAL_HOST}" 6379 1
EOF
redis-sentinel /tmp/sentinel.conf'
environment:
- HOST_IP="${EXTERNAL_HOST}"
ports:
- 16379:16379
warp10:
<< : *warp10
environment:
<< : *warp10_env
ports:
- 4802:4802
- 8081:8081
- 9718:9718
volumes:
- /tmp/warp10:/data
- '${PWD}/warpscript:/usr/local/share/warpscript'

47
docker-entrypoint.sh Executable file

@ -0,0 +1,47 @@
#!/bin/bash
# set -e stops the execution of a script if a command or pipeline has an error
set -e
# modifying config.json
JQ_FILTERS_CONFIG="."
if [[ "$LOG_LEVEL" ]]; then
if [[ "$LOG_LEVEL" == "info" || "$LOG_LEVEL" == "debug" || "$LOG_LEVEL" == "trace" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .log.logLevel=\"$LOG_LEVEL\""
echo "Log level has been modified to $LOG_LEVEL"
else
echo "The log level you provided is incorrect (info/debug/trace)"
fi
fi
if [[ "$WORKERS" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .workers=\"$WORKERS\""
fi
if [[ "$REDIS_HOST" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .redis.host=\"$REDIS_HOST\""
fi
if [[ "$REDIS_PORT" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .redis.port=\"$REDIS_PORT\""
fi
if [[ "$VAULTD_HOST" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .vaultd.host=\"$VAULTD_HOST\""
fi
if [[ "$VAULTD_PORT" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .vaultd.port=\"$VAULTD_PORT\""
fi
if [[ "$HEALTHCHECKS_ALLOWFROM" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .healthChecks.allowFrom=[\"$HEALTHCHECKS_ALLOWFROM\"]"
fi
if [[ $JQ_FILTERS_CONFIG != "." ]]; then
jq "$JQ_FILTERS_CONFIG" config.json > config.json.tmp
mv config.json.tmp config.json
fi
exec "$@"

42
docs/RELEASE.md Normal file

@ -0,0 +1,42 @@
# Utapi Release Plan
## Docker Image Generation
Docker images are hosted on [ghcr.io](https://github.com/orgs/scality/packages).
Utapi has one namespace there:
* Namespace: ghcr.io/scality/utapi
With every CI build, the CI will push images, tagging the
content with the developer branch's short SHA-1 commit hash.
This allows those images to be used by developers, CI builds,
build chain and so on.
Tagged versions of utapi will be stored in the production namespace.
## How to Pull Docker Images
```sh
docker pull ghcr.io/scality/utapi:<commit hash>
docker pull ghcr.io/scality/utapi:<tag>
```
## Release Process
To release a production image:
* Name the tag for the repository and Docker image.
* Use the `yarn version` command with the same tag to update `package.json`.
* Create a PR and merge the `package.json` change.
* Tag the repository using the same tag.
* [Force a build] using:
* A given branch that ideally matches the tag.
* The `release` stage.
* An extra property with the name `tag` and its value being the actual tag.
[Force a build]:
https://eve.devsca.com/github/scality/utapi/#/builders/bootstrap/force/force


@ -1,39 +0,0 @@
---
version: 0.2
branches:
default:
stage: pre-merge
stages:
pre-merge:
worker:
type: docker
path: eve/workers/unit_and_feature_tests
volumes:
- '/home/eve/workspace'
steps:
- Git:
name: fetch source
repourl: '%(prop:git_reference)s'
shallow: True
retryFetch: True
haltOnFailure: True
- ShellCommand:
name: npm install
command: npm install
# - ShellCommand:
# name: get api node modules from cache
# command: mv /home/eve/node_reqs/node_modules .
- ShellCommand:
name: run static analysis tools on markdown
command: npm run lint_md
- ShellCommand:
name: run static analysis tools on code
command: npm run lint
- ShellCommand:
name: run unit tests
command: npm test
- ShellCommand:
name: run feature tests
command: npm run ft_test


@ -1,26 +0,0 @@
FROM buildpack-deps:jessie-curl
#
# Install apt packages needed by backbeat and buildbot_worker
#
ENV LANG C.UTF-8
COPY utapi_packages.list buildbot_worker_packages.list /tmp/
RUN curl -sL https://deb.nodesource.com/setup_6.x | bash - \
&& apt-get update -qq \
&& cat /tmp/*packages.list | xargs apt-get install -y \
&& pip install pip==9.0.1 \
&& rm -rf /var/lib/apt/lists/* \
&& rm -f /tmp/*packages.list \
&& rm -f /etc/supervisor/conf.d/*.conf
#
# Run buildbot-worker on startup through supervisor
#
ARG BUILDBOT_VERSION
RUN pip install buildbot-worker==$BUILDBOT_VERSION
ADD supervisor/buildbot_worker.conf /etc/supervisor/conf.d/
CMD ["supervisord", "-n"]


@ -1,9 +0,0 @@
ca-certificates
git
libffi-dev
libssl-dev
python2.7
python2.7-dev
python-pip
sudo
supervisor


@ -1,9 +0,0 @@
[program:buildbot_worker]
command=/bin/sh -c 'buildbot-worker create-worker . "%(ENV_BUILDMASTER)s:%(ENV_BUILDMASTER_PORT)s" "%(ENV_WORKERNAME)s" "%(ENV_WORKERPASS)s" && buildbot-worker start --nodaemon'
autostart=true
autorestart=false
[program:redis]
command=/usr/bin/redis-server
autostart=true
autorestart=false


@ -1,3 +0,0 @@
build-essential
redis-server
nodejs


@ -1,5 +1,6 @@
const http = require('http');
const aws4 = require('aws4'); // eslint-disable-line import/no-unresolved
// eslint-disable-next-line import/no-extraneous-dependencies
const aws4 = require('aws4');
// Input AWS access key, secret key, and session token.
const accessKeyId = 'EO4FRH6BA2L7FCK0EKVT';


@ -0,0 +1,90 @@
import sys, os, base64, datetime, hashlib, hmac, calendar, json
import requests # pip install requests
access_key = '9EQTVVVCLSSG6QBMNKO5'
secret_key = 'T5mK/skkkwJ/mTjXZnHyZ5UzgGIN=k9nl4dyTmDH'
method = 'POST'
service = 's3'
host = 'localhost:8100'
region = 'us-east-1'
canonical_uri = '/buckets'
canonical_querystring = 'Action=ListMetrics&Version=20160815'
content_type = 'application/x-amz-json-1.0'
algorithm = 'AWS4-HMAC-SHA256'
t = datetime.datetime.utcnow()
amz_date = t.strftime('%Y%m%dT%H%M%SZ')
date_stamp = t.strftime('%Y%m%d')
# Key derivation functions. See:
# http://docs.aws.amazon.com/general/latest/gr/signature-v4-examples.html#signature-v4-examples-python
def sign(key, msg):
return hmac.new(key, msg.encode("utf-8"), hashlib.sha256).digest()
def getSignatureKey(key, date_stamp, regionName, serviceName):
kDate = sign(('AWS4' + key).encode('utf-8'), date_stamp)
kRegion = sign(kDate, regionName)
kService = sign(kRegion, serviceName)
kSigning = sign(kService, 'aws4_request')
return kSigning
def get_start_time(t):
start = t.replace(minute=t.minute - t.minute % 15, second=0, microsecond=0)
return calendar.timegm(start.utctimetuple()) * 1000;
def get_end_time(t):
end = t.replace(minute=t.minute - t.minute % 15, second=0, microsecond=0)
return calendar.timegm(end.utctimetuple()) * 1000 - 1;
start_time = get_start_time(datetime.datetime(2016, 1, 1, 0, 0, 0, 0))
end_time = get_end_time(datetime.datetime(2016, 2, 1, 0, 0, 0, 0))
# Request parameters for listing Utapi bucket metrics--passed in a JSON block.
bucketListing = {
'buckets': [ 'utapi-test' ],
'timeRange': [ start_time, end_time ],
}
request_parameters = json.dumps(bucketListing)
payload_hash = hashlib.sha256(request_parameters).hexdigest()
canonical_headers = \
'content-type:{0}\nhost:{1}\nx-amz-content-sha256:{2}\nx-amz-date:{3}\n' \
.format(content_type, host, payload_hash, amz_date)
signed_headers = 'content-type;host;x-amz-content-sha256;x-amz-date'
canonical_request = '{0}\n{1}\n{2}\n{3}\n{4}\n{5}' \
.format(method, canonical_uri, canonical_querystring, canonical_headers,
signed_headers, payload_hash)
credential_scope = '{0}/{1}/{2}/aws4_request' \
.format(date_stamp, region, service)
string_to_sign = '{0}\n{1}\n{2}\n{3}' \
.format(algorithm, amz_date, credential_scope,
hashlib.sha256(canonical_request).hexdigest())
signing_key = getSignatureKey(secret_key, date_stamp, region, service)
signature = hmac.new(signing_key, (string_to_sign).encode('utf-8'),
hashlib.sha256).hexdigest()
authorization_header = \
'{0} Credential={1}/{2}, SignedHeaders={3}, Signature={4}' \
.format(algorithm, access_key, credential_scope, signed_headers, signature)
# The 'host' header is added automatically by the Python 'requests' library.
headers = {
'Content-Type': content_type,
'X-Amz-Content-Sha256': payload_hash,
'X-Amz-Date': amz_date,
'Authorization': authorization_header
}
endpoint = 'http://' + host + canonical_uri + '?' + canonical_querystring;
r = requests.post(endpoint, data=request_parameters, headers=headers)
print (r.text)
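
For comparison, here is a minimal Node.js sketch of the same ListMetrics request, built on the aws4 package that the repository's own JavaScript example above already requires. The endpoint, credentials, and time range are carried over from the Python script; treat it as an illustrative sketch under those assumptions, not a second official client.

// Hypothetical Node.js equivalent of the Python signing example above.
const http = require('http');
const aws4 = require('aws4');

const body = JSON.stringify({
    buckets: ['utapi-test'],
    timeRange: [1451606400000, 1454284799999], // same Jan 2016 window as above
});

// aws4.sign() fills in the X-Amz-Date and Authorization headers in place.
const signed = aws4.sign({
    host: 'localhost:8100',
    method: 'POST',
    path: '/buckets?Action=ListMetrics&Version=20160815',
    service: 's3',
    region: 'us-east-1',
    headers: { 'Content-Type': 'application/x-amz-json-1.0' },
    body,
}, {
    accessKeyId: '9EQTVVVCLSSG6QBMNKO5',
    secretAccessKey: 'T5mK/skkkwJ/mTjXZnHyZ5UzgGIN=k9nl4dyTmDH',
});

const req = http.request({ ...signed, hostname: 'localhost', port: 8100 },
    res => res.pipe(process.stdout));
req.end(body);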

View File

@ -0,0 +1,20 @@
FROM ghcr.io/scality/federation/nodesvc-base:7.10.5.0
ENV UTAPI_CONFIG_FILE=${CONF_DIR}/config.json
WORKDIR ${HOME_DIR}/utapi
COPY ./package.json ./yarn.lock ${HOME_DIR}/utapi/
# Remove when gitcache is sorted out
RUN rm /root/.gitconfig
RUN yarn install --production --frozen-lockfile --network-concurrency 1
COPY . ${HOME_DIR}/utapi
RUN chown -R ${USER} ${HOME_DIR}/utapi
USER ${USER}
CMD bash -c "source ${CONF_DIR}/env && export && supervisord -c ${CONF_DIR}/${SUPERVISORD_CONF}"

17
images/redis/Dockerfile Normal file
View File

@ -0,0 +1,17 @@
FROM redis:alpine
ENV S6_VERSION 2.0.0.1
ENV EXPORTER_VERSION 1.24.0
ENV S6_BEHAVIOUR_IF_STAGE2_FAILS 2
RUN wget https://github.com/just-containers/s6-overlay/releases/download/v${S6_VERSION}/s6-overlay-amd64.tar.gz -O /tmp/s6-overlay-amd64.tar.gz \
&& tar xzf /tmp/s6-overlay-amd64.tar.gz -C / \
&& rm -rf /tmp/s6-overlay-amd64.tar.gz
RUN wget https://github.com/oliver006/redis_exporter/releases/download/v${EXPORTER_VERSION}/redis_exporter-v${EXPORTER_VERSION}.linux-amd64.tar.gz -O redis_exporter.tar.gz \
&& tar xzf redis_exporter.tar.gz -C / \
&& cd .. \
&& mv /redis_exporter-v${EXPORTER_VERSION}.linux-amd64/redis_exporter /usr/local/bin/redis_exporter
ADD ./images/redis/s6 /etc
CMD /init

View File

@ -0,0 +1,4 @@
#!/usr/bin/with-contenv sh
echo "starting redis exporter"
exec redis_exporter

View File

@ -0,0 +1,4 @@
#!/usr/bin/with-contenv sh
echo "starting redis"
exec redis-server

View File

@ -0,0 +1,2 @@
standalone.host = 0.0.0.0
standalone.port = 4802

56
images/warp10/Dockerfile Normal file
View File

@ -0,0 +1,56 @@
FROM golang:1.14-alpine as builder
ENV WARP10_EXPORTER_VERSION 2.7.5
RUN apk add zip unzip build-base \
&& wget -q -O exporter.zip https://github.com/centreon/warp10-sensision-exporter/archive/refs/heads/master.zip \
&& unzip exporter.zip \
&& cd warp10-sensision-exporter-master \
&& go mod download \
&& cd tools \
&& go run generate_sensision_metrics.go ${WARP10_EXPORTER_VERSION} \
&& cp sensision.go ../collector/ \
&& cd .. \
&& go build -a -o /usr/local/go/warp10_sensision_exporter
FROM ghcr.io/scality/utapi/warp10:2.8.1-95-g73e7de80
# Override baked in version
# Remove when updating to a numbered release
ENV WARP10_VERSION 2.8.1-95-g73e7de80
ENV S6_VERSION 2.0.0.1
ENV S6_BEHAVIOUR_IF_STAGE2_FAILS 2
ENV WARP10_CONF_TEMPLATES ${WARP10_HOME}/conf.templates/standalone
ENV SENSISION_DATA_DIR /data/sensision
ENV SENSISION_PORT 8082
# Modify Warp 10 default config
ENV standalone.home /opt/warp10
ENV warpscript.repository.directory /usr/local/share/warpscript
ENV warp.token.file /static.tokens
ENV warpscript.extension.protobuf io.warp10.ext.protobuf.ProtobufWarpScriptExtension
ENV warpscript.extension.macrovalueencoder 'io.warp10.continuum.ingress.MacroValueEncoder$Extension'
# ENV warpscript.extension.debug io.warp10.script.ext.debug.DebugWarpScriptExtension
RUN wget https://github.com/just-containers/s6-overlay/releases/download/v${S6_VERSION}/s6-overlay-amd64.tar.gz -O /tmp/s6-overlay-amd64.tar.gz \
&& tar xzf /tmp/s6-overlay-amd64.tar.gz -C / \
&& rm -rf /tmp/s6-overlay-amd64.tar.gz
# Install jmx exporter
ADD https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/0.16.1/jmx_prometheus_javaagent-0.16.1.jar /opt/jmx_prom_agent.jar
ADD ./images/warp10/jmx_prom.yaml /opt/jmx_prom.yaml
# Install protobuf extension
ADD ./images/warp10/warp10-ext-protobuf-1.2.2-uberjar.jar /opt/warp10/lib/
# Install Sensision exporter
COPY --from=builder /usr/local/go/warp10_sensision_exporter /usr/local/bin/warp10_sensision_exporter
ADD ./images/warp10/s6 /etc
ADD ./warpscript /usr/local/share/warpscript
ADD ./images/warp10/static.tokens /
ADD ./images/warp10/90-default-host-port.conf $WARP10_CONF_TEMPLATES/90-default-host-port.conf
CMD /init

View File

@ -0,0 +1,2 @@
---
startDelaySeconds: 0

View File

@ -0,0 +1,22 @@
#!/usr/bin/with-contenv bash
set -eu
ensureDir() {
    if [ ! -d "$1" ]; then
        mkdir -p "$1"
        echo "Created directory $1"
    fi
}
ensureDir "$WARP10_DATA_DIR"
ensureDir "$WARP10_DATA_DIR/logs"
ensureDir "$WARP10_DATA_DIR/conf"
ensureDir "$WARP10_DATA_DIR/data/leveldb"
ensureDir "$WARP10_DATA_DIR/data/datalog"
ensureDir "$WARP10_DATA_DIR/data/datalog_done"
ensureDir "$SENSISION_DATA_DIR"
ensureDir "$SENSISION_DATA_DIR/logs"
ensureDir "$SENSISION_DATA_DIR/conf"
ensureDir "/var/run/sensision"

View File

@ -0,0 +1,14 @@
#!/usr/bin/with-contenv bash
set -eu
WARP10_JAR=${WARP10_HOME}/bin/warp10-${WARP10_VERSION}.jar
WARP10_CONFIG_DIR="$WARP10_DATA_DIR/conf"
WARP10_SECRETS="$WARP10_CONFIG_DIR/00-secrets.conf"
if [ ! -f "$WARP10_SECRETS" ]; then
cp "$WARP10_CONF_TEMPLATES/00-secrets.conf.template" "$WARP10_SECRETS"
/usr/bin/java -cp ${WARP10_JAR} -Dfile.encoding=UTF-8 io.warp10.GenerateCryptoKey ${WARP10_SECRETS}
echo "warp10.manager.secret = scality" >> $WARP10_SECRETS
fi

View File

@ -0,0 +1,14 @@
#!/usr/bin/with-contenv sh
echo "Installing warp 10 config"
for path in $WARP10_CONF_TEMPLATES/*; do
    name="$(basename $path .template)"
    if [ ! -f "$WARP10_DATA_DIR/conf/$name" ]; then
        cp "$path" "$WARP10_DATA_DIR/conf/$name"
        echo "Copied $name to $WARP10_DATA_DIR/conf/$name"
    fi
done
echo "Installing sensision config"
cp ${SENSISION_HOME}/templates/sensision.template ${SENSISION_DATA_DIR}/conf/sensision.conf
cp ${SENSISION_HOME}/templates/log4j.properties.template ${SENSISION_DATA_DIR}/conf/log4j.properties

View File

@ -0,0 +1,23 @@
#!/usr/bin/with-contenv sh
WARP10_CONFIG_DIR="$WARP10_DATA_DIR/conf"
ensure_link() {
    if [ ! -L "$1" ]; then
        rm -rf "$1"
        ln -s "$2" "$1"
        echo "Created symlink $1->$2"
    fi
}
ensure_link "$WARP10_HOME/logs" "$WARP10_DATA_DIR/logs"
ensure_link "$WARP10_HOME/etc/conf.d" "$WARP10_DATA_DIR/conf"
ensure_link "$WARP10_HOME/leveldb" "$WARP10_DATA_DIR/data/leveldb"
ensure_link "$WARP10_HOME/datalog" "$WARP10_DATA_DIR/data/datalog"
ensure_link "$WARP10_HOME/datalog_done" "$WARP10_DATA_DIR/data/datalog_done"
ensure_link "$SENSISION_HOME/etc" "${SENSISION_DATA_DIR}/conf"
ensure_link "$SENSISION_HOME/logs" "${SENSISION_DATA_DIR}/logs"
ensure_link /var/run/sensision/metrics ${SENSISION_HOME}/metrics
ensure_link /var/run/sensision/targets ${SENSISION_HOME}/targets
ensure_link /var/run/sensision/queued ${SENSISION_HOME}/queued

View File

@ -0,0 +1,14 @@
#!/usr/bin/with-contenv sh
JAVA="/usr/bin/java"
WARP10_JAR=${WARP10_HOME}/bin/warp10-${WARP10_VERSION}.jar
WARP10_CP="${WARP10_HOME}/etc:${WARP10_JAR}:${WARP10_HOME}/lib/*"
WARP10_INIT="io.warp10.standalone.WarpInit"
LEVELDB_HOME="/opt/warp10/leveldb"
# Create leveldb database
if [ "$(find -L ${LEVELDB_HOME} -maxdepth 1 -type f | wc -l)" -eq 0 ]; then
echo "Init leveldb database..." | tee -a "$WARP10_HOME/logs/warp10.log"
$JAVA -cp "$WARP10_CP" "$WARP10_INIT" "$LEVELDB_HOME" | tee -a "$WARP10_HOME/logs/warp10.log" 2>&1
fi

View File

@ -0,0 +1,11 @@
#!/usr/bin/with-contenv sh
WARPSTUDIO_CONFIG=${WARP10_CONFIG_DIR}/80-warpstudio-plugin.conf
if [ -n "$ENABLE_WARPSTUDIO" ]; then
cat > $WARPSTUDIO_CONFIG << EOF
warp10.plugin.warpstudio = io.warp10.plugins.warpstudio.WarpStudioPlugin
warpstudio.port = 8081
warpstudio.host = \${standalone.host}
EOF
fi

View File

@ -0,0 +1,9 @@
#!/usr/bin/with-contenv sh
chmod 1733 "$SENSISION_HOME/metrics"
chmod 1733 "$SENSISION_HOME/targets"
chmod 700 "$SENSISION_HOME/queued"
sed -i 's/@warp:WriteToken@/'"writeTokenStatic"'/' $SENSISION_DATA_DIR/conf/sensision.conf
sed -i -e "s_^sensision\.home.*_sensision\.home = ${SENSISION_HOME}_" $SENSISION_DATA_DIR/conf/sensision.conf
sed -i -e 's_^sensision\.qf\.url\.default.*_sensision\.qf\.url\.default=http://127.0.0.1:4802/api/v0/update_' $SENSISION_DATA_DIR/conf/sensision.conf

View File

@ -0,0 +1,12 @@
#!/usr/bin/with-contenv sh
EXPORTER_CMD="warp10_sensision_exporter --warp10.url=http://localhost:${SENSISION_PORT}/metrics"
if [ -f "/usr/local/bin/warp10_sensision_exporter" -a -n "$ENABLE_SENSISION" ]; then
echo "Starting Sensision exporter with $EXPORTER_CMD ..."
exec $EXPORTER_CMD
else
echo "Sensision is disabled. Not starting exporter."
# wait indefinitely
exec tail -f /dev/null
fi

View File

@ -0,0 +1,25 @@
#!/usr/bin/with-contenv sh
JAVA="/usr/bin/java"
JAVA_OPTS=""
SENSISION_CONFIG=${SENSISION_DATA_DIR}/conf/sensision.conf
SENSISION_JAR=${SENSISION_HOME}/bin/sensision-${SENSISION_VERSION}.jar
SENSISION_CP=${SENSISION_HOME}/etc:${SENSISION_JAR}
SENSISION_CLASS=io.warp10.sensision.Main
export MALLOC_ARENA_MAX=1
if [ -z "$SENSISION_HEAP" ]; then
SENSISION_HEAP=64m
fi
SENSISION_CMD="${JAVA} ${JAVA_OPTS} -Xmx${SENSISION_HEAP} -Dsensision.server.port=${SENSISION_PORT} ${SENSISION_OPTS} -Dsensision.config=${SENSISION_CONFIG} -cp ${SENSISION_CP} ${SENSISION_CLASS}"
if [ -n "$ENABLE_SENSISION" ]; then
echo "Starting Sensision with $SENSISION_CMD ..."
exec $SENSISION_CMD | tee -a ${SENSISION_HOME}/logs/sensision.log
else
echo "Sensision is disabled. Not starting."
# wait indefinitely
exec tail -f /dev/null
fi

View File

@ -0,0 +1,43 @@
#!/usr/bin/with-contenv sh
export SENSISIONID=warp10
export MALLOC_ARENA_MAX=1
JAVA="/usr/bin/java"
WARP10_JAR=${WARP10_HOME}/bin/warp10-${WARP10_VERSION}.jar
WARP10_CLASS=io.warp10.standalone.Warp
WARP10_CP="${WARP10_HOME}/etc:${WARP10_JAR}:${WARP10_HOME}/lib/*"
WARP10_CONFIG_DIR="$WARP10_DATA_DIR/conf"
CONFIG_FILES="$(find ${WARP10_CONFIG_DIR} -not -path '*/.*' -name '*.conf' | sort | tr '\n' ' ' 2> /dev/null)"
LOG4J_CONF=${WARP10_HOME}/etc/log4j.properties
if [ -z "$WARP10_HEAP" ]; then
WARP10_HEAP=1g
fi
if [ -z "$WARP10_HEAP_MAX" ]; then
WARP10_HEAP_MAX=4g
fi
JAVA_OPTS="-Dlog4j.configuration=file:${LOG4J_CONF} ${JAVA__EXTRA_OPTS} -Djava.awt.headless=true -Xms${WARP10_HEAP} -Xmx${WARP10_HEAP_MAX} -XX:+UseG1GC"
SENSISION_OPTS=
if [ -n "$ENABLE_SENSISION" ]; then
_SENSISION_LABELS=
# Expects a comma seperated list of key=value ex key=value,foo=bar
if [ -n "$SENSISION_LABELS" ]; then
_SENSISION_LABELS="-Dsensision.default.labels=$SENSISION_LABELS"
fi
SENSISION_OPTS="${_SENSISION_LABELS} -Dsensision.events.dir=/var/run/sensision/metrics -Dfile.encoding=UTF-8 ${SENSISION_EXTRA_OPTS}"
fi
JMX_EXPORTER_OPTS=
if [ -n "$ENABLE_JMX_EXPORTER" ]; then
JMX_EXPORTER_OPTS="-javaagent:/opt/jmx_prom_agent.jar=4803:/opt/jmx_prom.yaml ${JMX_EXPORTER_EXTRA_OPTS}"
echo "Starting jmx exporter with Warp 10."
fi
WARP10_CMD="${JAVA} ${JMX_EXPORTER_OPTS} ${JAVA_OPTS} ${SENSISION_OPTS} -cp ${WARP10_CP} ${WARP10_CLASS} ${CONFIG_FILES}"
echo "Starting Warp 10 with $WARP10_CMD ..."
exec $WARP10_CMD | tee -a ${WARP10_HOME}/logs/warp10.log

View File

@ -0,0 +1,9 @@
token.write.0.name=writeTokenStatic
token.write.0.producer=42424242-4242-4242-4242-424242424242
token.write.0.owner=42424242-4242-4242-4242-424242424242
token.write.0.app=utapi
token.read.0.name=readTokenStatic
token.read.0.owner=42424242-4242-4242-4242-424242424242
token.read.0.app=utapi

View File

@ -1,6 +1,22 @@
'use strict'; // eslint-disable-line strict
module.exports = {
UtapiServer: require('./lib/server.js'),
UtapiClient: require('./lib/UtapiClient.js'),
UtapiReplay: require('./lib/UtapiReplay.js'),
};
/* eslint-disable global-require */
// eslint-disable-line strict
let toExport;

if (process.env.ENABLE_UTAPI_V2) {
    toExport = {
        utapiVersion: 2,
        startUtapiServer: require('./libV2/server').startServer,
        UtapiClient: require('./libV2/client'),
        tasks: require('./libV2/tasks'),
    };
} else {
    toExport = {
        utapiVersion: 1,
        UtapiServer: require('./lib/server'),
        UtapiClient: require('./lib/UtapiClient'),
        UtapiReplay: require('./lib/UtapiReplay'),
        UtapiReindex: require('./lib/UtapiReindex'),
    };
}

module.exports = toExport;
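
Since the exported surface now depends on ENABLE_UTAPI_V2, callers have to branch on the version marker. A hedged consumer sketch follows; the install name utapi and the zero-argument start call are assumptions, while the flag and export keys come from the diff above.

// Hypothetical consumer of the versioned exports.
process.env.ENABLE_UTAPI_V2 = '1'; // must be set before require()
const utapi = require('utapi'); // assumed package name

if (utapi.utapiVersion === 2) {
    utapi.startUtapiServer(); // libV2 server; real arguments omitted here
} else {
    const { UtapiServer, UtapiReplay, UtapiReindex } = utapi; // v1 surface
}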

View File

@ -1,46 +1,25 @@
/* eslint-disable no-bitwise */
const assert = require('assert');
const fs = require('fs');
const path = require('path');
/**
* Reads from a config file and returns the content as a config object
*/
class Config {
constructor() {
/*
* By default, the config file is "config.json" at the root.
* It can be overridden using the UTAPI_CONFIG_FILE environment var.
*/
this._basePath = path.resolve(__dirname, '..');
this.path = `${this._basePath}/config.json`;
if (process.env.UTAPI_CONFIG_FILE !== undefined) {
this.path = process.env.UTAPI_CONFIG_FILE;
}
// Read config automatically
this._getConfig();
}
_getConfig() {
let config;
try {
const data = fs.readFileSync(this.path, { encoding: 'utf-8' });
config = JSON.parse(data);
} catch (err) {
throw new Error(`could not parse config file: ${err.message}`);
}
constructor(config) {
this.component = config.component;
this.port = 9500;
if (config.port !== undefined) {
assert(Number.isInteger(config.port) && config.port > 0,
'bad config: port must be a positive integer');
'bad config: port must be a positive integer');
this.port = config.port;
}
this.workers = 10;
if (config.workers !== undefined) {
assert(Number.isInteger(config.workers) && config.workers > 0,
'bad config: workers must be a positive integer');
'bad config: workers must be a positive integer');
this.workers = config.workers;
}
@ -48,12 +27,12 @@ class Config {
if (config.log !== undefined) {
if (config.log.logLevel !== undefined) {
assert(typeof config.log.logLevel === 'string',
'bad config: log.logLevel must be a string');
'bad config: log.logLevel must be a string');
this.log.logLevel = config.log.logLevel;
}
if (config.log.dumpLevel !== undefined) {
assert(typeof config.log.dumpLevel === 'string',
'bad config: log.dumpLevel must be a string');
'bad config: log.dumpLevel must be a string');
this.log.dumpLevel = config.log.dumpLevel;
}
}
@ -61,16 +40,17 @@ class Config {
this.healthChecks = { allowFrom: ['127.0.0.1/8', '::1'] };
if (config.healthChecks && config.healthChecks.allowFrom) {
assert(Array.isArray(config.healthChecks.allowFrom),
'config: invalid healthcheck configuration. allowFrom must ' +
'be an array');
'config: invalid healthcheck configuration. allowFrom must '
+ 'be an array');
config.healthChecks.allowFrom.forEach(item => {
assert(typeof item === 'string',
'config: invalid healthcheck configuration. allowFrom IP ' +
'address must be a string');
'config: invalid healthcheck configuration. allowFrom IP '
+ 'address must be a string');
});
// augment to the defaults
this.healthChecks.allowFrom = this.healthChecks.allowFrom.concat(
config.healthChecks.allowFrom);
config.healthChecks.allowFrom,
);
}
// default to standalone configuration
this.redis = { host: '127.0.0.1', port: 6379 };
@ -92,6 +72,11 @@ class Config {
'bad config: sentinel port must be a number');
this.redis.sentinels.push({ host, port });
});
if (config.redis.sentinelPassword !== undefined) {
assert(typeof config.redis.sentinelPassword === 'string',
'bad config: redis.sentinelPassword must be a string');
this.redis.sentinelPassword = config.redis.sentinelPassword;
}
} else {
// check for standalone configuration
assert(typeof config.redis.host === 'string',
@ -103,53 +88,56 @@ class Config {
}
if (config.redis.password !== undefined) {
assert(typeof config.redis.password === 'string',
'bad config: redis.password must be a string');
'bad config: redis.password must be a string');
this.redis.password = config.redis.password;
}
}
this.vaultd = {};
if (config.vaultd) {
if (config.vaultd.port !== undefined) {
assert(Number.isInteger(config.vaultd.port)
&& config.vaultd.port > 0,
'bad config: vaultd port must be a positive integer');
this.vaultd.port = config.vaultd.port;
}
if (config.vaultd.host !== undefined) {
assert.strictEqual(typeof config.vaultd.host, 'string',
'bad config: vaultd host must be a string');
this.vaultd.host = config.vaultd.host;
if (config.vaultclient) {
// Instance passed from outside
this.vaultclient = config.vaultclient;
this.vaultd = null;
} else {
// Connection data
this.vaultclient = null;
this.vaultd = {};
if (config.vaultd) {
if (config.vaultd.port !== undefined) {
assert(Number.isInteger(config.vaultd.port)
&& config.vaultd.port > 0,
'bad config: vaultd port must be a positive integer');
this.vaultd.port = config.vaultd.port;
}
if (config.vaultd.host !== undefined) {
assert.strictEqual(typeof config.vaultd.host, 'string',
'bad config: vaultd host must be a string');
this.vaultd.host = config.vaultd.host;
}
}
}
if (config.certFilePaths) {
assert(typeof config.certFilePaths === 'object' &&
typeof config.certFilePaths.key === 'string' &&
typeof config.certFilePaths.cert === 'string' && ((
config.certFilePaths.ca &&
typeof config.certFilePaths.ca === 'string') ||
!config.certFilePaths.ca)
);
assert(typeof config.certFilePaths === 'object'
&& typeof config.certFilePaths.key === 'string'
&& typeof config.certFilePaths.cert === 'string' && ((
config.certFilePaths.ca
&& typeof config.certFilePaths.ca === 'string')
|| !config.certFilePaths.ca));
}
const { key, cert, ca } = config.certFilePaths ?
config.certFilePaths : {};
const { key, cert, ca } = config.certFilePaths
? config.certFilePaths : {};
if (key && cert) {
const keypath = (key[0] === '/') ? key : `${this._basePath}/${key}`;
const certpath = (cert[0] === '/') ?
cert : `${this._basePath}/${cert}`;
let capath = undefined;
const keypath = key;
const certpath = cert;
let capath;
if (ca) {
capath = (ca[0] === '/') ? ca : `${this._basePath}/${ca}`;
assert.doesNotThrow(() =>
fs.accessSync(capath, fs.F_OK | fs.R_OK),
capath = ca;
assert.doesNotThrow(() => fs.accessSync(capath, fs.F_OK | fs.R_OK),
`File not found or unreachable: ${capath}`);
}
assert.doesNotThrow(() =>
fs.accessSync(keypath, fs.F_OK | fs.R_OK),
assert.doesNotThrow(() => fs.accessSync(keypath, fs.F_OK | fs.R_OK),
`File not found or unreachable: ${keypath}`);
assert.doesNotThrow(() =>
fs.accessSync(certpath, fs.F_OK | fs.R_OK),
assert.doesNotThrow(() => fs.accessSync(certpath, fs.F_OK | fs.R_OK),
`File not found or unreachable: ${certpath}`);
this.https = {
cert: fs.readFileSync(certpath, 'ascii'),
@ -161,16 +149,21 @@ class Config {
cert: certpath,
};
} else if (key || cert) {
throw new Error('bad config: both certFilePaths.key and ' +
'certFilePaths.cert must be defined');
throw new Error('bad config: both certFilePaths.key and '
+ 'certFilePaths.cert must be defined');
}
if (config.expireMetrics !== undefined) {
assert(typeof config.expireMetrics === 'boolean', 'bad config: ' +
'expireMetrics must be a boolean');
assert(typeof config.expireMetrics === 'boolean', 'bad config: '
+ 'expireMetrics must be a boolean');
this.expireMetrics = config.expireMetrics;
}
return config;
if (config.onlyCountLatestWhenObjectLocked !== undefined) {
assert(typeof config.onlyCountLatestWhenObjectLocked === 'boolean',
'bad config: onlyCountLatestWhenObjectLocked must be a boolean');
this.onlyCountLatestWhenObjectLocked = config.onlyCountLatestWhenObjectLocked;
}
}
}
module.exports = new Config();
module.exports = Config;
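
With this change the Config class no longer locates and parses config.json itself on require(); the caller owns file loading and passes a plain object in. A minimal sketch of the new call site, assuming the UTAPI_CONFIG_FILE convention from the old constructor still applies upstream:

// Hypothetical call site for the refactored Config class.
const fs = require('fs');
const Config = require('./lib/Config');

const raw = JSON.parse(
    fs.readFileSync(process.env.UTAPI_CONFIG_FILE || 'config.json', 'utf-8'),
);
const config = new Config(raw); // validates port, workers, redis, vaultd, ...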

View File

@ -33,11 +33,14 @@ class Datastore {
* @return {undefined}
*/
set(key, value, cb) {
return this._client.set(key, value, cb);
return this._client.call(
(backend, done) => backend.set(key, value, done),
cb,
);
}
/**
* Set the replay lock key (if it does not already exist) with an expiration
* Set a lock key, if it does not already exist, with an expiration
* @param {string} key - key to set with an expiration
* @param {string} value - value containing the data
* @param {string} ttl - time after which the key expires
@ -45,7 +48,7 @@ class Datastore {
*/
setExpire(key, value, ttl) {
// This method is a Promise because no callback is given.
return this._client.set(key, value, 'EX', ttl, 'NX');
return this._client.call(backend => backend.set(key, value, 'EX', ttl, 'NX'));
}
/**
@ -54,7 +57,8 @@ class Datastore {
* @return {undefined}
*/
del(key) {
return this._client.del(key);
// This method is a Promise because no callback is given.
return this._client.call(backend => backend.del(key));
}
/**
@ -64,7 +68,7 @@ class Datastore {
* @return {undefined}
*/
get(key, cb) {
return this._client.get(key, cb);
return this._client.call((backend, done) => backend.get(key, done), cb);
}
/**
@ -74,7 +78,18 @@ class Datastore {
* @return {undefined}
*/
incr(key, cb) {
return this._client.incr(key, cb);
return this._client.call((backend, done) => backend.incr(key, done), cb);
}
/**
* increment value of a key by the provided value
* @param {string} key - key holding the value
* @param {string} value - value containing the data
* @param {callback} cb - callback
* @return {undefined}
*/
incrby(key, value, cb) {
return this._client.call((backend, done) => backend.incrby(key, value, done), cb);
}
/**
@ -84,7 +99,7 @@ class Datastore {
* @return {undefined}
*/
decr(key, cb) {
return this._client.decr(key, cb);
return this._client.call((backend, done) => backend.decr(key, done), cb);
}
/**
@ -95,7 +110,7 @@ class Datastore {
* @return {undefined}
*/
decrby(key, value, cb) {
return this._client.decrby(key, value, cb);
return this._client.call((backend, done) => backend.decrby(key, value, done), cb);
}
/**
@ -107,7 +122,7 @@ class Datastore {
* @return {undefined}
*/
zadd(key, score, value, cb) {
return this._client.zadd(key, score, value, cb);
return this._client.call((backend, done) => backend.zadd(key, score, value, done), cb);
}
/**
@ -120,7 +135,7 @@ class Datastore {
* @return {undefined}
*/
zrange(key, min, max, cb) {
return this._client.zrange(key, min, max, cb);
return this._client.call((backend, done) => backend.zrange(key, min, max, done), cb);
}
/**
@ -133,7 +148,7 @@ class Datastore {
* @return {undefined}
*/
zrangebyscore(key, min, max, cb) {
return this._client.zrangebyscore(key, min, max, cb);
return this._client.call((backend, done) => backend.zrangebyscore(key, min, max, done), cb);
}
/**
@ -146,8 +161,12 @@ class Datastore {
* @return {undefined}
*/
bZrangebyscore(keys, min, max, cb) {
return this._client.pipeline(keys.map(
item => ['zrangebyscore', item, min, max])).exec(cb);
return this._client.call(
(backend, done) => backend
.pipeline(keys.map(item => ['zrangebyscore', item, min, max]))
.exec(done),
cb,
);
}
/**
@ -157,7 +176,9 @@ class Datastore {
* @return {undefined}
*/
batch(cmds, cb) {
return this._client.multi(cmds).exec(cb);
return this._client.call((backend, done) => {
backend.multi(cmds).exec(done);
}, cb);
}
/**
@ -167,7 +188,7 @@ class Datastore {
* @return {undefined}
*/
pipeline(cmds, cb) {
return this._client.pipeline(cmds).exec(cb);
return this._client.call((backend, done) => backend.pipeline(cmds).exec(done), cb);
}
/**
@ -177,20 +198,21 @@ class Datastore {
* @return {undefined}
*/
multi(cmds, cb) {
return this._client.multi(cmds).exec((err, res) => {
if (err) {
return cb(err);
}
const flattenRes = [];
const resErr = res.filter(item => {
flattenRes.push(item[1]);
return item[0] !== null;
});
if (resErr && resErr.length > 0) {
return cb(resErr);
}
return cb(null, flattenRes);
});
return this._client.call((backend, done) =>
backend.multi(cmds).exec((err, res) => {
if (err) {
return done(err);
}
const flattenRes = [];
const resErr = res.filter(item => {
flattenRes.push(item[1]);
return item[0] !== null;
});
if (resErr && resErr.length > 0) {
return done(resErr);
}
return done(null, flattenRes);
}), cb);
}
/**
@ -203,7 +225,7 @@ class Datastore {
* @return {undefined}
*/
zremrangebyscore(key, min, max, cb) {
return this._client.zremrangebyscore(key, min, max, cb);
return this._client.call((backend, done) => backend.zremrangebyscore(key, min, max, done), cb);
}
/**
@ -214,7 +236,7 @@ class Datastore {
* @return {undefined}
*/
lpush(key, val, cb) {
return this._client.lpush(key, val, cb);
return this._client.call((backend, done) => backend.lpush(key, val, done), cb);
}
/**
@ -224,7 +246,7 @@ class Datastore {
* @return {undefined}
*/
rpop(key, cb) {
return this._client.rpop(key, cb);
return this._client.call((backend, done) => backend.rpop(key, done), cb);
}
/**
@ -236,7 +258,7 @@ class Datastore {
* @return {undefined}
*/
lrange(key, start, stop, cb) {
return this._client.lrange(key, start, stop, cb);
return this._client.call((backend, done) => backend.lrange(key, start, stop, done), cb);
}
/**
@ -246,7 +268,7 @@ class Datastore {
* @return {undefined}
*/
llen(key, cb) {
return this._client.llen(key, cb);
return this._client.call((backend, done) => backend.llen(key, done), cb);
}
/**
@ -257,7 +279,7 @@ class Datastore {
* @return {undefined}
*/
publish(channel, message, cb) {
return this._client.publish(channel, message, cb);
return this._client.call((backend, done) => backend.publish(channel, message, done), cb);
}
/**
@ -268,7 +290,7 @@ class Datastore {
* @return {undefined}
*/
scan(cursor, pattern, cb) {
return this._client.scan(cursor, 'match', pattern, cb);
return this._client.call((backend, done) => backend.scan(cursor, 'match', pattern, done), cb);
}
}
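
Every Redis command in Datastore now funnels through this._client.call(fn, cb), where fn receives the backend plus a completion callback, or just the backend for the promise-returning paths such as setExpire() and del(). The following is a purely illustrative stand-in for a client honoring that contract, assuming an ioredis-style backend; the real redisClientv2 wrapper presumably layers reconnection and failover on top.

// Minimal sketch of the call(fn, cb) contract Datastore relies on.
class CallableClient {
    constructor(backend) {
        this._backend = backend; // e.g. an ioredis instance
    }

    call(fn, cb) {
        if (cb === undefined) {
            return fn(this._backend); // promise form
        }
        return fn(this._backend, cb); // callback form
    }
}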

View File

@ -1,15 +1,18 @@
/* eslint-disable prefer-spread */
/* eslint-disable prefer-destructuring */
/* eslint-disable class-methods-use-this */
/* eslint-disable no-mixed-operators */
const async = require('async');
const { errors } = require('arsenal');
const { getMetricFromKey, getKeys, generateStateKey } = require('./schema');
const s3metricResponseJSON = require('../models/s3metricResponse');
const config = require('./Config');
const Vault = require('./Vault');
const MAX_RANGE_MS = (((1000 * 60) * 60) * 24) * 30; // One month.
/**
* Provides methods to get metrics of different levels
*/
class ListMetrics {
/**
* Assign the metric property to an instance of this class
* @param {string} metric - The metric type (e.g., 'buckets', 'accounts')
@ -18,7 +21,6 @@ class ListMetrics {
constructor(metric, component) {
this.metric = metric;
this.service = component;
this.vault = new Vault(config);
}
/**
@ -78,9 +80,10 @@ class ListMetrics {
const resources = validator.get(this.metric);
const timeRange = validator.get('timeRange');
const datastore = utapiRequest.getDatastore();
const vault = utapiRequest.getVault();
// map account ids to canonical ids
if (this.metric === 'accounts') {
return this.vault.getCanonicalIds(resources, log, (err, list) => {
return vault.getCanonicalIds(resources, log, (err, list) => {
if (err) {
return cb(err);
}
@ -91,16 +94,13 @@ class ListMetrics {
return next(err);
}
return next(null, Object.assign({}, res,
{ accountId: item.accountId }));
{ accountId: item.accountId }));
}),
cb
);
cb);
});
}
return async.mapLimit(resources, 5, (resource, next) =>
this.getMetrics(resource, timeRange, datastore, log,
next), cb
);
return async.mapLimit(resources, 5, (resource, next) => this.getMetrics(resource, timeRange, datastore, log,
next), cb);
}
/**
@ -122,10 +122,29 @@ class ListMetrics {
const fifteenMinutes = 15 * 60 * 1000; // In milliseconds
const timeRange = [start - fifteenMinutes, end];
const datastore = utapiRequest.getDatastore();
async.mapLimit(resources, 5, (resource, next) =>
this.getMetrics(resource, timeRange, datastore, log,
next), cb
);
const vault = utapiRequest.getVault();
// map account ids to canonical ids
if (this.metric === 'accounts') {
return vault.getCanonicalIds(resources, log, (err, list) => {
if (err) {
return cb(err);
}
return async.mapLimit(list.message.body, 5,
(item, next) => this.getMetrics(item.canonicalId, timeRange,
datastore, log, (err, res) => {
if (err) {
return next(err);
}
return next(null, Object.assign({}, res,
{ accountId: item.accountId }));
}),
cb);
});
}
return async.mapLimit(resources, 5, (resource, next) => this.getMetrics(resource, timeRange, datastore, log,
next), cb);
}
/**
@ -142,11 +161,60 @@ class ListMetrics {
res.push(last);
const d = new Date(last);
last = d.setMinutes(d.getMinutes() + 15);
if (process.env.UTAPI_INTERVAL_TEST_MODE === 'true') {
last = d.setSeconds(d.getSeconds() + 15);
}
}
res.push(end);
return res;
}
_buildSubRanges(range) {
let start = range[0];
const end = range[1] || Date.now();
const subRangesCount = Math.floor((end - start) / MAX_RANGE_MS) + 1;
const subRanges = [];
// eslint-disable-next-line no-plusplus
for (let i = 0; i < subRangesCount; i++) {
if (i + 1 === subRangesCount) {
subRanges.push([start, end]);
} else {
subRanges.push([start, (start + (MAX_RANGE_MS - 1))]);
start += MAX_RANGE_MS;
}
}
return subRanges;
}
_reduceResults(results) {
const reducer = (accumulator, current) => {
const result = Object.assign({}, accumulator);
result.timeRange[1] = current.timeRange[1];
result.storageUtilized[1] = current.storageUtilized[1];
result.numberOfObjects[1] = current.numberOfObjects[1];
result.incomingBytes += current.incomingBytes;
result.outgoingBytes += current.outgoingBytes;
const operations = Object.keys(result.operations);
operations.forEach(operation => {
result.operations[operation] += current.operations[operation];
});
return result;
};
return results.reduce(reducer);
}
getMetrics(resource, range, datastore, log, cb) {
const ranges = this._buildSubRanges(range);
async.mapLimit(ranges, 5, (subRange, next) => this._getMetricsRange(resource, subRange, datastore, log, next),
(err, results) => {
if (err) {
return cb(err);
}
const response = this._reduceResults(results);
return cb(null, response);
});
}
/**
* Callback for getting metrics for a single resource
* @callback ListMetrics~getMetricsCb
@ -178,9 +246,9 @@ class ListMetrics {
* @param {ListMetrics~getMetricsCb} cb - callback
* @return {undefined}
*/
getMetrics(resource, range, datastore, log, cb) {
_getMetricsRange(resource, range, datastore, log, cb) {
const start = range[0];
const end = range[1] || Date.now();
const end = range[1];
const obj = this._getSchemaObject(resource);
// find nearest neighbors for absolutes
@ -196,7 +264,8 @@ class ListMetrics {
'-inf', 'LIMIT', '0', '1'];
const timestampRange = this._getTimestampRange(start, end);
const metricKeys = [].concat.apply([], timestampRange.map(
i => getKeys(obj, i)));
i => getKeys(obj, i),
));
const cmds = metricKeys.map(item => ['get', item]);
cmds.push(storageUtilizedStart, storageUtilizedEnd,
numberOfObjectsStart, numberOfObjectsEnd);
@ -243,10 +312,10 @@ class ListMetrics {
});
if (!areMetricsPositive) {
return cb(errors.InternalError.customizeDescription(
'Utapi is in a transient state for this time period as ' +
'metrics are being collected. Please try again in a few ' +
'minutes.'));
log.info('negative metric value found', {
error: resource,
method: 'ListMetrics.getMetrics',
});
}
/**
* Batch result is of the format
@ -271,8 +340,8 @@ class ListMetrics {
if (m === 'incomingBytes' || m === 'outgoingBytes') {
metricResponse[m] += count;
} else {
metricResponse.operations[`${this.service}:${m}`] +=
count;
metricResponse.operations[`${this.service}:${m}`]
+= count;
}
}
});
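
The new helpers cap each Redis query at MAX_RANGE_MS (30 days): _buildSubRanges() splits the requested window, _getMetricsRange() serves each piece, and _reduceResults() folds the pieces back into a single response. A worked example of the split, using only the constants from the diff above:

// For a 75-day window, Math.floor(75d / 30d) + 1 = 3 sub-ranges.
const MAX_RANGE_MS = 1000 * 60 * 60 * 24 * 30; // one month
const DAY = 24 * 60 * 60 * 1000;
const [start, end] = [0, 75 * DAY];
const count = Math.floor((end - start) / MAX_RANGE_MS) + 1; // 3
// Sub-ranges produced: [0, 30d - 1], [30d, 60d - 1], [60d, 75d]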

View File

@ -1,10 +1,14 @@
/* eslint-disable prefer-destructuring */
/* eslint-disable class-methods-use-this */
/* eslint-disable no-mixed-operators */
const assert = require('assert');
const { doUntil, parallel } = require('async');
const werelogs = require('werelogs');
const { errors } = require('arsenal');
const Datastore = require('./Datastore');
const { generateKey, generateCounter, generateStateKey } = require('./schema');
const { errors } = require('arsenal');
const redisClient = require('../utils/redisClient');
const redisClientv2 = require('../utils/redisClientv2');
const member = require('../utils/member');
const methods = {
createBucket: { method: '_genericPushMetric', changesData: true },
@ -19,47 +23,83 @@ const methods = {
getBucketWebsite: { method: '_genericPushMetric', changesData: false },
getBucketLocation: { method: '_genericPushMetric', changesData: false },
deleteBucketWebsite: { method: '_genericPushMetric', changesData: true },
uploadPart: { method: '_pushMetricUploadPart', changesData: true },
initiateMultipartUpload: { method: '_genericPushMetric',
changesData: true },
completeMultipartUpload: { method: '_pushMetricCompleteMultipartUpload',
changesData: true },
listMultipartUploads: { method: '_pushMetricListBucketMultipartUploads',
changesData: false },
listMultipartUploadParts: { method: '_genericPushMetric',
changesData: false },
abortMultipartUpload: { method: '_genericPushMetricDeleteObject',
changesData: true },
deleteObject: { method: '_genericPushMetricDeleteObject',
changesData: true },
multiObjectDelete: { method: '_genericPushMetricDeleteObject',
changesData: true },
uploadPart: { method: '_genericPushMetricUploadPart', changesData: true },
uploadPartCopy: {
method: '_genericPushMetricUploadPart',
changesData: true,
},
initiateMultipartUpload: {
method: '_genericPushMetric',
changesData: true,
},
completeMultipartUpload: {
method: '_pushMetricCompleteMultipartUpload',
changesData: true,
},
listMultipartUploads: {
method: '_pushMetricListBucketMultipartUploads',
changesData: false,
},
listMultipartUploadParts: {
method: '_genericPushMetric',
changesData: false,
},
abortMultipartUpload: {
method: '_genericPushMetricDeleteObject',
changesData: true,
},
deleteObject: {
method: '_genericPushMetricDeleteObject',
changesData: true,
},
multiObjectDelete: {
method: '_genericPushMetricDeleteObject',
changesData: true,
},
getObject: { method: '_pushMetricGetObject', changesData: false },
getObjectAcl: { method: '_genericPushMetric', changesData: false },
getObjectLegalHold: { method: '_genericPushMetric', changesData: false },
getObjectRetention: { method: '_genericPushMetric', changesData: false },
getObjectTagging: { method: '_genericPushMetric', changesData: false },
putObject: { method: '_genericPushMetricPutObject', changesData: true },
copyObject: { method: '_genericPushMetricPutObject', changesData: true },
putObjectAcl: { method: '_genericPushMetric', changesData: true },
putObjectLegalHold: { method: '_genericPushMetric', changesData: true },
putObjectRetention: { method: '_genericPushMetric', changesData: true },
putObjectTagging: { method: '_genericPushMetric', changesData: true },
deleteObjectTagging: { method: '_genericPushMetric', changesData: true },
headBucket: { method: '_genericPushMetric', changesData: false },
headObject: { method: '_genericPushMetric', changesData: false },
putBucketVersioning: { method: '_genericPushMetric', changesData: true },
getBucketVersioning: { method: '_genericPushMetric', changesData: false },
putDeleteMarkerObject: { method: '_pushMetricDeleteMarkerObject',
changesData: true },
putBucketReplication: { method: '_genericPushMetric',
changesData: true },
getBucketReplication: { method: '_genericPushMetric',
changesData: false },
deleteBucketReplication: { method: '_genericPushMetric',
changesData: true },
putDeleteMarkerObject: {
method: '_pushMetricDeleteMarkerObject',
changesData: true,
},
putBucketReplication: {
method: '_genericPushMetric',
changesData: true,
},
getBucketReplication: {
method: '_genericPushMetric',
changesData: false,
},
deleteBucketReplication: {
method: '_genericPushMetric',
changesData: true,
},
putBucketObjectLock: { method: '_genericPushMetric', changesData: true },
getBucketObjectLock: { method: '_genericPushMetric', changesData: true },
replicateObject: { method: '_genericPushMetricPutObject', changesData: true },
replicateTags: { method: '_genericPushMetric', changesData: true },
replicateDelete: { method: '_pushMetricDeleteMarkerObject', changesData: true },
};
const metricObj = {
buckets: 'bucket',
accounts: 'accountId',
users: 'userId',
location: 'location',
};
class UtapiClient {
@ -83,35 +123,44 @@ class UtapiClient {
const api = (config || {}).logApi || werelogs;
this.log = new api.Logger('UtapiClient');
// By default, we push all resource types
this.metrics = ['buckets', 'accounts', 'users', 'service'];
this.metrics = ['buckets', 'accounts', 'users', 'service', 'location'];
this.service = 's3';
this.disableOperationCounters = false;
this.enabledOperationCounters = [];
this.disableClient = true;
if (config) {
if (config && !config.disableClient) {
this.disableClient = false;
this.expireMetrics = config.expireMetrics;
this.expireMetricsTTL = config.expireMetricsTTL || 0;
if (config.metrics) {
const message = 'invalid property in UtapiClient configuration';
assert(Array.isArray(config.metrics), `${message}: metrics ` +
'must be an array');
assert(config.metrics.length !== 0, `${message}: metrics ` +
'array cannot be empty');
assert(Array.isArray(config.metrics), `${message}: metrics `
+ 'must be an array');
assert(config.metrics.length !== 0, `${message}: metrics `
+ 'array cannot be empty');
this.metrics = config.metrics;
}
if (config.redis) {
this.ds = new Datastore()
.setClient(redisClient(config.redis, this.log));
.setClient(redisClientv2(config.redis, this.log));
}
if (config.localCache) {
this.localCache = new Datastore()
.setClient(redisClient(config.localCache, this.log));
.setClient(redisClientv2(config.localCache, this.log));
}
if (config.component) {
// The configuration uses the property `component`, while
// internally this is known as a metric level `service`.
this.service = config.component;
}
this.disableClient = false;
this.expireMetrics = config.expireMetrics;
this.expireTTL = config.expireTTL || 0;
if (config.disableOperationCounters) {
this.disableOperationCounters = config.disableOperationCounters;
}
if (config.enabledOperationCounters) {
this.enabledOperationCounters = config.enabledOperationCounters;
}
}
}
@ -123,6 +172,10 @@ class UtapiClient {
static getNormalizedTimestamp() {
const d = new Date();
const minutes = d.getMinutes();
if (process.env.UTAPI_INTERVAL_TEST_MODE === 'true') {
const seconds = d.getSeconds();
return d.setSeconds((seconds - seconds % 15), 0, 0);
}
return d.setMinutes((minutes - minutes % 15), 0, 0);
}
@ -137,6 +190,13 @@ class UtapiClient {
return this;
}
_isCounterEnabled(action) {
if (this.enabledOperationCounters.length > 0) {
return this.enabledOperationCounters.some(counter => counter.toLowerCase() === action.toLowerCase());
}
return this.disableOperationCounters === false;
}
/*
* Utility function to use when callback is not defined
*/
@ -154,16 +214,21 @@ class UtapiClient {
*/
_pushLocalCache(params, operation, timestamp, log, cb) {
// 'listMultipartUploads' has a different name in the metric response.
const action = operation === 'listBucketMultipartUploads' ?
'listMultipartUploads' : operation;
const logObject = { method: 'UtapiClient._pushLocalCache', action,
params };
const action = operation === 'listBucketMultipartUploads'
? 'listMultipartUploads' : operation;
const logObject = {
method: 'UtapiClient._pushLocalCache',
action,
params,
};
if (!this.localCache) {
log.fatal('failed to push metrics', logObject);
return cb(errors.InternalError);
}
const reqUid = log.getSerializedUids();
const value = JSON.stringify({ action, reqUid, params, timestamp });
const value = JSON.stringify({
action, reqUid, params, timestamp,
});
return this.localCache.lpush('s3:utapireplay', value, err => {
if (err) {
log.error('error inserting data in local cache', logObject);
@ -174,7 +239,7 @@ class UtapiClient {
});
}
/**
/**
* Check the types of `params` object properties. This enforces object
* properties for particular push metric calls.
* @param {object} params - params object with metric data
@ -189,15 +254,15 @@ class UtapiClient {
*/
_checkProperties(params, properties = []) {
properties.forEach(prop => {
assert(params[prop] !== undefined, 'Metric object must include ' +
`${prop} property`);
assert(params[prop] !== undefined, 'Metric object must include '
+ `${prop} property`);
if (prop === 'oldByteLength') {
assert(typeof params[prop] === 'number' ||
params[prop] === null, 'oldByteLength property must be ' +
'an integer or `null`');
assert(typeof params[prop] === 'number'
|| params[prop] === null, 'oldByteLength property must be '
+ 'an integer or `null`');
} else {
assert(typeof params[prop] === 'number', `${prop} property ` +
'must be an integer');
assert(typeof params[prop] === 'number', `${prop} property `
+ 'must be an integer');
}
});
}
@ -216,9 +281,9 @@ class UtapiClient {
// Object of metric types and their associated property names
this.metrics.forEach(level => {
const propName = metricObj[level];
assert(typeof params[propName] === 'string' ||
params[propName] === undefined,
`${propName} must be a string`);
assert(typeof params[propName] === 'string'
|| params[propName] === undefined,
`${propName} must be a string`);
});
}
@ -254,8 +319,9 @@ class UtapiClient {
_getParamsArr(params) {
this._checkMetricTypes(params);
const props = [];
const { byteLength, newByteLength, oldByteLength, numberOfObjects } =
params;
const {
byteLength, newByteLength, oldByteLength, numberOfObjects,
} = params;
// We add a `service` property to any non-service level to be able to
// build the appropriate schema key.
this.metrics.forEach(level => {
@ -348,10 +414,11 @@ class UtapiClient {
}
if (methods[metric].changesData) {
return this._publishEvent(metric, params,
log, callback);
log, callback);
}
return callback();
});
},
);
}
log.debug(`UtapiClient::pushMetric: ${metric} unsupported`);
return callback();
@ -369,6 +436,9 @@ class UtapiClient {
* @return {undefined}
*/
_genericPushMetric(params, timestamp, action, log, callback) {
if (!this._isCounterEnabled(action)) {
return process.nextTick(callback);
}
this._checkProperties(params);
this._logMetric(params, '_genericPushMetric', timestamp, log);
const cmds = this._getParamsArr(params)
@ -424,30 +494,30 @@ class UtapiClient {
return done();
}),
// if cursor is 0, it reached end of scan
() => cursor === '0',
err => callback(err, keys)
cb => cb(null, cursor === '0'),
err => callback(err, keys),
);
}
_expireMetrics(keys, log, callback) {
// expire metrics here
const expireCmds = keys.map(k => ['expire', k, this.expireTTL]);
const expireCmds = keys.map(k => ['expire', k, this.expireMetricsTTL]);
return this.ds.multi(expireCmds, (err, result) => {
if (err) {
const logParam = Array.isArray(err) ? { errorList: err } :
{ error: err };
const logParam = Array.isArray(err) ? { errorList: err }
: { error: err };
log.error('error expiring metrics', logParam);
return callback(err);
}
// each delete command gets a score 1 if it's a success,
// should match the total commands sent for deletion
const allKeysDeleted =
keys.length === result.reduce((a, v) => a + v, 0);
const allKeysDeleted = keys.length === result.reduce((a, v) => a + v, 0);
if (!allKeysDeleted) {
log.debug('error expiring keys', { delResult: result });
return callback(
errors.InternalError.customizeDescription(
'error expiring some keys')
'error expiring some keys',
),
);
}
return callback();
@ -472,12 +542,15 @@ class UtapiClient {
log);
const cmds = [];
const paramsArr = this._getParamsArr(params);
paramsArr.forEach(p => cmds.push(
['incr', generateCounter(p, 'numberOfObjectsCounter')],
['incr', generateKey(p, 'deleteObject', timestamp)]));
// We track the number of commands needed for each `paramsArr` property
// to eventually locate each group in the results from Redis.
const commandsGroupSize = 2;
paramsArr.forEach(p => {
cmds.push(['incr', generateCounter(p, 'numberOfObjectsCounter')]);
const counterAction = action === 'putDeleteMarkerObject' ? 'deleteObject' : action;
if (this._isCounterEnabled(counterAction)) {
cmds.push(['incr', generateKey(p, counterAction, timestamp)]);
}
cmds.push(['zrangebyscore', generateStateKey(p, 'storageUtilized'), timestamp, timestamp]);
});
return this.ds.batch(cmds, (err, results) => {
if (err) {
log.error('error pushing metric', {
@ -487,9 +560,13 @@ class UtapiClient {
return this._pushLocalCache(params, action, timestamp, log, cb);
}
const cmds2 = [];
// We track the number of commands needed for each `paramsArr`
// property to eventually locate each group in the results from
// Redis.
const commandsGroupSize = (cmds.length / paramsArr.length);
const noErr = paramsArr.every((p, i) => {
// We want the first element of every group of two commands
// returned from Redis. This contains the value of the
// We want the first element of every group of commands returned
// from Redis. This contains the value of the
// numberOfObjectsCounter after it has been incremented.
const index = i * commandsGroupSize;
const actionErr = results[index][0];
@ -505,14 +582,50 @@ class UtapiClient {
let actionCounter = parseInt(results[index][1], 10);
// If < 0 or NaN, record numberOfObjects as though bucket were
// empty.
actionCounter = Number.isNaN(actionCounter) ||
actionCounter < 0 ? 1 : actionCounter;
actionCounter = Number.isNaN(actionCounter)
|| actionCounter < 0 ? 1 : actionCounter;
if (Number.isInteger(params.byteLength)) {
/* byteLength is passed in from cloudserver under the following conditions:
* - bucket versioning is suspended
* - object version id is null
* - the content length of the object exists
* In this case, the master key is deleted and replaced with a delete marker.
* The decrement accounts for the deletion of the master key when utapi reports
* on the number of objects.
*/
actionCounter -= 1;
}
const key = generateStateKey(p, 'numberOfObjects');
const byteArr = results[index + commandsGroupSize - 1][1];
const oldByteLength = byteArr ? parseInt(byteArr[0], 10) : 0;
const newByteLength = member.serialize(Math.max(0, oldByteLength - params.byteLength));
cmds2.push(
['zremrangebyscore', key, timestamp, timestamp],
['zadd', key, timestamp, actionCounter]);
['zadd', key, timestamp, member.serialize(actionCounter)],
);
if (Number.isInteger(params.byteLength)) {
cmds2.push(
['decr', generateCounter(p, 'numberOfObjectsCounter')],
['decrby', generateCounter(p, 'storageUtilizedCounter'), params.byteLength],
);
}
if (byteArr) {
cmds2.push(
['zremrangebyscore', generateStateKey(p, 'storageUtilized'), timestamp, timestamp],
['zadd', generateStateKey(p, 'storageUtilized'), timestamp, newByteLength],
);
}
return true;
});
if (noErr) {
return this.ds.batch(cmds2, cb);
}
@ -521,7 +634,7 @@ class UtapiClient {
}
/**
* Updates counter for UploadPart action
* Updates counter for UploadPart or UploadPartCopy action
* @param {object} params - params for the metrics
* @param {string} [params.bucket] - (optional) bucket name
* @param {string} [params.accountId] - (optional) account ID
@ -532,9 +645,9 @@ class UtapiClient {
* @param {callback} callback - callback to call
* @return {undefined}
*/
_pushMetricUploadPart(params, timestamp, action, log, callback) {
_genericPushMetricUploadPart(params, timestamp, action, log, callback) {
this._checkProperties(params, ['newByteLength', 'oldByteLength']);
this._logMetric(params, '_pushMetricUploadPart', timestamp, log);
this._logMetric(params, '_genericPushMetricUploadPart', timestamp, log);
const cmds = [];
const { newByteLength, oldByteLength } = params;
const oldObjSize = oldByteLength === null ? 0 : oldByteLength;
@ -546,14 +659,16 @@ class UtapiClient {
storageUtilizedDelta],
['incrby', generateKey(p, 'incomingBytes', timestamp),
newByteLength],
['incr', generateKey(p, action, timestamp)]
);
if (this._isCounterEnabled(action)) {
cmds.push(['incr', generateKey(p, action, timestamp)]);
}
});
// update counters
return this.ds.batch(cmds, (err, results) => {
if (err) {
log.error('error pushing metric', {
method: 'UtapiClient._pushMetricUploadPart',
method: 'UtapiClient._genericPushMetricUploadPart',
error: err,
});
return this._pushLocalCache(params, action, timestamp, log,
@ -574,11 +689,11 @@ class UtapiClient {
actionErr = results[index][0];
actionCounter = parseInt(results[index][1], 10);
// If < 0, record storageUtilized as though bucket were empty.
actionCounter = actionCounter < 0 ? storageUtilizedDelta :
actionCounter;
actionCounter = actionCounter < 0 ? storageUtilizedDelta
: actionCounter;
if (actionErr) {
log.error('error incrementing counter for push metric', {
method: 'UtapiClient._pushMetricUploadPart',
method: 'UtapiClient._genericPushMetricUploadPart',
metric: 'storage utilized',
error: actionErr,
});
@ -590,7 +705,7 @@ class UtapiClient {
['zremrangebyscore', generateStateKey(p, 'storageUtilized'),
timestamp, timestamp],
['zadd', generateStateKey(p, 'storageUtilized'),
timestamp, actionCounter]
timestamp, member.serialize(actionCounter)],
);
return true;
});
@ -601,6 +716,57 @@ class UtapiClient {
});
}
_multipartUploadOverwrite(params, timestamp, action, log, cb) {
const counterCommands = [];
const levels = this._getParamsArr(params);
levels.forEach(level => {
const key = generateCounter(level, 'storageUtilizedCounter');
counterCommands.push(['decrby', key, params.oldByteLength]);
if (this._isCounterEnabled(action)) {
const key = generateKey(level, action, timestamp);
counterCommands.push(['incr', key]);
}
});
return this.ds.batch(counterCommands, (err, res) => {
if (err) {
log.error('error decrementing counter for push metric', {
method: 'UtapiClient._multipartUploadOverwrite',
error: err,
});
return this._pushLocalCache(params, action, timestamp, log, cb);
}
const commandErr = res.find(i => i[0]);
if (commandErr) {
log.error('error decrementing counter for push metric', {
method: 'UtapiClient._multipartUploadOverwrite',
error: commandErr,
});
return this._pushLocalCache(params, action, timestamp, log, cb);
}
const sortedSetCommands = [];
levels.forEach((level, i) => {
const key = generateStateKey(level, 'storageUtilized');
// We want the result of the storage utilized counter update
// that is the first of every group of levels.
const groupSize = counterCommands.length / levels.length;
const value = res[i * groupSize][1];
const storageUtilized = Number.parseInt(value, 10);
sortedSetCommands.push(
['zremrangebyscore', key, timestamp, timestamp],
['zadd', key, timestamp, member.serialize(storageUtilized)],
);
});
return this.ds.batch(sortedSetCommands, cb);
});
}
/**
* Updates counter for CompleteMultipartUpload action
* @param {object} params - params for the metrics
@ -617,17 +783,25 @@ class UtapiClient {
this._checkProperties(params);
this._logMetric(params, '_pushMetricCompleteMultipartUpload', timestamp,
log);
// Is the MPU completion overwriting an object?
if (params.oldByteLength !== null) {
return this._multipartUploadOverwrite(
params, timestamp, action, log, callback,
);
}
const paramsArr = this._getParamsArr(params);
const cmds = [];
paramsArr.forEach(p => {
cmds.push(
['incr', generateCounter(p, 'numberOfObjectsCounter')],
['incr', generateKey(p, action, timestamp)]
);
cmds.push(['incr', generateCounter(p, 'numberOfObjectsCounter')]);
if (this._isCounterEnabled(action)) {
cmds.push(['incr', generateKey(p, action, timestamp)]);
}
});
// We track the number of commands needed for each `paramsArr` object to
// eventually locate each group in the results from Redis.
const commandsGroupSize = 2;
const commandsGroupSize = (cmds.length / paramsArr.length);
return this.ds.batch(cmds, (err, results) => {
if (err) {
log.error('error incrementing counter for push metric', {
@ -649,12 +823,12 @@ class UtapiClient {
const index = i * commandsGroupSize;
actionErr = results[index][0];
actionCounter = parseInt(results[index][1], 10);
// If < 0, record numberOfObjects as though bucket were empty.
// If < 0, record numberOfObjects as though bucket were empty.
actionCounter = actionCounter < 0 ? 1 : actionCounter;
if (actionErr) {
log.error('error incrementing counter for push metric', {
method: 'UtapiClient._pushMetricCompleteMultipart' +
'Upload',
method: 'UtapiClient._pushMetricCompleteMultipart'
+ 'Upload',
metric: 'number of objects',
error: actionErr,
});
@ -664,7 +838,7 @@ class UtapiClient {
}
key = generateStateKey(p, 'numberOfObjects');
cmds2.push(['zremrangebyscore', key, timestamp, timestamp],
['zadd', key, timestamp, actionCounter]);
['zadd', key, timestamp, member.serialize(actionCounter)]);
return true;
});
if (noErr) {
@ -706,8 +880,8 @@ class UtapiClient {
* @return {undefined}
*/
_genericPushMetricDeleteObject(params, timestamp, action, log, callback) {
const expectedProps = action === 'abortMultipartUpload' ?
['byteLength'] : ['byteLength', 'numberOfObjects'];
const expectedProps = action === 'abortMultipartUpload'
? ['byteLength'] : ['byteLength', 'numberOfObjects'];
this._checkProperties(params, expectedProps);
const { byteLength, numberOfObjects } = params;
this._logMetric(params, '_genericPushMetricDeleteObject', timestamp,
@ -720,8 +894,10 @@ class UtapiClient {
cmds.push(
['decrby', generateCounter(p, 'storageUtilizedCounter'),
byteLength],
['incr', generateKey(p, action, timestamp)]
);
if (this._isCounterEnabled(action)) {
cmds.push(['incr', generateKey(p, action, timestamp)]);
}
// The 'abortMultipartUpload' action affects only storage utilized,
// so number of objects remains unchanged.
if (action !== 'abortMultipartUpload') {
@ -731,7 +907,7 @@ class UtapiClient {
});
// We track the number of commands needed for each `paramsArr` object to
// eventually locate each group in the results from Redis.
const commandsGroupSize = action !== 'abortMultipartUpload' ? 3 : 2;
const commandsGroupSize = (cmds.length / paramsArr.length);
return this.ds.batch(cmds, (err, results) => {
if (err) {
log.error('error incrementing counter', {
@ -776,15 +952,16 @@ class UtapiClient {
timestamp, timestamp],
['zadd',
generateStateKey(p, 'storageUtilized'), timestamp,
actionCounter]);
member.serialize(actionCounter)],
);
// The 'abortMultipartUpload' action does not affect number of
// objects, so we return here.
if (action === 'abortMultipartUpload') {
return true;
}
// The number of objects counter result is the third element of
// each group of commands. Thus we add two.
const numberOfObjectsResult = currentResultsGroup + 2;
// The number of objects counter result is the last element of
// each group of commands.
const numberOfObjectsResult = currentResultsGroup + (commandsGroupSize - 1);
actionErr = results[numberOfObjectsResult][0];
actionCounter = parseInt(results[numberOfObjectsResult][1], 10);
// If < 0, record numberOfObjects as though bucket were empty.
@ -806,7 +983,8 @@ class UtapiClient {
generateStateKey(p, 'numberOfObjects'), timestamp,
timestamp],
['zadd', generateStateKey(p, 'numberOfObjects'), timestamp,
actionCounter]);
member.serialize(actionCounter)],
);
return true;
});
if (noErr) {
@ -838,8 +1016,10 @@ class UtapiClient {
cmds.push(
['incrby', generateKey(p, 'outgoingBytes', timestamp),
newByteLength],
['incr', generateKey(p, action, timestamp)]
);
if (this._isCounterEnabled(action)) {
cmds.push(['incr', generateKey(p, action, timestamp)]);
}
});
// update counters
return this.ds.batch(cmds, err => {
@ -886,12 +1066,14 @@ class UtapiClient {
['incrby', generateCounter(p, 'storageUtilizedCounter'),
storageUtilizedDelta],
[redisCmd, generateCounter(p, 'numberOfObjectsCounter')],
['incr', generateKey(p, action, timestamp)]
);
if (action === 'putObject') {
if (this._isCounterEnabled(action)) {
cmds.push(['incr', generateKey(p, action, timestamp)]);
}
if (action === 'putObject' || action === 'replicateObject') {
cmds.push(
['incrby', generateKey(p, 'incomingBytes', timestamp),
newByteLength]
newByteLength],
);
}
});
@ -919,8 +1101,8 @@ class UtapiClient {
actionErr = results[storageIndex][0];
actionCounter = parseInt(results[storageIndex][1], 10);
// If < 0, record storageUtilized as though bucket were empty.
actionCounter = actionCounter < 0 ? storageUtilizedDelta :
actionCounter;
actionCounter = actionCounter < 0 ? storageUtilizedDelta
: actionCounter;
if (actionErr) {
log.error('error incrementing counter for push metric', {
method: 'UtapiClient._genericPushMetricPutObject',
@ -936,7 +1118,8 @@ class UtapiClient {
generateStateKey(p, 'storageUtilized'),
timestamp, timestamp],
['zadd', generateStateKey(p, 'storageUtilized'),
timestamp, actionCounter]);
timestamp, member.serialize(actionCounter)],
);
// number of objects counter
objectsIndex = (i * (cmdsLen / paramsArrLen)) + 1;
@ -945,8 +1128,8 @@ class UtapiClient {
// If the key does not exist, actionCounter will be null.
// Hence we check that action counter is a number and is > 0. If
// true, we record numberOfObjects as though bucket were empty.
actionCounter = Number.isNaN(actionCounter) ||
actionCounter < 0 ? 1 : actionCounter;
actionCounter = Number.isNaN(actionCounter)
|| actionCounter < 0 ? 1 : actionCounter;
if (actionErr) {
log.error('error incrementing counter for push metric', {
method: 'UtapiClient._genericPushMetricPutObject',
@ -962,7 +1145,8 @@ class UtapiClient {
generateStateKey(p, 'numberOfObjects'),
timestamp, timestamp],
['zadd', generateStateKey(p, 'numberOfObjects'),
timestamp, actionCounter]);
timestamp, member.serialize(actionCounter)],
);
return true;
});
if (noErr) {
@ -971,6 +1155,95 @@ class UtapiClient {
return undefined;
});
}
/**
*
* @param {string} location - name of data location
* @param {number} updateSize - size in bytes to update location metric by,
* could be negative, indicating deleted object
* @param {string} reqUid - Request Unique Identifier
* @param {function} callback - callback to call
* @return {undefined}
*/
pushLocationMetric(location, updateSize, reqUid, callback) {
const log = this.log.newRequestLoggerFromSerializedUids(reqUid);
const params = {
level: 'location',
service: 's3',
location,
};
this._checkMetricTypes(params);
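// Redis INCRBY also accepts negative deltas; mapping deletes to DECRBY
// with the absolute value just keeps the intent explicit in the key ops.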
const action = (updateSize < 0) ? 'decrby' : 'incrby';
const size = (updateSize < 0) ? -updateSize : updateSize;
return this.ds[action](generateKey(params, 'locationStorage'), size,
err => {
if (err) {
log.error('error pushing metric', {
method: 'UtapiClient.pushLocationMetric',
error: err,
});
return callback(errors.InternalError);
}
return callback();
});
}
/**
*
* @param {string} location - name of data backend to get metric for
* @param {string} reqUid - Request Unique Identifier
* @param {function} callback - callback to call
* @return {undefined}
*/
getLocationMetric(location, reqUid, callback) {
const log = this.log.newRequestLoggerFromSerializedUids(reqUid);
const params = {
level: 'location',
service: 's3',
location,
};
const redisKey = generateKey(params, 'locationStorage');
return this.ds.get(redisKey, (err, bytesStored) => {
if (err) {
log.error('error getting metric', {
method: 'UtapiClient: getLocationMetric',
error: err,
});
return callback(errors.InternalError);
}
// if err and bytesStored are null, key does not exist yet
if (bytesStored === null) {
return callback(null, 0);
}
return callback(null, bytesStored);
});
}
/**
* Get storage used by bucket/account/user/service
* @param {object} params - params for the metrics
* @param {string} [params.bucket] - (optional) bucket name
* @param {string} [params.accountId] - (optional) account canonical ID
* @param {string} [params.userId] - (optional) user ID
* @param {string} params.level - (required) level of granularity
* @param {string} params.service - (required) service name (s3 for ex.)
* @param {object} log - Werelogs request logger
* @param {callback} callback - callback to call
* @return {undefined}
*/
getStorageUtilized(params, log, callback) {
const key = generateCounter(params, 'storageUtilizedCounter');
this.ds.get(key, (err, res) => {
if (err) {
log.error('error getting storage utilized', {
method: 'UtapiClient: getStorageUtilized',
error: err,
});
return callback(errors.InternalError);
}
return callback(null, res);
});
}
}
module.exports = UtapiClient;

265
lib/UtapiReindex.js Normal file
View File

@ -0,0 +1,265 @@
const childProcess = require('child_process');
const async = require('async');
const nodeSchedule = require('node-schedule');
const { jsutil } = require('arsenal');
const werelogs = require('werelogs');
const Datastore = require('./Datastore');
const RedisClient = require('../libV2/redis');
const REINDEX_SCHEDULE = '0 0 * * Sun';
const REINDEX_LOCK_KEY = 's3:utapireindex:lock';
const REINDEX_LOCK_TTL = (60 * 60) * 24;
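// The schedule above is cron syntax (midnight every Sunday); the lock TTL
// is in seconds (24 hours).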
const REINDEX_PYTHON_INTERPRETER = process.env.REINDEX_PYTHON_INTERPRETER !== undefined
? process.env.REINDEX_PYTHON_INTERPRETER
: 'python3.7';
const EXIT_CODE_SENTINEL_CONNECTION = 100;
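// Keep in sync with EXIT_CODE_SENTINEL_CONNECTION_ERROR (100) defined in
// the lib/reindex/*.py scripts.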
class UtapiReindex {
constructor(config) {
this._enabled = false;
this._schedule = REINDEX_SCHEDULE;
this._redis = {
name: 'scality-s3',
sentinelPassword: '',
sentinels: [{
host: '127.0.0.1',
port: 16379,
}],
};
this._bucketd = {
host: '127.0.0.1',
port: 9000,
};
this._password = '';
this._log = new werelogs.Logger('UtapiReindex');
if (config && config.enabled) {
this._enabled = config.enabled;
}
if (config && config.schedule) {
this._schedule = config.schedule;
}
if (config && config.password) {
this._password = config.password;
}
if (config && config.redis) {
const {
name, sentinelPassword, sentinels,
} = config.redis;
this._redis.name = name || this._redis.name;
this._redis.sentinelPassword = sentinelPassword || this._redis.sentinelPassword;
this._redis.sentinels = sentinels || this._redis.sentinels;
}
if (config && config.bucketd) {
const { host, port } = config.bucketd;
this._bucketd.host = host || this._bucketd.host;
this._bucketd.port = port || this._bucketd.port;
}
if (config && config.log) {
const { level, dump } = config.log;
this._log = new werelogs.Logger('UtapiReindex', { level, dump });
}
this._onlyCountLatestWhenObjectLocked = (config && config.onlyCountLatestWhenObjectLocked === true);
this._requestLogger = this._log.newRequestLogger();
}
_getRedisClient() {
const client = new RedisClient({
sentinels: this._redis.sentinels,
name: this._redis.name,
sentinelPassword: this._redis.sentinelPassword,
password: this._password,
});
client.connect();
return client;
}
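// The lock is a plain key written with a TTL; _attemptLock() below treats
// a falsy reply from setExpire as "another process already holds it".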
_lock() {
return this.ds.setExpire(REINDEX_LOCK_KEY, 'true', REINDEX_LOCK_TTL);
}
_unLock() {
return this.ds.del(REINDEX_LOCK_KEY);
}
_buildFlags(sentinel) {
const flags = {
/* eslint-disable camelcase */
sentinel_ip: sentinel.host,
sentinel_port: sentinel.port,
sentinel_cluster_name: this._redis.name,
bucketd_addr: `http://${this._bucketd.host}:${this._bucketd.port}`,
};
if (this._redis.sentinelPassword) {
flags.redis_password = this._redis.sentinelPassword;
}
/* eslint-enable camelcase */
const opts = [];
Object.keys(flags)
.forEach(flag => {
const name = `--${flag.replace(/_/g, '-')}`;
opts.push(name);
opts.push(flags[flag]);
});
if (this._onlyCountLatestWhenObjectLocked) {
opts.push('--only-latest-when-locked');
}
return opts;
}
_runScriptWithSentinels(path, remainingSentinels, done) {
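// Each attempt consumes one sentinel via shift(); on the sentinel
// connection exit code the method recurses with the remaining hosts.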
const flags = this._buildFlags(remainingSentinels.shift());
this._requestLogger.debug(`launching subprocess ${path} with flags: ${flags}`);
const process = childProcess.spawn(REINDEX_PYTHON_INTERPRETER, [path, ...flags]);
process.stdout.on('data', data => {
this._requestLogger.info('received output from script', {
output: Buffer.from(data).toString(),
script: path,
});
});
process.stderr.on('data', data => {
this._requestLogger.debug('received error from script', {
output: Buffer.from(data).toString(),
script: path,
});
});
process.on('error', err => {
this._requestLogger.debug('failed to start process', {
error: err,
script: path,
});
});
process.on('close', code => {
if (code) {
this._requestLogger.error('script exited with error', {
statusCode: code,
script: path,
});
if (code === EXIT_CODE_SENTINEL_CONNECTION) {
if (remainingSentinels.length > 0) {
this._requestLogger.info('retrying with next sentinel host', {
script: path,
});
return this._runScriptWithSentinels(path, remainingSentinels, done);
}
this._requestLogger.error('no more sentinel host to try', {
script: path,
});
}
} else {
this._requestLogger.info('script exited successfully', {
statusCode: code,
script: path,
});
}
return done();
});
}
_runScript(path, done) {
const remainingSentinels = [...this._redis.sentinels];
this._runScriptWithSentinels(path, remainingSentinels, done);
}
_attemptLock(job) {
this._requestLogger.info('attempting to acquire the lock to begin job');
this._lock()
.then(res => {
if (res) {
this._requestLogger
.info('acquired the lock, proceeding with job');
job();
} else {
this._requestLogger
.info('the lock is already acquired, skipping job');
}
})
.catch(err => {
this._requestLogger.error(
'an error occurred when acquiring the lock, skipping job', {
stack: err && err.stack,
},
);
});
}
_attemptUnlock() {
this._unLock()
.catch(err => {
this._requestLogger
.error('an error occurred when removing the lock', {
stack: err && err.stack,
});
});
}
_connect(done) {
const doneOnce = jsutil.once(done);
const client = this._getRedisClient();
this.ds = new Datastore().setClient(client);
client
.on('ready', doneOnce)
.on('error', doneOnce);
}
_scheduleJob() {
this._connect(err => {
if (err) {
this._requestLogger.error(
'could not connect to datastore, skipping', {
error: err && err.stack,
},
);
return undefined;
}
return this._attemptLock(() => {
const scripts = [
`${__dirname}/reindex/s3_bucketd.py`,
`${__dirname}/reindex/reporting.py`,
];
return async.eachSeries(scripts, (script, next) => {
this._runScript(script, next);
}, () => {
this._attemptUnlock();
});
});
});
}
_job() {
const job = nodeSchedule.scheduleJob(this._schedule, () => this._scheduleJob());
if (!job) {
this._log.error('could not initiate job schedule');
return undefined;
}
job.on('scheduled', () => {
this._requestLogger = this._log.newRequestLogger();
this._requestLogger.info('utapi reindex job scheduled', {
schedule: this._schedule,
});
});
return undefined;
}
start() {
if (this._enabled) {
this._log.info('initiating job schedule', {
schedule: this._schedule,
});
this._job();
} else {
this._log.info('utapi reindex is disabled');
}
return this;
}
}
module.exports = UtapiReindex;

View File

@ -1,11 +1,12 @@
/* eslint-disable no-plusplus */
const assert = require('assert');
const async = require('async');
const { scheduleJob } = require('node-schedule');
const werelogs = require('werelogs');
const UtapiClient = require('./UtapiClient');
const Datastore = require('./Datastore');
const safeJsonParse = require('../utils/safeJsonParse');
const redisClientv2 = require('../utils/redisClientv2');
// Every five minutes. Cron-style scheduling used by node-schedule.
const REPLAY_SCHEDULE = '*/5 * * * *';
@ -35,13 +36,13 @@ class UtapiReplay {
this.disableReplay = true;
if (config) {
const message = 'missing required property in UtapiReplay '
+ 'configuration';
assert(config.redis, `${message}: redis`);
assert(config.localCache, `${message}: localCache`);
this.utapiClient = new UtapiClient(config);
this.localCache = new Datastore()
.setClient(redisClientv2(config.localCache, this.log));
if (config.replaySchedule) {
this.replaySchedule = config.replaySchedule;
}
@ -60,7 +61,7 @@ class UtapiReplay {
return this.localCache.setExpire('s3:utapireplay:lock', 'true', TTL);
}
/**
* Delete the replay lock key. If there is an error during this command, do
* not handle it as the lock will expire after the value of `TTL`.
* @return {undefined}
@ -75,7 +76,9 @@ class UtapiReplay {
* @return {boolean} Returns `true` if object is valid, `false` otherwise.
*/
_validateElement(data) {
const {
action, reqUid, params, timestamp,
} = data;
if (!action || !reqUid || !params || !timestamp) {
this.log.fatal('missing required parameter in element',
{ method: 'UtapiReplay._validateElement' });
@ -184,17 +187,15 @@ class UtapiReplay {
this.log.info('disabled utapi replay scheduler');
return this;
}
const replay = scheduleJob(this.replaySchedule, () => this._setLock()
.then(res => {
// If `res` is not `null`, there is no pre-existing lock.
if (res) {
return this._checkLocalCache();
}
return undefined;
}));
replay.on('scheduled', date => this.log.info(`replay job started: ${date}`));
this.log.info('enabled utapi replay scheduler', {
schedule: this.replaySchedule,
});

View File

@ -4,7 +4,6 @@
* @class
*/
class UtapiRequest {
constructor() {
this._log = null;
this._validator = null;
@ -15,6 +14,15 @@ class UtapiRequest {
this._datastore = null;
this._requestQuery = null;
this._requestPath = null;
this._vault = null;
}
getVault() {
return this._vault;
}
setVault(vault) {
this._vault = vault;
return this;
}
/**
@ -267,7 +275,6 @@ class UtapiRequest {
getDatastore() {
return this._datastore;
}
}
module.exports = UtapiRequest;

View File

@ -6,7 +6,6 @@ const vaultclient = require('vaultclient');
*/
class Vault {
constructor(config) {
const { host, port } = config.vaultd;
if (config.https) {
@ -42,10 +41,11 @@ class Vault {
* @return {undefined}
*/
authenticateV4Request(params, requestContexts, callback) {
const {
accessKey, signatureFromRequest, region, scopeDate,
stringToSign,
} = params.data;
const { log } = params;
log.debug('authenticating V4 request');
const serializedRCs = requestContexts.map(rc => rc.serialize());
this._client.verifySignatureV4(
@ -59,7 +59,8 @@ class Vault {
}
return callback(null,
authInfo.message.body.authorizationResults);
});
},
);
}
/**
@ -76,7 +77,6 @@ class Vault {
return this._client.getCanonicalIdsByAccountIds(accountIds,
{ reqUid: log.getSerializedUids(), logger: log }, callback);
}
}
module.exports = Vault;

View File

@ -1,3 +1,4 @@
/* eslint-disable class-methods-use-this */
const assert = require('assert');
const map = require('async/map');
@ -52,6 +53,16 @@ class Memory {
this.data = {};
}
/**
* A simple wrapper provided for API compatibility with redis
* @param {Function} func - Function to call
* @param {callback} cb - callback
* @returns {undefined}
*/
call(func, cb) {
return func(this, cb);
}
/**
* Set key to hold a value
* @param {string} key - data key
@ -77,8 +88,8 @@ class Memory {
*/
get(key, cb) {
assert.strictEqual(typeof key, 'string');
process.nextTick(() => cb(null, this.data[key] === undefined
? null : this.data[key]));
}
/**
@ -95,8 +106,8 @@ class Memory {
}
const val = parseInt(this.data[key], 10);
if (Number.isNaN(val)) {
throw new Error('Value at key cannot be represented as a '
+ 'number');
}
this.data[key] = (val + 1).toString();
return cb(null, this.data[key]);
@ -119,8 +130,8 @@ class Memory {
}
const val = parseInt(this.data[key], 10);
if (Number.isNaN(val)) {
throw new Error('Value at key cannot be represented as a '
+ 'number');
}
this.data[key] = (val + num).toString();
return cb(null, this.data[key]);
@ -141,8 +152,8 @@ class Memory {
}
const val = parseInt(this.data[key], 10);
if (Number.isNaN(val)) {
throw new Error('Value at key cannot be represented as a '
+ 'number');
}
this.data[key] = (val - 1).toString();
return cb(null, this.data[key]);
@ -165,8 +176,8 @@ class Memory {
}
const val = parseInt(this.data[key], 10);
if (Number.isNaN(val)) {
throw new Error('Value at key cannot be represented as a '
+ 'number');
}
this.data[key] = (val - num).toString();
return cb(null, this.data[key]);
@ -193,8 +204,7 @@ class Memory {
}
const valStr = value.toString();
// compares both arrays of data
const found = this.data[key].some(item => JSON.stringify(item) === JSON.stringify([score, valStr]));
if (!found) {
// as this is a sorted set emulation, it sorts the data by score
// after each insertion
@ -227,8 +237,8 @@ class Memory {
return cb(null, null);
}
const minScore = (min === '-inf') ? this.data[key][0][0] : min;
const maxScore = (max === '+inf')
? this.data[key][this.data[key].length - 1][0] : max;
return cb(null, this.data[key].filter(item => item[0] >= minScore
&& item[0] <= maxScore).map(item => item[1]));
});
@ -255,8 +265,8 @@ class Memory {
return cb(null, null);
}
const minScore = (min === '-inf') ? this.data[key][0][0] : min;
const maxScore = (max === '+inf')
? this.data[key][this.data[key].length - 1][0] : max;
const cloneKeyData = Object.assign([], this.data[key]);
// Sort keys by scores in the decreasing order, if scores are equal
// sort by their value in the decreasing order
@ -292,11 +302,10 @@ class Memory {
return cb(null, null);
}
const minScore = (min === '-inf') ? this.data[key][0][0] : min;
const maxScore = (max === '+inf')
? this.data[key][this.data[key].length - 1][0] : max;
const oldLen = this.data[key].length;
this.data[key] = this.data[key].filter(item => (item[0] < minScore || item[0] > maxScore));
return cb(null, (oldLen - this.data[key].length));
});
}

117
lib/reindex/reporting.py Normal file
View File

@ -0,0 +1,117 @@
import argparse
import ast
from concurrent.futures import ThreadPoolExecutor
import json
import logging
import re
import redis
import requests
import sys
from threading import Thread
import time
import urllib
logging.basicConfig(level=logging.INFO)
_log = logging.getLogger('utapi-reindex:reporting')
SENTINEL_CONNECT_TIMEOUT_SECONDS = 10
EXIT_CODE_SENTINEL_CONNECTION_ERROR = 100
def get_options():
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--sentinel-ip", default='127.0.0.1', help="Sentinel IP")
parser.add_argument("-p", "--sentinel-port", default="16379", help="Sentinel Port")
parser.add_argument("-v", "--redis-password", default=None, help="Redis AUTH Password")
parser.add_argument("-n", "--sentinel-cluster-name", default='scality-s3', help="Redis cluster name")
parser.add_argument("-b", "--bucketd-addr", default='http://127.0.0.1:9000', help="URL of the bucketd server")
return parser.parse_args()
def safe_print(content):
print("{0}".format(content))
class askRedis():
def __init__(self, ip="127.0.0.1", port="16379", sentinel_cluster_name="scality-s3", password=None):
self._password = password
r = redis.Redis(
host=ip,
port=port,
db=0,
password=password,
socket_connect_timeout=SENTINEL_CONNECT_TIMEOUT_SECONDS
)
try:
self._ip, self._port = r.sentinel_get_master_addr_by_name(sentinel_cluster_name)
except (redis.exceptions.ConnectionError, redis.exceptions.TimeoutError) as e:
_log.error(f'Failed to connect to redis sentinel at {ip}:{port}: {e}')
# use a specific error code to hint on retrying with another sentinel node
sys.exit(EXIT_CODE_SENTINEL_CONNECTION_ERROR)
def read(self, resource, name):
r = redis.Redis(host=self._ip, port=self._port, db=0, password=self._password)
res = 's3:%s:%s:storageUtilized:counter' % (resource, name)
total_size = r.get(res)
res = 's3:%s:%s:numberOfObjects:counter' % (resource, name)
files = r.get(res)
try:
return {'files': int(files), "total_size": int(total_size)}
except Exception as e:
return {'files': 0, "total_size": 0}
class S3ListBuckets():
def __init__(self, host='127.0.0.1:9000'):
self.bucketd_host = host
def run(self):
docs = []
url = "%s/default/bucket/users..bucket" % self.bucketd_host
session = requests.Session()
r = session.get(url, timeout=30)
if r.status_code == 200:
payload = json.loads(r.text)
for keys in payload['Contents']:
key = keys["key"]
r1 = re.match("(\w+)..\|..(\w+.*)", key)
docs.append(r1.groups())
return docs
if __name__ == '__main__':
options = get_options()
redis_conf = dict(
ip=options.sentinel_ip,
port=options.sentinel_port,
sentinel_cluster_name=options.sentinel_cluster_name,
password=options.redis_password
)
P = S3ListBuckets(options.bucketd_addr)
listbuckets = P.run()
userids = set([x for x, y in listbuckets])
executor = ThreadPoolExecutor(max_workers=1)
for userid, bucket in listbuckets:
U = askRedis(**redis_conf)
data = U.read('buckets', bucket)
content = "Account:%s|Bucket:%s|NumberOFfiles:%s|StorageCapacity:%s " % (
userid, bucket, data["files"], data["total_size"])
executor.submit(safe_print, content)
data = U.read('buckets', 'mpuShadowBucket'+bucket)
content = "Account:%s|Bucket:%s|NumberOFfiles:%s|StorageCapacity:%s " % (
userid, 'mpuShadowBucket'+bucket, data["files"], data["total_size"])
executor.submit(safe_print, content)
executor.submit(safe_print, "")
for userid in sorted(userids):
U = askRedis(**redis_conf)
data = U.read('accounts', userid)
content = "Account:%s|NumberOFfiles:%s|StorageCapacity:%s " % (
userid, data["files"], data["total_size"])
executor.submit(safe_print, content)

586
lib/reindex/s3_bucketd.py Normal file
View File

@ -0,0 +1,586 @@
import argparse
import concurrent.futures as futures
import functools
import itertools
import json
import logging
import os
import re
import sys
import time
import urllib
from pathlib import Path
from collections import defaultdict, namedtuple
from concurrent.futures import ThreadPoolExecutor
import redis
import requests
from requests import ConnectionError, HTTPError, Timeout
logging.basicConfig(level=logging.INFO)
_log = logging.getLogger('utapi-reindex')
USERS_BUCKET = 'users..bucket'
MPU_SHADOW_BUCKET_PREFIX = 'mpuShadowBucket'
ACCOUNT_UPDATE_CHUNKSIZE = 100
SENTINEL_CONNECT_TIMEOUT_SECONDS = 10
EXIT_CODE_SENTINEL_CONNECTION_ERROR = 100
def get_options():
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--sentinel-ip", default='127.0.0.1', help="Sentinel IP")
parser.add_argument("-p", "--sentinel-port", default="16379", help="Sentinel Port")
parser.add_argument("-v", "--redis-password", default=None, help="Redis AUTH Password")
parser.add_argument("-n", "--sentinel-cluster-name", default='scality-s3', help="Redis cluster name")
parser.add_argument("-s", "--bucketd-addr", default='http://127.0.0.1:9000', help="URL of the bucketd server")
parser.add_argument("-w", "--worker", default=10, type=int, help="Number of workers")
parser.add_argument("-r", "--max-retries", default=2, type=int, help="Max retries before failing a bucketd request")
parser.add_argument("--only-latest-when-locked", action='store_true', help="Only index the latest version of a key when the bucket has a default object lock policy")
parser.add_argument("--debug", action='store_true', help="Enable debug logging")
parser.add_argument("--dry-run", action="store_true", help="Do not update redis")
group = parser.add_mutually_exclusive_group()
group.add_argument("-a", "--account", default=[], help="account canonical ID (all account buckets will be processed)", action="append", type=nonempty_string('account'))
group.add_argument("--account-file", default=None, help="file containing account canonical IDs, one ID per line", type=existing_file)
group.add_argument("-b", "--bucket", default=[], help="bucket name", action="append", type=nonempty_string('bucket'))
group.add_argument("--bucket-file", default=None, help="file containing bucket names, one bucket name per line", type=existing_file)
options = parser.parse_args()
if options.bucket_file:
with open(options.bucket_file) as f:
options.bucket = [line.strip() for line in f if line.strip()]
elif options.account_file:
with open(options.account_file) as f:
options.account = [line.strip() for line in f if line.strip()]
return options
def nonempty_string(flag):
def inner(value):
if not value.strip():
raise argparse.ArgumentTypeError("%s: value must not be empty"%flag)
return value
return inner
def existing_file(path):
path = Path(path).resolve()
if not path.exists():
raise argparse.ArgumentTypeError("File does not exist: %s"%path)
return path
def chunks(iterable, size):
it = iter(iterable)
chunk = tuple(itertools.islice(it,size))
while chunk:
yield chunk
chunk = tuple(itertools.islice(it,size))
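# Yields fixed-size tuples until the iterable is exhausted, e.g.
# list(chunks(range(5), 2)) == [(0, 1), (2, 3), (4,)]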
def _encoded(func):
def inner(*args, **kwargs):
val = func(*args, **kwargs)
return urllib.parse.quote(val.encode('utf-8'))
return inner
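# Percent-encodes the wrapped function's string result,
# e.g. 'a b..|..00000' becomes 'a%20b..%7C..00000'.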
Bucket = namedtuple('Bucket', ['userid', 'name', 'object_lock_enabled'])
MPU = namedtuple('MPU', ['bucket', 'key', 'upload_id'])
BucketContents = namedtuple('BucketContents', ['bucket', 'obj_count', 'total_size'])
class MaxRetriesReached(Exception):
def __init__(self, url):
super().__init__('Max retries reached for request to %s'%url)
class InvalidListing(Exception):
def __init__(self, bucket):
super().__init__('Invalid contents found while listing bucket %s'%bucket)
class BucketNotFound(Exception):
def __init__(self, bucket):
super().__init__('Bucket %s not found'%bucket)
class BucketDClient:
'''Performs Listing calls against bucketd'''
__url_attribute_format = '{addr}/default/attributes/{bucket}'
__url_bucket_format = '{addr}/default/bucket/{bucket}'
__headers = {"x-scal-request-uids": "utapi-reindex-list-buckets"}
def __init__(self, bucketd_addr=None, max_retries=2, only_latest_when_locked=False):
self._bucketd_addr = bucketd_addr
self._max_retries = max_retries
self._only_latest_when_locked = only_latest_when_locked
self._session = requests.Session()
def _do_req(self, url, check_500=True, **kwargs):
# Add 1 for the initial request
for x in range(self._max_retries + 1):
try:
resp = self._session.get(url, timeout=30, verify=False, headers=self.__headers, **kwargs)
if check_500 and resp.status_code == 500:
_log.warning('500 from bucketd, sleeping 15 secs')
time.sleep(15)
continue
return resp
except (Timeout, ConnectionError) as e:
_log.exception(e)
_log.error('Error during listing, sleeping 5 secs %s'%url)
time.sleep(5)
raise MaxRetriesReached(url)
def _list_bucket(self, bucket, **kwargs):
'''
Lists a bucket lazily until "empty"
bucket: name of the bucket
kwargs: url parameters key=value
To support multiple next marker keys and param encoding, a function can
be passed as a parameter's value. It will be called with the JSON-decoded
response body as its only argument and is expected to return the
parameter's value. On the first request the function will be called with
`None` and should return its initial value. Return `None` for the param to be excluded.
'''
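# Illustrative use of a callable param (mirrors list_buckets below):
#   marker = lambda p: '' if p is None else p.get('Contents', [{}])[-1].get('key', '')
#   for status, page in self._list_bucket(USERS_BUCKET, marker=marker):
#       ...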
url = self.__url_bucket_format.format(addr=self._bucketd_addr, bucket=bucket)
static_params = {k: v for k, v in kwargs.items() if not callable(v)}
dynamic_params = {k: v for k, v in kwargs.items() if callable(v)}
is_truncated = True # Set to True for first loop
payload = None
while is_truncated:
params = static_params.copy() # Use a copy of the static params for a base
for key, func in dynamic_params.items():
params[key] = func(payload) # Call each of our dynamic params with the previous payload
try:
_log.debug('listing bucket bucket: %s params: %s'%(
bucket, ', '.join('%s=%s'%p for p in params.items())))
resp = self._do_req(url, params=params)
if resp.status_code == 404:
_log.debug('Bucket not found bucket: %s'%bucket)
return
if resp.status_code == 200:
payload = resp.json()
except ValueError as e:
_log.exception(e)
_log.error('Invalid listing response body! bucket:%s params:%s'%(
bucket, ', '.join('%s=%s'%p for p in params.items())))
continue
except MaxRetriesReached:
_log.error('Max retries reached listing bucket:%s'%bucket)
raise
except Exception as e:
_log.exception(e)
_log.error('Unhandled exception during listing! bucket:%s params:%s'%(
bucket, ', '.join('%s=%s'%p for p in params.items())))
raise
yield resp.status_code, payload
if isinstance(payload, dict):
is_truncated = payload.get('IsTruncated', False)
else:
is_truncated = len(payload) > 0
@functools.lru_cache(maxsize=16)
def _get_bucket_attributes(self, name):
url = self.__url_attribute_format.format(addr=self._bucketd_addr, bucket=name)
try:
resp = self._do_req(url)
if resp.status_code == 200:
return resp.json()
else:
_log.error('Error getting bucket attributes bucket:%s status_code:%s'%(name, resp.status_code))
raise BucketNotFound(name)
except ValueError as e:
_log.exception(e)
_log.error('Invalid attributes response body! bucket:%s'%name)
raise
except MaxRetriesReached:
_log.error('Max retries reached getting bucket attributes bucket:%s'%name)
raise
except Exception as e:
_log.exception(e)
_log.error('Unhandled exception getting bucket attributes bucket:%s'%name)
raise
def get_bucket_md(self, name):
md = self._get_bucket_attributes(name)
canonId = md.get('owner')
if canonId is None:
_log.error('No owner found for bucket %s'%name)
raise InvalidListing(name)
return Bucket(canonId, name, md.get('objectLockEnabled', False))
def list_buckets(self, account=None):
def get_next_marker(p):
if p is None:
return ''
return p.get('Contents', [{}])[-1].get('key', '')
params = {
'delimiter': '',
'maxKeys': 1000,
'marker': get_next_marker
}
if account is not None:
params['prefix'] = '%s..|..' % account
for _, payload in self._list_bucket(USERS_BUCKET, **params):
buckets = []
for result in payload.get('Contents', []):
match = re.match("(\w+)..\|..(\w+.*)", result['key'])
bucket = Bucket(*match.groups(), False)
# We need to get the attributes for each bucket to determine if it is locked
if self._only_latest_when_locked:
bucket_attrs = self._get_bucket_attributes(bucket.name)
object_lock_enabled = bucket_attrs.get('objectLockEnabled', False)
bucket = bucket._replace(object_lock_enabled=object_lock_enabled)
buckets.append(bucket)
if buckets:
yield buckets
def list_mpus(self, bucket):
_bucket = MPU_SHADOW_BUCKET_PREFIX + bucket.name
def get_next_marker(p):
if p is None:
return 'overview..|..'
return p.get('NextKeyMarker', '')
def get_next_upload_id(p):
if p is None:
return 'None'
return p.get('NextUploadIdMarker', '')
params = {
'delimiter': '',
'keyMarker': '',
'maxKeys': 1000,
'queryPrefixLength': 0,
'listingType': 'MPU',
'splitter': '..|..',
'prefix': get_next_marker,
'uploadIdMarker': get_next_upload_id,
}
keys = []
for status_code, payload in self._list_bucket(_bucket, **params):
if status_code == 404:
break
for key in payload['Uploads']:
keys.append(MPU(
bucket=bucket,
key=key['key'],
upload_id=key['value']['UploadId']))
return keys
def _sum_objects(self, bucket, listing, only_latest_when_locked = False):
count = 0
total_size = 0
last_key = None
try:
for obj in listing:
if isinstance(obj['value'], dict):
# bucketd v6 returns a dict:
data = obj.get('value', {})
size = data["Size"]
else:
# bucketd v7 returns an encoded string
data = json.loads(obj['value'])
size = data.get('content-length', 0)
is_latest = obj['key'] != last_key
last_key = obj['key']
if only_latest_when_locked and bucket.object_lock_enabled and not is_latest:
_log.debug('Skipping versioned key: %s'%obj['key'])
continue
count += 1
total_size += size
except InvalidListing:
_log.error('Invalid contents in listing. bucket:%s'%bucket.name)
raise InvalidListing(bucket.name)
return count, total_size
def _extract_listing(self, key, listing):
for status_code, payload in listing:
contents = payload[key] if isinstance(payload, dict) else payload
if contents is None:
raise InvalidListing('')
for obj in contents:
yield obj
def count_bucket_contents(self, bucket):
def get_key_marker(p):
if p is None:
return ''
return p.get('NextKeyMarker', '')
def get_vid_marker(p):
if p is None:
return ''
return p.get('NextVersionIdMarker', '')
params = {
'listingType': 'DelimiterVersions',
'maxKeys': 1000,
'keyMarker': get_key_marker,
'versionIdMarker': get_vid_marker,
}
listing = self._list_bucket(bucket.name, **params)
count, total_size = self._sum_objects(bucket, self._extract_listing('Versions', listing), self._only_latest_when_locked)
return BucketContents(
bucket=bucket,
obj_count=count,
total_size=total_size
)
def count_mpu_parts(self, mpu):
shadow_bucket_name = MPU_SHADOW_BUCKET_PREFIX + mpu.bucket.name
shadow_bucket = mpu.bucket._replace(name=shadow_bucket_name)
def get_prefix(p):
if p is None:
return mpu.upload_id
return p.get('Contents', [{}])[-1].get('key', '')
@_encoded
def get_next_marker(p):
prefix = get_prefix(p)
return prefix + '..|..00000'
params = {
'prefix': get_prefix,
'marker': get_next_marker,
'delimiter': '',
'maxKeys': 1000,
'listingType': 'Delimiter',
}
listing = self._list_bucket(shadow_bucket_name, **params)
count, total_size = self._sum_objects(shadow_bucket, self._extract_listing('Contents', listing))
return BucketContents(
bucket=shadow_bucket,
obj_count=0, # MPU parts are not counted towards numberOfObjects
total_size=total_size
)
def list_all_buckets(bucket_client):
return bucket_client.list_buckets()
def list_specific_accounts(bucket_client, accounts):
for account in accounts:
yield from bucket_client.list_buckets(account=account)
def list_specific_buckets(bucket_client, buckets):
batch = []
for bucket in buckets:
try:
batch.append(bucket_client.get_bucket_md(bucket))
except BucketNotFound:
_log.error('Failed to list bucket %s. Removing from results.'%bucket)
continue
yield batch
def index_bucket(client, bucket):
'''
Takes an instance of BucketDClient and a bucket name, and returns a
tuple of BucketContents for the passed bucket and its mpu shadow bucket.
'''
try:
bucket_total = client.count_bucket_contents(bucket)
mpus = client.list_mpus(bucket)
if not mpus:
return bucket_total
total_size = bucket_total.total_size
mpu_totals = [client.count_mpu_parts(m) for m in mpus]
for mpu in mpu_totals:
total_size += mpu.total_size
return bucket_total._replace(total_size=total_size)
except Exception as e:
_log.exception(e)
_log.error('Error during listing. Removing from results bucket:%s'%bucket.name)
raise InvalidListing(bucket.name)
def update_report(report, key, obj_count, total_size):
'''Convenience function to update the report dicts'''
if key in report:
report[key]['obj_count'] += obj_count
report[key]['total_size'] += total_size
else:
report[key] = {
'obj_count': obj_count,
'total_size': total_size,
}
def get_redis_client(options):
sentinel = redis.Redis(
host=options.sentinel_ip,
port=options.sentinel_port,
db=0,
password=options.redis_password,
socket_connect_timeout=SENTINEL_CONNECT_TIMEOUT_SECONDS
)
try:
ip, port = sentinel.sentinel_get_master_addr_by_name(options.sentinel_cluster_name)
except (redis.exceptions.ConnectionError, redis.exceptions.TimeoutError) as e:
_log.error(f'Failed to connect to redis sentinel at {options.sentinel_ip}:{options.sentinel_port}: {e}')
# use a specific error code to hint on retrying with another sentinel node
sys.exit(EXIT_CODE_SENTINEL_CONNECTION_ERROR)
return redis.Redis(
host=ip,
port=port,
db=0,
password=options.redis_password
)
def update_redis(client, resource, name, obj_count, total_size):
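# Backdate the data point by 15 minutes (epoch milliseconds); the
# zremrangebyscore calls replace any existing point at that exact score
# before the zadd, so a re-run landing on the same score overwrites
# rather than accumulates.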
timestamp = int(time.time() - 15 * 60) * 1000
obj_count_key = 's3:%s:%s:numberOfObjects' % (resource, name)
total_size_key = 's3:%s:%s:storageUtilized' % (resource, name)
client.zremrangebyscore(obj_count_key, timestamp, timestamp)
client.zremrangebyscore(total_size_key, timestamp, timestamp)
client.zadd(obj_count_key, {obj_count: timestamp})
client.zadd(total_size_key, {total_size: timestamp})
client.set(obj_count_key + ':counter', obj_count)
client.set(total_size_key + ':counter', total_size)
def get_resources_from_redis(client, resource):
for key in client.scan_iter('s3:%s:*:storageUtilized' % resource):
yield key.decode('utf-8').split(':')[2]
def log_report(resource, name, obj_count, total_size):
print('%s:%s:%s:%s'%(
resource,
name,
obj_count,
total_size
))
if __name__ == '__main__':
options = get_options()
if options.debug:
_log.setLevel(logging.DEBUG)
bucket_client = BucketDClient(options.bucketd_addr, options.max_retries, options.only_latest_when_locked)
redis_client = get_redis_client(options)
account_reports = {}
observed_buckets = set()
failed_accounts = set()
if options.account:
batch_generator = list_specific_accounts(bucket_client, options.account)
elif options.bucket:
batch_generator = list_specific_buckets(bucket_client, options.bucket)
else:
batch_generator = list_all_buckets(bucket_client)
with ThreadPoolExecutor(max_workers=options.worker) as executor:
for batch in batch_generator:
bucket_reports = {}
jobs = { executor.submit(index_bucket, bucket_client, b): b for b in batch }
for job in futures.as_completed(jobs.keys()):
try:
total = job.result() # Summed bucket and shadowbucket totals
except InvalidListing:
_bucket = jobs[job]
_log.error('Failed to list bucket %s. Removing from results.'%_bucket.name)
# Add the bucket to observed_buckets anyway to avoid clearing existing metrics
observed_buckets.add(_bucket.name)
# If we can not list one of an account's buckets we can not update its total
failed_accounts.add(_bucket.userid)
continue
observed_buckets.add(total.bucket.name)
update_report(bucket_reports, total.bucket.name, total.obj_count, total.total_size)
update_report(account_reports, total.bucket.userid, total.obj_count, total.total_size)
# Bucket reports can be updated as we get them
if options.dry_run:
for bucket, report in bucket_reports.items():
_log.info(
"DryRun: resource buckets [%s] would be updated with obj_count %i and total_size %i" % (
bucket, report['obj_count'], report['total_size']
)
)
else:
pipeline = redis_client.pipeline(transaction=False) # No transaction to reduce redis load
for bucket, report in bucket_reports.items():
update_redis(pipeline, 'buckets', bucket, report['obj_count'], report['total_size'])
log_report('buckets', bucket, report['obj_count'], report['total_size'])
pipeline.execute()
stale_buckets = set()
recorded_buckets = set(get_resources_from_redis(redis_client, 'buckets'))
if options.bucket:
stale_buckets = { b for b in options.bucket if b not in observed_buckets }
elif options.account:
_log.warning('Stale buckets will not be cleared when using the --account or --account-file flags')
else:
stale_buckets = recorded_buckets.difference(observed_buckets)
_log.info('Found %s stale buckets' % len(stale_buckets))
if options.dry_run:
_log.info("DryRun: not updating stale buckets")
else:
for chunk in chunks(stale_buckets, ACCOUNT_UPDATE_CHUNKSIZE):
pipeline = redis_client.pipeline(transaction=False) # No transaction to reduce redis load
for bucket in chunk:
update_redis(pipeline, 'buckets', bucket, 0, 0)
log_report('buckets', bucket, 0, 0)
pipeline.execute()
# Account metrics are not updated if a bucket is specified
if options.bucket:
_log.warning('Account metrics will not be updated when using the --bucket or --bucket-file flags')
else:
# Don't update any accounts with failed listings
without_failed = filter(lambda x: x[0] not in failed_accounts, account_reports.items())
if options.dry_run:
for userid, report in account_reports.items():
_log.info(
"DryRun: resource account [%s] would be updated with obj_count %i and total_size %i" % (
userid, report['obj_count'], report['total_size']
)
)
else:
# Update total account reports in chunks
for chunk in chunks(without_failed, ACCOUNT_UPDATE_CHUNKSIZE):
pipeline = redis_client.pipeline(transaction=False) # No transaction to reduce redis load
for userid, report in chunk:
update_redis(pipeline, 'accounts', userid, report['obj_count'], report['total_size'])
log_report('accounts', userid, report['obj_count'], report['total_size'])
pipeline.execute()
if options.account:
for account in options.account:
if account in failed_accounts:
_log.error("No metrics updated for account %s, one or more buckets failed" % account)
# Include failed_accounts in observed_accounts to avoid clearing metrics
observed_accounts = failed_accounts.union(set(account_reports.keys()))
recorded_accounts = set(get_resources_from_redis(redis_client, 'accounts'))
if options.account:
stale_accounts = { a for a in options.account if a not in observed_accounts }
else:
# Stale accounts and buckets are ones that do not appear in the listing, but have recorded values
stale_accounts = recorded_accounts.difference(observed_accounts)
_log.info('Found %s stale accounts' % len(stale_accounts))
if options.dry_run:
_log.info("DryRun: not updating stale accounts")
else:
for chunk in chunks(stale_accounts, ACCOUNT_UPDATE_CHUNKSIZE):
pipeline = redis_client.pipeline(transaction=False) # No transaction to reduce redis load
for account in chunk:
update_redis(pipeline, 'accounts', account, 0, 0)
log_report('accounts', account, 0, 0)
pipeline.execute()

View File

@ -32,6 +32,7 @@ const keys = {
deleteObject: prefix => `${prefix}DeleteObject`,
multiObjectDelete: prefix => `${prefix}MultiObjectDelete`,
uploadPart: prefix => `${prefix}UploadPart`,
uploadPartCopy: prefix => `${prefix}UploadPartCopy`,
getObject: prefix => `${prefix}GetObject`,
getObjectAcl: prefix => `${prefix}GetObjectAcl`,
getObjectTagging: prefix => `${prefix}GetObjectTagging`,
@ -45,6 +46,15 @@ const keys = {
putBucketReplication: prefix => `${prefix}PutBucketReplication`,
getBucketReplication: prefix => `${prefix}GetBucketReplication`,
deleteBucketReplication: prefix => `${prefix}DeleteBucketReplication`,
putBucketObjectLock: prefix => `${prefix}PutBucketObjectLock`,
getBucketObjectLock: prefix => `${prefix}GetBucketObjectLock`,
putObjectRetention: prefix => `${prefix}PutObjectRetention`,
getObjectRetention: prefix => `${prefix}GetObjectRetention`,
putObjectLegalHold: prefix => `${prefix}PutObjectLegalHold`,
getObjectLegalHold: prefix => `${prefix}GetObjectLegalHold`,
replicateObject: prefix => `${prefix}ReplicateObject`,
replicateTags: prefix => `${prefix}ReplicateTags`,
replicateDelete: prefix => `${prefix}ReplicateDelete`,
incomingBytes: prefix => `${prefix}incomingBytes`,
outgoingBytes: prefix => `${prefix}outgoingBytes`,
};
@ -57,11 +67,13 @@ const keys = {
* @return {string} - prefix for the schema key
*/
function getSchemaPrefix(params, timestamp) {
const {
bucket, accountId, userId, level, service, location,
} = params;
// `service` property must remain last because other objects also include it
const id = bucket || accountId || userId || location || service;
const prefix = timestamp ? `${service}:${level}:${timestamp}:${id}:`
: `${service}:${level}:${id}:`;
return prefix;
}
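// e.g. getSchemaPrefix({ service: 's3', level: 'buckets', bucket: 'mybucket' })
// returns 's3:buckets:mybucket:'.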
@ -74,9 +86,13 @@ function getSchemaPrefix(params, timestamp) {
*/
function generateKey(params, metric, timestamp) {
const prefix = getSchemaPrefix(params, timestamp);
if (params.location) {
return `${prefix}locationStorage`;
}
return keys[metric](prefix);
}
/**
* Returns a list of the counters for a metric type
* @param {object} params - object with metric type and id as a property

View File

@ -1,3 +1,4 @@
/* eslint-disable class-methods-use-this */
const http = require('http');
const https = require('https');
const url = require('url');
@ -6,13 +7,12 @@ const { Clustering, errors, ipCheck } = require('arsenal');
const arsenalHttps = require('arsenal').https;
const { Logger } = require('werelogs');
const config = require('./Config');
const routes = require('../router/routes');
const Route = require('../router/Route');
const Router = require('../router/Router');
const UtapiRequest = require('../lib/UtapiRequest');
const Datastore = require('./Datastore');
const redisClient = require('../utils/redisClient');
const redisClientv2 = require('../utils/redisClientv2');
class UtapiServer {
/**
@ -27,7 +27,12 @@ class UtapiServer {
constructor(worker, port, datastore, logger, config) {
this.worker = worker;
this.port = port;
this.router = new Router(config);
this.vault = config.vaultclient;
if (!this.vault) {
const Vault = require('./Vault');
this.vault = new Vault(config);
}
this.router = new Router(config, this.vault);
this.logger = logger;
this.datastore = datastore;
this.server = null;
@ -70,6 +75,7 @@ class UtapiServer {
req.socket.setNoDelay();
const { query, path, pathname } = url.parse(req.url, true);
const utapiRequest = new UtapiRequest()
.setVault(this.vault)
.setRequest(req)
.setLog(this.logger.newRequestLogger())
.setResponse(res)
@ -86,15 +92,17 @@ class UtapiServer {
|| req.method === 'POST')) {
utapiRequest.setStatusCode(200);
const allowIp = ipCheck.ipMatchCidrList(
config.healthChecks.allowFrom, req.socket.remoteAddress,
);
if (!allowIp) {
return this.errorResponse(utapiRequest, errors.AccessDenied);
}
const redisClient = this.datastore.getClient();
if (!redisClient.isReady) {
return this.errorResponse(utapiRequest,
errors.InternalError.customizeDescription(
'Redis server is not ready',
));
}
return this.response(utapiRequest, {});
}
@ -121,8 +129,7 @@ class UtapiServer {
rejectUnauthorized: true,
}, (req, res) => this.requestListener(req, res, this.router));
} else {
this.server = http.createServer((req, res) => this.requestListener(req, res, this.router));
}
this.server.on('listening', () => {
const addr = this.server.address() || {
@ -212,15 +219,18 @@ class UtapiServer {
* @property {object} params.log - logger configuration
* @return {undefined}
*/
function spawn(config) {
const {
workers, redis, log, port,
} = config;
const logger = new Logger('Utapi', {
level: log.logLevel,
dump: log.dumpLevel,
});
const cluster = new Clustering(workers, logger);
cluster.start(worker => {
const datastore = new Datastore().setClient(redisClientv2(redis, logger));
const server = new UtapiServer(worker, port, datastore, logger, config);
server.startup();
});

7
libV2/cache/backend/index.js vendored Normal file
View File

@ -0,0 +1,7 @@
const MemoryCache = require('./memory');
const RedisCache = require('./redis');
module.exports = {
MemoryCache,
RedisCache,
};

110
libV2/cache/backend/memory.js vendored Normal file
View File

@ -0,0 +1,110 @@
const schema = require('../schema');
const constants = require('../../constants');
/**
* Returns null iff the value is undefined.
* Returns the passed value otherwise.
*
* @param {*} value - Any value
* @returns {*} - Passed value or null
*/
function orNull(value) {
return value === undefined ? null : value;
}
class MemoryCache {
constructor() {
this._data = {};
this._shards = {};
this._prefix = 'utapi';
this._expirations = {};
}
// eslint-disable-next-line class-methods-use-this
async connect() {
return true;
}
// eslint-disable-next-line class-methods-use-this
async disconnect() {
Object.values(this._expirations).forEach(clearTimeout);
return true;
}
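// Emulates redis key expiry: any pending timer for the key is replaced and
// the key is dropped after `delay` seconds.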
_expireKey(key, delay) {
if (this._expirations[key]) {
clearTimeout(this._expirations[key]);
}
this._expirations[key] = setTimeout(() => delete this._data[key], delay * 1000);
}
async getKey(key) {
return this._data[key];
}
async setKey(key, data) {
this._data[key] = data;
return true;
}
async addToShard(shard, event) {
const metricKey = schema.getUtapiMetricKey(this._prefix, event);
this._data[metricKey] = event;
if (this._shards[shard]) {
this._shards[shard].push(metricKey);
} else {
this._shards[shard] = [metricKey];
}
return true;
}
async getKeysInShard(shard) {
return this._shards[shard] || [];
}
async fetchShard(shard) {
if (this._shards[shard]) {
return this._shards[shard].map(key => this._data[key]);
}
return [];
}
async deleteShardAndKeys(shard) {
(this._shards[shard] || []).forEach(key => {
delete this._data[key];
});
delete this._shards[shard];
return true;
}
async getShards() {
return Object.keys(this._shards);
}
async shardExists(shard) {
return this._shards[shard.toString()] !== undefined;
}
async updateCounters(metric) {
if (metric.sizeDelta) {
const accountSizeKey = schema.getAccountSizeCounterKey(this._prefix, metric.account);
this._data[accountSizeKey] = (this._data[accountSizeKey] || 0) + metric.sizeDelta;
}
}
async updateAccountCounterBase(account, size) {
const accountSizeKey = schema.getAccountSizeCounterKey(this._prefix, account);
const accountSizeBaseKey = schema.getAccountSizeCounterBaseKey(this._prefix, account);
this._data[accountSizeKey] = 0;
this._data[accountSizeBaseKey] = size;
this._expireKey(accountSizeBaseKey, constants.counterBaseValueExpiration);
}
async fetchAccountSizeCounter(account) {
const accountSizeKey = schema.getAccountSizeCounterKey(this._prefix, account);
const accountSizeBaseKey = schema.getAccountSizeCounterBaseKey(this._prefix, account);
return [orNull(this._data[accountSizeKey]), orNull(this._data[accountSizeBaseKey])];
}
}
module.exports = MemoryCache;

194
libV2/cache/backend/redis.js vendored Normal file
View File

@ -0,0 +1,194 @@
const RedisClient = require('../../redis');
const schema = require('../schema');
const { LoggerContext } = require('../../utils');
const constants = require('../../constants');
const moduleLogger = new LoggerContext({
module: 'cache.backend.redis.RedisCache',
});
class RedisCache {
constructor(options, prefix) {
this._redis = null;
this._options = options;
this._prefix = prefix || 'utapi';
}
async connect() {
moduleLogger.debug('Connecting to redis...');
this._redis = new RedisClient(this._options);
this._redis.connect();
return true;
}
async disconnect() {
const logger = moduleLogger.with({ method: 'disconnect' });
if (this._redis) {
try {
logger.debug('closing connection to redis');
await this._redis.disconnect();
} catch (error) {
logger.error('error while closing connection to redis', {
error,
});
throw error;
}
this._redis = null;
} else {
logger.debug('disconnect called but no connection to redis found');
}
}
async getKey(key) {
return moduleLogger
.with({ method: 'getKey' })
.logAsyncError(() => this._redis.call(redis => redis.get(key)),
'error fetching key from redis', { key });
}
async setKey(key, value) {
return moduleLogger
.with({ method: 'setKey' })
.logAsyncError(async () => {
const res = await this._redis.call(redis => redis.set(key, value));
return res === 'OK';
}, 'error setting key in redis', { key });
}
async addToShard(shard, metric) {
const logger = moduleLogger.with({ method: 'addToShard' });
return logger
.logAsyncError(async () => {
const metricKey = schema.getUtapiMetricKey(this._prefix, metric);
const shardKey = schema.getShardKey(this._prefix, shard);
const shardMasterKey = schema.getShardMasterKey(this._prefix);
logger.debug('adding metric to shard', { metricKey, shardKey });
const [setResults, saddResults] = await this._redis
.call(redis => redis
.multi([
['set', metricKey, JSON.stringify(metric.getValue())],
['sadd', shardKey, metricKey],
['sadd', shardMasterKey, shardKey],
])
.exec());
let success = true;
if (setResults[1] !== 'OK') {
moduleLogger.error('failed to set metric key', {
metricKey,
shardKey,
res: setResults[1],
});
success = false;
}
if (saddResults[1] !== 1) {
moduleLogger.error('metric key already present in shard', {
metricKey,
shardKey,
res: saddResults[1],
});
success = false;
}
return success;
}, 'error during redis command');
}
async getKeysInShard(shard) {
return moduleLogger
.with({ method: 'getKeysInShard' })
.logAsyncError(async () => {
const shardKey = schema.getShardKey(this._prefix, shard);
return this._redis.call(redis => redis.smembers(shardKey));
}, 'error while fetching shard keys', { shard });
}
async fetchShard(shard) {
return moduleLogger
.with({ method: 'fetchShard' })
.logAsyncError(async () => {
const keys = await this.getKeysInShard(shard);
if (!keys.length) {
return [];
}
return this._redis.call(redis => redis.mget(...keys));
}, 'error while fetching shard data', { shard });
}
async deleteShardAndKeys(shard) {
return moduleLogger
.with({ method: 'deleteShardAndKeys' })
.logAsyncError(async () => {
const shardKey = schema.getShardKey(this._prefix, shard);
const shardMasterKey = schema.getShardMasterKey(this._prefix);
const keys = await this.getKeysInShard(shard);
return this._redis.call(
redis => redis.multi([
['del', shardKey, ...keys],
['srem', shardMasterKey, shardKey],
]).exec(),
);
}, 'error while deleting shard', { shard });
}
async shardExists(shard) {
return moduleLogger
.with({ method: 'shardExists' })
.logAsyncError(async () => {
const shardKey = schema.getShardKey(this._prefix, shard);
const res = await this._redis.call(redis => redis.exists(shardKey));
return res === 1;
}, 'error while checking shard', { shard });
}
async getShards() {
return moduleLogger
.with({ method: 'getShards' })
.logAsyncError(async () => {
const shardMasterKey = schema.getShardMasterKey(this._prefix);
return this._redis.call(redis => redis.smembers(shardMasterKey));
}, 'error while fetching shards');
}
async updateCounters(metric) {
return moduleLogger
.with({ method: 'updateCounter' })
.logAsyncError(async () => {
if (metric.sizeDelta) {
const accountSizeKey = schema.getAccountSizeCounterKey(this._prefix, metric.account);
await this._redis.call(redis => redis.incrby(accountSizeKey, metric.sizeDelta));
}
}, 'error while updating metric counters');
}
async updateAccountCounterBase(account, size) {
return moduleLogger
.with({ method: 'updateAccountCounterBase' })
.logAsyncError(async () => {
const accountSizeKey = schema.getAccountSizeCounterKey(this._prefix, account);
const accountSizeBaseKey = schema.getAccountSizeCounterBaseKey(this._prefix, account);
await this._redis.call(async redis => {
await redis.mset(accountSizeKey, 0, accountSizeBaseKey, size);
await redis.expire(accountSizeBaseKey, constants.counterBaseValueExpiration);
});
}, 'error while updating metric counter base');
}
async fetchAccountSizeCounter(account) {
return moduleLogger
.with({ method: 'fetchAccountSizeCounter' })
.logAsyncError(async () => {
const accountSizeKey = schema.getAccountSizeCounterKey(this._prefix, account);
const accountSizeBaseKey = schema.getAccountSizeCounterBaseKey(this._prefix, account);
const [counter, base] = await this._redis.call(redis => redis.mget(accountSizeKey, accountSizeBaseKey));
return [
counter !== null ? parseInt(counter, 10) : null,
base !== null ? parseInt(base, 10) : null,
];
}, 'error fetching account size counters', { account });
}
}
module.exports = RedisCache;

58
libV2/cache/client.js vendored Normal file
View File

@ -0,0 +1,58 @@
const { shardFromTimestamp } = require('../utils');
class CacheClient {
constructor(config) {
this._prefix = config.prefix || 'utapi';
this._cacheBackend = config.cacheBackend;
this._counterBackend = config.counterBackend;
}
async connect() {
return Promise.all([
this._cacheBackend.connect(),
this._counterBackend.connect(),
]);
}
async disconnect() {
return Promise.all([
this._cacheBackend.disconnect(),
this._counterBackend.disconnect(),
]);
}
async pushMetric(metric) {
const shard = shardFromTimestamp(metric.timestamp);
if (!(await this._cacheBackend.addToShard(shard, metric))) {
return false;
}
await this._counterBackend.updateCounters(metric);
return true;
}
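// Events are grouped into time-based shards; counters are only updated
// after the event has been added to its shard, so an event that failed to
// cache is never counted.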
async getMetricsForShard(shard) {
return this._cacheBackend.fetchShard(shard);
}
async deleteShard(shard) {
return this._cacheBackend.deleteShardAndKeys(shard);
}
async shardExists(shard) {
return this._cacheBackend.shardExists(shard);
}
async getShards() {
return this._cacheBackend.getShards();
}
async updateAccountCounterBase(account, size) {
return this._counterBackend.updateAccountCounterBase(account, size);
}
async fetchAccountSizeCounter(account) {
return this._counterBackend.fetchAccountSizeCounter(account);
}
}
module.exports = CacheClient;

21
libV2/cache/index.js vendored Normal file
View File

@ -0,0 +1,21 @@
const config = require('../config');
const CacheClient = require('./client');
const { MemoryCache, RedisCache } = require('./backend');
const cacheTypes = {
redis: conf => new RedisCache(conf),
memory: () => new MemoryCache(),
};
const cacheBackend = cacheTypes[config.cache.backend](config.cache);
const counterBackend = cacheTypes[config.cache.backend](config.redis);
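// Both backends are selected by config.cache.backend; note the counter
// backend is built from the main redis config rather than the cache config.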
module.exports = {
CacheClient,
backends: {
MemoryCache,
RedisCache,
},
client: new CacheClient({ cacheBackend, counterBackend }),
};

27
libV2/cache/schema.js vendored Normal file
View File

@ -0,0 +1,27 @@
function getShardKey(prefix, shard) {
return `${prefix}:shard:${shard}`;
}
function getUtapiMetricKey(prefix, metric) {
return `${prefix}:events:${metric.uuid}`;
}
function getShardMasterKey(prefix) {
return `${prefix}:shard:master`;
}
function getAccountSizeCounterKey(prefix, account) {
return `${prefix}:counters:account:${account}:size`;
}
function getAccountSizeCounterBaseKey(prefix, account) {
return `${prefix}:counters:account:${account}:size:base`;
}
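// Resulting key shapes (with the default 'utapi' prefix):
//   utapi:shard:master, utapi:shard:<shard>, utapi:events:<uuid>,
//   utapi:counters:account:<account>:size and ...:size:base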
module.exports = {
getShardKey,
getUtapiMetricKey,
getShardMasterKey,
getAccountSizeCounterKey,
getAccountSizeCounterBaseKey,
};

329
libV2/client/index.js Normal file
View File

@ -0,0 +1,329 @@
const { callbackify } = require('util');
const { Transform } = require('stream');
const uuid = require('uuid');
const needle = require('needle');
// These modules are added via the `level-mem` package rather than individually
/* eslint-disable import/no-extraneous-dependencies */
const levelup = require('levelup');
const memdown = require('memdown');
const encode = require('encoding-down');
/* eslint-enable import/no-extraneous-dependencies */
const { UtapiMetric } = require('../models');
const {
LoggerContext,
logEventFilter,
asyncOrCallback,
buildFilterChain,
} = require('../utils');
const moduleLogger = new LoggerContext({
module: 'client',
});
class Chunker extends Transform {
constructor(options) {
super({ objectMode: true, ...options });
this._chunkSize = (options && options.chunkSize) || 100;
this._currentChunk = [];
}
_transform(chunk, encoding, callback) {
this._currentChunk.push(chunk);
if (this._currentChunk.length >= this._chunkSize) {
this.push(this._currentChunk);
this._currentChunk = [];
}
callback();
}
_flush(callback) {
if (this._currentChunk.length) {
this.push(this._currentChunk);
}
callback();
}
}
class Uploader extends Transform {
constructor(options) {
super({ objectMode: true, ...options });
this._ingest = options.ingest;
}
_transform(chunk, encoding, callback) {
this._ingest(chunk.map(i => new UtapiMetric(i.value)))
.then(() => {
this.push({
success: true,
keys: chunk.map(i => i.key),
});
callback();
},
error => {
this.push({
success: false,
keys: [],
});
moduleLogger.error('error uploading metrics from retry cache', { error });
callback();
});
}
}
class UtapiClient {
constructor(config) {
this._host = (config && config.host) || 'localhost';
this._port = (config && config.port) || '8100';
this._tls = (config && config.tls) || {};
this._transport = (config && config.tls) ? 'https' : 'http';
this._logger = (config && config.logger) || moduleLogger;
this._maxCachedMetrics = (config && config.maxCachedMetrics) || 200000; // roughly 100MB
this._numCachedMetrics = 0;
this._disableRetryCache = config && config.disableRetryCache;
this._retryCache = this._disableRetryCache
? null
: levelup(encode(memdown(), { valueEncoding: 'json' }));
this._drainTimer = null;
this._drainCanSchedule = true;
this._drainDelay = (config && config.drainDelay) || 30000;
this._suppressedEventFields = (config && config.suppressedEventFields) || null;
const eventFilters = (config && config.filter) || {};
this._shouldPushMetric = buildFilterChain(eventFilters);
if (Object.keys(eventFilters).length !== 0) {
logEventFilter((...args) => moduleLogger.info(...args), 'utapi event filter enabled', eventFilters);
}
}
async join() {
if (this._retryCache === null) {
// retry cache disabled, nothing to flush
return;
}
await this._flushRetryCacheToLogs();
await this._retryCache.close();
}
async _pushToUtapi(metrics) {
const resp = await needle(
'post',
`${this._transport}://${this._host}:${this._port}/v2/ingest`,
metrics.map(metric => metric.getValue()),
{ json: true, ...this._tls },
);
if (resp.statusCode !== 200) {
throw new Error('failed to push metric, server returned non 200 status code '
+ `(${resp.statusCode} ${resp.statusMessage})`);
}
}
async _addToRetryCache(metric) {
if (this._numCachedMetrics < this._maxCachedMetrics) {
try {
await this._retryCache.put(metric.uuid, metric.getValue());
this._numCachedMetrics += 1;
await this._scheduleDrain();
return true;
} catch (error) {
this._logger
.error('error adding metric to retry cache', { error });
this._emitMetricLogLine(metric, { reason: 'error' });
}
} else {
this._emitMetricLogLine(metric, { reason: 'overflow' });
}
return false;
}
async _drainRetryCache() {
return new Promise((resolve, reject) => {
let empty = true;
const toRemove = [];
this._retryCache.createReadStream()
.pipe(new Chunker())
.pipe(new Uploader({ ingest: this._pushToUtapi.bind(this) }))
.on('data', res => {
if (res.success) {
toRemove.push(...res.keys);
} else {
empty = false;
}
})
.on('end', () => {
this._retryCache.batch(
toRemove.map(key => ({ type: 'del', key })),
error => {
if (error) {
this._logger.error('error removing events from retry cache', { error });
reject(error);
return;
}
resolve(empty);
},
);
})
.on('error', reject);
});
}
async _drainRetryCachePreflight() {
try {
const resp = await needle(
'get',
`${this._transport}://${this._host}:${this._port}/_/healthcheck`,
this._tls,
);
return resp.statusCode === 200;
} catch (error) {
this._logger
.debug('drain preflight request failed', { error });
return false;
}
}
async _attemptDrain() {
if (await this._drainRetryCachePreflight()) {
let empty = false;
try {
empty = await this._drainRetryCache();
} catch (error) {
this._logger
.error('Error while draining cache', { error });
}
if (!empty) {
await this._scheduleDrain();
}
}
this._drainTimer = null;
}
async _scheduleDrain() {
if (this._drainCanSchedule && !this._drainTimer) {
this._drainTimer = setTimeout(this._attemptDrain.bind(this), this._drainDelay);
}
}
async _disableDrain() {
this._drainCanSchedule = false;
if (this._drainTimer) {
clearTimeout(this._drainTimer);
this._drainTimer = null;
}
}
_emitMetricLogLine(metric, extra) {
this._logger.info('utapi metric recovery log', {
event: metric.getValue(),
utapiRecovery: true,
...(extra || {}),
});
}
async _flushRetryCacheToLogs() {
const toRemove = [];
return new Promise((resolve, reject) => {
this._retryCache.createReadStream()
.on('data', entry => {
this._emitMetricLogLine(entry.value);
toRemove.push(entry.key);
})
.on('end', () => {
this._retryCache.batch(
toRemove.map(key => ({ type: 'del', key })),
error => {
if (error) {
this._logger.error('error removing events from retry cache', { error });
reject(error);
return;
}
resolve();
},
);
})
.on('error', err => reject(err));
});
}
async _pushMetric(data) {
let metric = data instanceof UtapiMetric
? data
: new UtapiMetric(data);
// If this event has been filtered then exit early
if (!this._shouldPushMetric(metric)) {
return;
}
// Assign a uuid if one isn't passed
if (!metric.uuid) {
metric.uuid = uuid.v4();
}
// Assign a timestamp if one isn't passed
if (!metric.timestamp) {
metric.timestamp = new Date().getTime();
}
if (this._suppressedEventFields !== null) {
const filteredData = Object.entries(metric.getValue())
.filter(([key]) => !this._suppressedEventFields.includes(key))
.reduce((obj, [key, value]) => {
obj[key] = value;
return obj;
}, {});
metric = new UtapiMetric(filteredData);
}
try {
await this._pushToUtapi([metric]);
} catch (error) {
if (!this._disableRetryCache) {
this._logger.error('unable to push metric, adding to retry cache', { error });
if (!await this._addToRetryCache(metric)) {
throw new Error('unable to store metric');
}
} else {
this._logger.debug('unable to push metric. retry cache disabled, not retrying ingestion.', { error });
}
}
}
pushMetric(data, cb) {
if (typeof cb === 'function') {
callbackify(this._pushMetric.bind(this))(data, cb);
return undefined;
}
return this._pushMetric(data);
}
/**
* Get the storageUtilized of a resource
*
* @param {string} level - level of metrics, currently only 'accounts' is supported
* @param {string} resource - id of the resource
* @param {Function|undefined} callback - optional callback
* @returns {Promise|undefined} - return a Promise if no callback is provided, undefined otherwise
*/
getStorage(level, resource, callback) {
if (level !== 'accounts') {
throw new Error('invalid level, only "accounts" is supported');
}
return asyncOrCallback(async () => {
const resp = await needle(
'get',
`${this._transport}://${this._host}:${this._port}/v2/storage/${level}/${resource}`,
this._tls,
);
if (resp.statusCode !== 200) {
throw new Error(`unable to retrieve metrics: ${resp.statusMessage}`);
}
return resp.body;
}, callback);
}
}
module.exports = UtapiClient;
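
A usage sketch of UtapiClient: pushMetric returns a Promise, or accepts a node-style callback via callbackify; failed pushes land in the in-memory retry cache and are drained once the server's healthcheck succeeds. Host, port, and event values below are made up:

const UtapiClient = require('./index');

const utapi = new UtapiClient({ host: '127.0.0.1', port: '8100' });

// Promise style
utapi.pushMetric({
    operationId: 'putObject',
    bucket: 'my-bucket',
    account: 'account-1',
    sizeDelta: 2048,
    incomingBytes: 2048,
}).catch(error => console.error(error));

// Callback style
utapi.getStorage('accounts', 'account-1', (error, res) => {
    if (!error) {
        console.log(res.storageUtilized);
    }
});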

libV2/config/defaults.json Normal file

@@ -0,0 +1,64 @@
{
"host": "127.0.0.1",
"port": 8100,
"log": {
"logLevel": "info",
"dumpLevel": "error"
},
"redis": {
"host": "127.0.0.1",
"port": 6379
},
"localCache": {
"host": "127.0.0.1",
"port": 6379
},
"warp10": {
"host": "127.0.0.1",
"port": 4802,
"nodeId": "single_node",
"requestTimeout": 60000,
"connectTimeout": 60000
},
"healthChecks": {
"allowFrom": ["127.0.0.1/8", "::1"]
},
"cacheBackend": "memory",
"development": false,
"nodeId": "single_node",
"ingestionSchedule": "*/5 * * * * *",
"ingestionShardSize": 10,
"ingestionLagSeconds": 30,
"checkpointSchedule": "*/30 * * * * *",
"snapshotSchedule": "5 0 * * * *",
"repairSchedule": "0 */5 * * * *",
"reindexSchedule": "0 0 0 * * Sun",
"diskUsageSchedule": "0 */15 * * * *",
"bucketd": [ "localhost:9000" ],
"reindex": {
"enabled": true,
"schedule": "0 0 0 * * 6"
},
"diskUsage": {
"retentionDays": 45,
"expirationEnabled": false
},
"serviceUser": {
"arn": "arn:aws:iam::000000000000:user/scality-internal/service-utapi-user",
"enabled": false
},
"filter": {
"allow": {},
"deny": {}
},
"metrics" : {
"enabled": false,
"host": "localhost",
"ingestPort": 10902,
"checkpointPort": 10903,
"snapshotPort": 10904,
"diskUsagePort": 10905,
"reindexPort": 10906,
"repairPort": 10907
}
}

libV2/config/index.js Normal file

@@ -0,0 +1,414 @@
const fs = require('fs');
const path = require('path');
const Joi = require('@hapi/joi');
const assert = require('assert');
const defaults = require('./defaults.json');
const werelogs = require('werelogs');
const {
truthy, envNamespace, allowedFilterFields, allowedFilterStates,
} = require('../constants');
const configSchema = require('./schema');
// We need to require the specific file rather than the parent module to avoid a circular require
const { parseDiskSizeSpec } = require('../utils/disk');
function _splitTrim(char, text) {
return text.split(char).map(v => v.trim());
}
function _splitServer(text) {
assert.notStrictEqual(text.indexOf(':'), -1);
const [host, port] = _splitTrim(':', text);
return {
host,
port: Number.parseInt(port, 10),
};
}
function _splitNode(text) {
assert.notStrictEqual(text.indexOf('='), -1);
const [nodeId, hostname] = _splitTrim('=', text);
return {
nodeId,
..._splitServer(hostname),
};
}
const _typeCasts = {
bool: val => truthy.has(val.toLowerCase()),
int: val => parseInt(val, 10),
list: val => _splitTrim(',', val),
serverList: val => _splitTrim(',', val).map(_splitServer),
nodeList: val => _splitTrim(',', val).map(_splitNode),
diskSize: parseDiskSizeSpec,
};
function _definedInEnv(key) {
return process.env[`${envNamespace}_${key}`] !== undefined;
}
function _loadFromEnv(key, defaultValue, type) {
const envKey = `${envNamespace}_${key}`;
const value = process.env[envKey];
if (value !== undefined) {
if (type !== undefined) {
return type(value);
}
return value;
}
return defaultValue;
}
const defaultConfigPath = path.join(__dirname, '../../config.json');
class Config {
/**
* Returns a new Config instance merging the loaded config with the provided values.
* Passed values override loaded ones recursively.
*
* @param {object} overrides - an object using the same structure as the config file
* @returns {Config} - New Config instance
*/
constructor(overrides) {
this._basePath = path.join(__dirname, '../../');
this._configPath = _loadFromEnv('CONFIG_FILE', defaultConfigPath);
this.host = undefined;
this.port = undefined;
this.healthChecks = undefined;
this.logging = { level: 'debug', dumpLevel: 'error' };
this.redis = undefined;
this.warp10 = undefined;
// read config automatically
const loadedConfig = this._loadConfig();
let parsedConfig = this._parseConfig(loadedConfig);
if (typeof overrides === 'object') {
parsedConfig = this._recursiveUpdate(parsedConfig, overrides);
}
Object.assign(this, parsedConfig);
werelogs.configure({
level: this.logging.level,
dump: this.logging.dumpLevel,
});
}
static _readFile(path, encoding = 'utf-8') {
try {
return fs.readFileSync(path, { encoding });
} catch (error) {
// eslint-disable-next-line no-console
console.error({ message: `error reading file at ${path}`, error });
throw error;
}
}
static _readJSON(path) {
const data = Config._readFile(path);
try {
return JSON.parse(data);
} catch (error) {
// eslint-disable-next-line no-console
console.error({ message: `error parsing JSON from file at ${path}`, error });
throw error;
}
}
_loadDefaults() {
return defaults;
}
_loadUserConfig() {
return Joi.attempt(
Config._readJSON(this._configPath),
configSchema,
'invalid Utapi config',
);
}
_recursiveUpdateArray(parent, child) {
const ret = [];
for (let i = 0; i < Math.max(parent.length, child.length); i += 1) {
ret[i] = this._recursiveUpdate(parent[i], child[i]);
}
return ret;
}
_recursiveUpdateObject(parent, child) {
return Array.from(
new Set([
...Object.keys(parent),
...Object.keys(child)],
// eslint-disable-next-line function-paren-newline
))
.reduce((ret, key) => {
// eslint-disable-next-line no-param-reassign
ret[key] = this._recursiveUpdate(parent[key], child[key]);
return ret;
}, {});
}
/**
* Given two nested Object/Array combos, walk each and return a new object
* with values from child overwriting parent.
* @param {*} parent - Initial value
* @param {*} child - New value
* @returns {*} - Merged value
*/
_recursiveUpdate(parent, child) {
// If no parent value use the child
if (parent === undefined) {
return child;
}
// If no child value use the parent
if (child === undefined) {
return parent;
}
if (Array.isArray(parent) && Array.isArray(child)) {
return this._recursiveUpdateArray(parent, child);
}
if (typeof parent === 'object' && typeof child === 'object') {
return this._recursiveUpdateObject(parent, child);
}
return child;
}
_loadConfig() {
const defaultConf = this._loadDefaults();
const userConf = this._loadUserConfig();
return this._recursiveUpdateObject(defaultConf, userConf);
}
static _parseRedisConfig(prefix, config) {
const redisConf = {
retry: config.retry,
};
if (config.sentinels || _definedInEnv(`${prefix}_SENTINELS`)) {
redisConf.name = _loadFromEnv(`${prefix}_NAME`, config.name);
redisConf.sentinels = _loadFromEnv(
`${prefix}_SENTINELS`,
config.sentinels,
_typeCasts.serverList,
);
redisConf.sentinelPassword = _loadFromEnv(
`${prefix}_SENTINEL_PASSWORD`,
config.sentinelPassword,
);
redisConf.password = _loadFromEnv(
`${prefix}_PASSWORD`,
config.password,
);
} else {
redisConf.host = _loadFromEnv(
`${prefix}_HOST`,
config.host,
);
redisConf.port = _loadFromEnv(
`${prefix}_PORT`,
config.port,
_typeCasts.int,
);
redisConf.password = _loadFromEnv(
`${prefix}_PASSWORD`,
config.password,
);
}
return redisConf;
}
_loadCertificates(config) {
const { key, cert, ca } = config;
const keyPath = path.isAbsolute(key) ? key : path.join(this._basePath, key);
const certPath = path.isAbsolute(cert) ? cert : path.join(this._basePath, cert);
const certs = {
cert: Config._readFile(certPath, 'ascii'),
key: Config._readFile(keyPath, 'ascii'),
};
if (ca) {
const caPath = path.isAbsolute(ca) ? ca : path.join(this._basePath, ca);
certs.ca = Config._readFile(caPath, 'ascii');
}
return certs;
}
static _parseResourceFilters(config) {
const resourceFilters = {};
allowedFilterFields.forEach(
field => allowedFilterStates.forEach(
state => {
const configResources = (config[state] && config[state][field]) || null;
const envVar = `FILTER_${field.toUpperCase()}_${state.toUpperCase()}`;
const resources = _loadFromEnv(envVar, configResources, _typeCasts.list);
if (resources) {
if (resourceFilters[field]) {
throw new Error('You can not define both an allow and a deny list for an event field.');
}
resourceFilters[field] = { [state]: new Set(resources) };
}
},
),
);
return resourceFilters;
}
_parseConfig(config) {
const parsedConfig = {};
parsedConfig.development = _loadFromEnv('DEV_MODE', config.development, _typeCasts.bool);
parsedConfig.nodeId = _loadFromEnv('NODE_ID', config.nodeId);
parsedConfig.host = _loadFromEnv('HOST', config.host);
parsedConfig.port = _loadFromEnv('PORT', config.port, _typeCasts.int);
const healthCheckFromEnv = _loadFromEnv(
'ALLOW_HEALTHCHECK',
[],
_typeCasts.list,
);
parsedConfig.healthChecks = {
allowFrom: healthCheckFromEnv.concat(config.healthChecks.allowFrom),
};
const certPaths = {
cert: _loadFromEnv('TLS_CERT', config.certFilePaths.cert),
key: _loadFromEnv('TLS_KEY', config.certFilePaths.key),
ca: _loadFromEnv('TLS_CA', config.certFilePaths.ca),
};
if (certPaths.key && certPaths.cert) {
parsedConfig.tls = this._loadCertificates(certPaths);
} else if (certPaths.key || certPaths.cert) {
throw new Error('bad config: both certFilePaths.key and certFilePaths.cert must be defined');
}
parsedConfig.redis = Config._parseRedisConfig('REDIS', config.redis);
parsedConfig.cache = Config._parseRedisConfig('REDIS_CACHE', config.localCache);
parsedConfig.cache.backend = _loadFromEnv('CACHE_BACKEND', config.cacheBackend);
const warp10Conf = {
readToken: _loadFromEnv('WARP10_READ_TOKEN', config.warp10.readToken),
writeToken: _loadFromEnv('WARP10_WRITE_TOKEN', config.warp10.writeToken),
requestTimeout: _loadFromEnv('WARP10_REQUEST_TIMEOUT', config.warp10.requestTimeout, _typeCasts.int),
connectTimeout: _loadFromEnv('WARP10_CONNECT_TIMEOUT', config.warp10.connectTimeout, _typeCasts.int),
};
if (Array.isArray(config.warp10.hosts) || _definedInEnv('WARP10_HOSTS')) {
warp10Conf.hosts = _loadFromEnv('WARP10_HOSTS', config.warp10.hosts, _typeCasts.nodeList);
} else {
warp10Conf.hosts = [{
host: _loadFromEnv('WARP10_HOST', config.warp10.host),
port: _loadFromEnv('WARP10_PORT', config.warp10.port, _typeCasts.int),
nodeId: _loadFromEnv('WARP10_NODE_ID', config.warp10.nodeId),
}];
}
parsedConfig.warp10 = warp10Conf;
parsedConfig.logging = {
level: parsedConfig.development
? 'debug'
: _loadFromEnv('LOG_LEVEL', config.log.logLevel),
dumpLevel: _loadFromEnv(
'LOG_DUMP_LEVEL',
config.log.dumpLevel,
),
};
parsedConfig.ingestionSchedule = _loadFromEnv('INGESTION_SCHEDULE', config.ingestionSchedule);
parsedConfig.checkpointSchedule = _loadFromEnv('CHECKPOINT_SCHEDULE', config.checkpointSchedule);
parsedConfig.snapshotSchedule = _loadFromEnv('SNAPSHOT_SCHEDULE', config.snapshotSchedule);
parsedConfig.repairSchedule = _loadFromEnv('REPAIR_SCHEDULE', config.repairSchedule);
parsedConfig.reindexSchedule = _loadFromEnv('REINDEX_SCHEDULE', config.reindexSchedule);
parsedConfig.diskUsageSchedule = _loadFromEnv('DISK_USAGE_SCHEDULE', config.diskUsageSchedule);
parsedConfig.ingestionLagSeconds = _loadFromEnv(
'INGESTION_LAG_SECONDS',
config.ingestionLagSeconds,
_typeCasts.int,
);
parsedConfig.ingestionShardSize = _loadFromEnv(
'INGESTION_SHARD_SIZE',
config.ingestionShardSize,
_typeCasts.int,
);
const diskUsage = {
path: _loadFromEnv('DISK_USAGE_PATH', (config.diskUsage || {}).path),
hardLimit: _loadFromEnv('DISK_USAGE_HARD_LIMIT', (config.diskUsage || {}).hardLimit),
retentionDays: _loadFromEnv(
'METRIC_RETENTION_PERIOD',
(config.diskUsage || {}).retentionDays, _typeCasts.int,
),
expirationEnabled: _loadFromEnv(
'METRIC_EXPIRATION_ENABLED',
(config.diskUsage || {}).expirationEnabled, _typeCasts.bool,
),
};
if (diskUsage.hardLimit !== undefined) {
diskUsage.hardLimit = parseDiskSizeSpec(diskUsage.hardLimit);
}
if (!diskUsage.path && diskUsage.hardLimit !== undefined) {
throw Error('You must specify diskUsage.path to monitor for disk usage');
} else if (diskUsage.path && diskUsage.hardLimit === undefined) {
throw Error('diskUsage.hardLimit must be specified');
} else if (diskUsage.expirationEnabled && diskUsage.retentionDays === undefined) {
throw Error('diskUsage.retentionDays must be specified');
}
diskUsage.enabled = diskUsage.path !== undefined;
parsedConfig.diskUsage = diskUsage;
parsedConfig.vaultd = {
host: _loadFromEnv('VAULT_HOST', config.vaultd.host),
port: _loadFromEnv('VAULT_PORT', config.vaultd.port),
};
parsedConfig.bucketd = _loadFromEnv('BUCKETD_BOOTSTRAP', config.bucketd, _typeCasts.serverList);
parsedConfig.serviceUser = {
arn: _loadFromEnv('SERVICE_USER_ARN', config.serviceUser.arn),
enabled: _loadFromEnv('SERVICE_USER_ENABLED', config.serviceUser.enabled, _typeCasts.bool),
};
parsedConfig.filter = Config._parseResourceFilters(config.filter);
parsedConfig.metrics = {
enabled: _loadFromEnv('METRICS_ENABLED', config.metrics.enabled, _typeCasts.bool),
host: _loadFromEnv('METRICS_HOST', config.metrics.host),
ingestPort: _loadFromEnv('METRICS_PORT_INGEST', config.metrics.ingestPort, _typeCasts.int),
checkpointPort: _loadFromEnv('METRICS_PORT_CHECKPOINT', config.metrics.checkpointPort, _typeCasts.int),
snapshotPort: _loadFromEnv('METRICS_PORT_SNAPSHOT', config.metrics.snapshotPort, _typeCasts.int),
diskUsagePort: _loadFromEnv('METRICS_PORT_DISK_USAGE', config.metrics.diskUsagePort, _typeCasts.int),
reindexPort: _loadFromEnv('METRICS_PORT_REINDEX', config.metrics.reindexPort, _typeCasts.int),
repairPort: _loadFromEnv('METRICS_PORT_REPAIR', config.metrics.repairPort, _typeCasts.int),
};
return parsedConfig;
}
/**
* Returns a new Config instance merging the loaded config with the provided values.
* Passed values override loaded ones recursively.
*
* @param {object} newConfig - an object using the same structure as the config file
* @returns {Config} - New Config instance
*/
static merge(newConfig) {
return new Config(newConfig);
}
}
module.exports = new Config();
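
Because every lookup goes through _loadFromEnv with the UTAPI namespace (envNamespace in libV2/constants.js below), any parsed setting can be overridden from the environment; a sketch:

// Set before the config module is first required:
process.env.UTAPI_PORT = '8200';                          // cast with _typeCasts.int
process.env.UTAPI_CACHE_BACKEND = 'redis';
process.env.UTAPI_REDIS_SENTINELS = 'h1:16379, h2:16379'; // cast with _typeCasts.serverList

const config = require('./config'); // Config instance with overrides applied
console.log(config.port);           // 8200
console.log(config.cache.backend);  // 'redis'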

libV2/config/schema.js Normal file

@@ -0,0 +1,130 @@
const Joi = require('@hapi/joi');
const { allowedFilterFields, allowedFilterStates } = require('../constants');
const backoffSchema = Joi.object({
min: Joi.number(),
max: Joi.number(),
deadline: Joi.number(),
jitter: Joi.number(),
factor: Joi.number(),
});
const redisRetrySchema = Joi.object({
connectBackoff: backoffSchema,
});
const redisServerSchema = Joi.object({
host: Joi.string(),
port: Joi.number(),
password: Joi.string().allow(''),
retry: redisRetrySchema,
});
const redisSentinelSchema = Joi.object({
name: Joi.string().default('utapi'),
sentinels: Joi.array().items(Joi.object({
host: Joi.alternatives(Joi.string().hostname(), Joi.string().ip()),
port: Joi.number().port(),
})),
password: Joi.string().default('').allow(''),
sentinelPassword: Joi.string().default('').allow(''),
retry: redisRetrySchema,
});
const warp10SingleHost = Joi.object({
host: Joi.alternatives(Joi.string().hostname(), Joi.string().ip()),
port: Joi.number().port(),
readToken: Joi.string(),
writeToken: Joi.string(),
});
const warp10MultiHost = Joi.object({
hosts: Joi.array().items(Joi.object({
host: Joi.alternatives(Joi.string().hostname(), Joi.string().ip()),
port: Joi.number().port(),
nodeId: Joi.string(),
})),
readToken: Joi.string(),
writeToken: Joi.string(),
});
const tlsSchema = Joi.object({
key: Joi.string(),
cert: Joi.string(),
ca: Joi.string(),
});
const schema = Joi.object({
host: Joi.string(),
port: Joi.number().port(),
certFilePaths: tlsSchema.default({}),
workers: Joi.number(),
development: Joi.boolean(),
log: Joi.object({
logLevel: Joi.alternatives()
.try('error', 'warn', 'info', 'debug', 'trace'),
dumpLevel: Joi.alternatives()
.try('error', 'warn', 'info', 'debug', 'trace'),
}),
redis: Joi.alternatives().try(redisServerSchema, redisSentinelSchema),
localCache: Joi.alternatives().try(redisServerSchema, redisSentinelSchema),
warp10: Joi.alternatives().try(warp10SingleHost, warp10MultiHost),
healthChecks: Joi.object({
allowFrom: Joi.array().items(Joi.string()),
}),
vaultd: Joi.object({
host: Joi.string().hostname(),
port: Joi.number().port(),
}),
reindex: Joi.object({
enabled: Joi.boolean(),
schedule: Joi.string(),
}),
bucketd: Joi.array().items(Joi.string()),
expireMetrics: Joi.boolean(),
expireMetricsTTL: Joi.number(),
cacheBackend: Joi.string().valid('memory', 'redis'),
nodeId: Joi.string(),
ingestionSchedule: Joi.string(),
ingestionShardSize: Joi.number().greater(0),
ingestionLagSeconds: Joi.number().greater(0),
checkpointSchedule: Joi.string(),
snapshotSchedule: Joi.string(),
repairSchedule: Joi.string(),
reindexSchedule: Joi.string(),
diskUsageSchedule: Joi.string(),
diskUsage: Joi.object({
path: Joi.string(),
retentionDays: Joi.number().greater(0),
expirationEnabled: Joi.boolean(),
hardLimit: Joi.string(),
}),
serviceUser: Joi.object({
arn: Joi.string(),
enabled: Joi.boolean(),
}),
filter: Joi.object(allowedFilterStates.reduce(
(filterObj, state) => {
filterObj[state] = allowedFilterFields.reduce(
(stateObj, field) => {
stateObj[field] = Joi.array().items(Joi.string());
return stateObj;
}, {},
);
return filterObj;
}, {},
)),
metrics: {
enabled: Joi.boolean(),
host: Joi.string(),
ingestPort: Joi.number().port(),
checkpointPort: Joi.number().port(),
snapshotPort: Joi.number().port(),
diskUsagePort: Joi.number().port(),
reindexPort: Joi.number().port(),
repairPort: Joi.number().port(),
},
});
module.exports = schema;

libV2/constants.js Normal file

@@ -0,0 +1,133 @@
const truthy = new Set([
'true',
'on',
'yes',
'y',
't',
'enabled',
'enable',
'1',
]);
const constants = {
envNamespace: 'UTAPI',
operations: [
'abortMultipartUpload',
'completeMultipartUpload',
'copyObject',
'createBucket',
'deleteBucket',
'deleteBucketCors',
'deleteBucketEncryption',
'deleteBucketLifecycle',
'deleteBucketReplication',
'deleteBucketTagging',
'deleteBucketWebsite',
'deleteObject',
'deleteObjectTagging',
'getBucketAcl',
'getBucketCors',
'getBucketEncryption',
'getBucketLifecycle',
'getBucketLocation',
'getBucketNotification',
'getBucketObjectLock',
'getBucketReplication',
'getBucketVersioning',
'getBucketTagging',
'getBucketWebsite',
'getObject',
'getObjectAcl',
'getObjectLegalHold',
'getObjectRetention',
'getObjectTagging',
'headBucket',
'headObject',
'initiateMultipartUpload',
'listBucket',
'listMultipartUploadParts',
'listMultipartUploads',
'multiObjectDelete',
'putBucketAcl',
'putBucketCors',
'putBucketEncryption',
'putBucketLifecycle',
'putBucketNotification',
'putBucketObjectLock',
'putBucketReplication',
'putBucketVersioning',
'putBucketTagging',
'putBucketWebsite',
'putDeleteMarkerObject',
'putObject',
'putObjectAcl',
'putObjectLegalHold',
'putObjectRetention',
'putObjectTagging',
'replicateDelete',
'replicateObject',
'replicateTags',
'uploadPart',
'uploadPartCopy',
],
eventFieldsToWarp10: {
operationId: 'op',
uuid: 'id',
bucket: 'bck',
object: 'obj',
versionId: 'vid',
account: 'acc',
user: 'usr',
location: 'loc',
objectDelta: 'objD',
sizeDelta: 'sizeD',
incomingBytes: 'inB',
outgoingBytes: 'outB',
operations: 'ops',
},
indexedEventFields: [
'acc',
'usr',
'bck',
],
serviceToWarp10Label: {
locations: 'loc',
accounts: 'acc',
users: 'usr',
buckets: 'bck',
},
warp10EventType: ':m:utapi/event:',
warp10RecordType: ':m:utapi/record:',
truthy,
checkpointLagSecs: 300,
snapshotLagSecs: 900,
repairLagSecs: 5,
counterBaseValueExpiration: 86400, // 24hrs
keyVersionSplitter: String.fromCharCode(0),
migrationChunksize: 500,
migrationOpTranslationMap: {
listBucketMultipartUploads: 'listMultipartUploads',
},
ingestionOpTranslationMap: {
putDeleteMarkerObject: 'deleteObject',
},
expirationChunkDuration: 900000000, // 15 minutes in microseconds
allowedFilterFields: [
'operationId',
'location',
'account',
'user',
'bucket',
],
allowedFilterStates: ['allow', 'deny'],
};
constants.operationToResponse = constants.operations
.reduce((prev, opId) => {
prev[opId] = `s3:${opId.charAt(0).toUpperCase() + opId.slice(1)}`;
return prev;
}, {});
module.exports = constants;
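
The derived operationToResponse map simply prefixes s3: and capitalizes the first letter of each operation id, for example:

const { operationToResponse } = require('./constants');

operationToResponse.getObject;         // 's3:GetObject'
operationToResponse.putObject;         // 's3:PutObject'
operationToResponse.multiObjectDelete; // 's3:MultiObjectDelete'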

libV2/errors/errors.json Normal file

@@ -0,0 +1,42 @@
{
"AccessDenied": {
"code": 403,
"description": "Access Denied"
},
"InternalError": {
"code": 500,
"description": "The server encountered an internal error. Please retry the request."
},
"InvalidUri": {
"code": 400,
"description": "The requested URI does not represent any resource on the server."
},
"NotImplemented": {
"code": 501,
"description": "This operation has yet to be implemented."
},
"OperationIdNotFound": {
"code": 404,
"description": "The specified operation Id is not found."
},
"OperationTimedOut": {
"code": 500,
"description": "The operation could not be completed within the permitted time."
},
"ServiceUnavailable": {
"code": 503,
"description": "The server (or an internal component) is currently unavailable to receive requests. Please retry your request."
},
"InvalidRequest": {
"code": 400,
"description": "Request validation error"
},
"FailedMigration": {
"code": 1000,
"description": "failed to migrate metrics"
},
"FailedCorrection": {
"code": 1001,
"description": "failed to correct migrated metric"
}
}

libV2/errors/index.js Normal file

@@ -0,0 +1,27 @@
/* eslint-disable no-param-reassign */
const utapiErrors = require('./errors.json');
class UtapiError extends Error {
constructor(type, code, desc) {
super(type);
this.code = code;
this.description = desc;
this[type] = true;
this.utapiError = true;
}
customizeDescription(description) {
return new UtapiError(this.message, this.code, description);
}
}
function errorsGen() {
return Object.keys(utapiErrors)
.reduce((errors, name) => {
errors[name] = new UtapiError(name, utapiErrors[name].code,
utapiErrors[name].description);
return errors;
}, {});
}
module.exports = errorsGen();
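
Each export is a shared UtapiError instance; customizeDescription returns a fresh copy, so the shared instances are never mutated. A sketch:

const errors = require('./errors');

const err = errors.InvalidUri.customizeDescription('no route for /v2/frobnicate');
console.log(err.code);        // 400
console.log(err.InvalidUri);  // true
console.log(err.utapiError);  // true
console.log(err.description); // 'no route for /v2/frobnicate'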

libV2/metadata/client.js Normal file

@@ -0,0 +1,16 @@
const bucketclient = require('bucketclient');
const { BucketClientInterface } = require('arsenal').storage.metadata.bucketclient;
const config = require('../config');
const { LoggerContext } = require('../utils');
const moduleLogger = new LoggerContext({
module: 'metadata.client',
});
const params = {
bucketdBootstrap: config.bucketd,
https: config.tls,
};
module.exports = new BucketClientInterface(params, bucketclient, moduleLogger);

libV2/metadata/index.js Normal file

@@ -0,0 +1,141 @@
/* eslint-disable no-restricted-syntax */
const arsenal = require('arsenal');
const async = require('async');
const metadata = require('./client');
const { LoggerContext, logger } = require('../utils');
const { keyVersionSplitter } = require('../constants');
const { usersBucket, splitter: mdKeySplitter, mpuBucketPrefix } = arsenal.constants;
const { BucketInfo } = arsenal.models;
const moduleLogger = new LoggerContext({
module: 'metadata.client',
});
const ebConfig = {
times: 10,
interval: retryCount => 50 * (2 ** retryCount),
};
const PAGE_SIZE = 1000;
async function _listingWrapper(bucket, params) {
return new Promise(
(resolve, reject) => metadata.listObject(
bucket,
params,
logger.newRequestLogger(),
(err, res) => {
if (err) {
reject(err);
return;
}
resolve(res);
},
),
);
}
function _listObject(bucket, prefix, hydrateFunc) {
const listingParams = { prefix, maxKeys: PAGE_SIZE, listingType: 'Basic' };
let gt;
return {
async* [Symbol.asyncIterator]() {
while (true) {
let res;
try {
// eslint-disable-next-line no-await-in-loop
res = await async.retryable(ebConfig, _listingWrapper)(bucket, { ...listingParams, gt });
} catch (error) {
moduleLogger.error('Error during listing', { error });
throw error;
}
for (const item of res) {
yield hydrateFunc ? hydrateFunc(item) : item;
}
if (res.length !== PAGE_SIZE) {
break;
}
gt = res[res.length - 1].key;
}
},
};
}
function listObjects(bucket) {
return _listObject(bucket, '', data => {
const { key, value } = data;
const [name, version] = key.split(keyVersionSplitter);
return {
name,
version,
value: JSON.parse(value),
};
});
}
function listBuckets() {
return _listObject(usersBucket, '', data => {
const { key, value } = data;
const [account, name] = key.split(mdKeySplitter);
return {
account,
name,
value: JSON.parse(value),
};
});
}
async function listMPUs(bucket) {
const mpuBucket = `${mpuBucketPrefix}${bucket}`;
return _listObject(mpuBucket, '', data => {
const { key, value } = data;
const [account, name] = key.split(mdKeySplitter);
return {
account,
name,
value: JSON.parse(value),
};
});
}
function bucketExists(bucket) {
return new Promise((resolve, reject) => metadata.getBucketAttributes(
bucket,
logger.newRequestLogger(),
err => {
if (err && (!err.is || !err.is.NoSuchBucket)) {
reject(err);
return;
}
resolve(err === null);
},
));
}
function getBucket(bucket) {
return new Promise((resolve, reject) => {
metadata.getBucketAttributes(
bucket,
logger.newRequestLogger(), (err, data) => {
if (err) {
reject(err);
return;
}
resolve(BucketInfo.fromObj(data));
},
);
});
}
module.exports = {
listBuckets,
listObjects,
listMPUs,
bucketExists,
getBucket,
};
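
The listing helpers return async-iterable objects, so callers stream pages lazily with for await (a sketch; requires a reachable bucketd):

const metadata = require('./metadata');

async function countBuckets() {
    let total = 0;
    // Pages of PAGE_SIZE keys are fetched as the loop advances.
    for await (const bucket of metadata.listBuckets()) {
        console.log(bucket.account, bucket.name);
        total += 1;
    }
    return total;
}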

libV2/models/Base.js Normal file

@@ -0,0 +1,71 @@
const Joi = require('@hapi/joi');
class BaseModel {
constructor(data) {
this._data = data || {};
}
_get(key, defaultValue) {
const val = this._data[key];
return val === undefined ? defaultValue : val;
}
_set(key, value) {
this._data[key] = value;
return this;
}
getValue() {
return this._data;
}
}
/*
Builds a flexible data container with automatic field checking using `hapi/joi`.
Getters and Setters are automatically created for fields so they can be accessed using `.` notation.
@param name - Name for the model. Used as the internal js class name.
@param schema - An object of joi schemas. Keys are used as field names and
the schemas are used to typecheck values.
@returns - A subclass of BaseModel
*/
function buildModel(name, schema) {
class Model extends BaseModel {
constructor(data) {
if (data !== undefined) {
Object.entries(data).forEach(([key, value]) => {
if (schema[key]) {
Joi.attempt(value, schema[key]);
}
});
}
super(data);
}
_set(key, value) {
if (schema[key]) {
Joi.attempt(value, schema[key]);
}
return super._set(key, value);
}
}
Object.defineProperty(Model, 'name', { value: name });
Object.keys(schema).forEach(key =>
Object.defineProperty(Model.prototype, key, {
// `function` is used rather than `=>` to work around context problem with `this`
/* eslint-disable func-names, object-shorthand */
get: function () {
return this._get(key);
},
set: function (value) {
this._set(key, value);
},
/* eslint-enable func-names, object-shorthand */
}));
return Model;
}
module.exports = {
BaseModel,
buildModel,
};
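
A sketch of buildModel in use: field access goes through the generated getters/setters, and invalid values throw via Joi.attempt (the Sample model is made up):

const Joi = require('@hapi/joi');
const { buildModel } = require('./Base');

const Sample = buildModel('Sample', {
    name: Joi.string(),
    count: Joi.number(),
});

const s = new Sample({ name: 'foo' });
s.count = 5;               // validated by the generated setter
console.log(s.name);       // 'foo'
console.log(s.getValue()); // { name: 'foo', count: 5 }
// s.count = 'oops';       // would throw a Joi ValidationError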

libV2/models/RequestContext.js Normal file

@@ -0,0 +1,75 @@
const Joi = require('@hapi/joi');
const { buildModel } = require('./Base');
const { apiOperations } = require('../server/spec');
const ResponseContainer = require('./ResponseContainer');
const { httpRequestDurationSeconds } = require('../server/metrics');
const apiTags = Object.keys(apiOperations);
const apiOperationIds = Object.values(apiOperations)
.reduce((ids, ops) => {
ops.forEach(id => ids.add(id));
return ids;
}, new Set());
const contextSchema = {
host: Joi.string(),
protocol: Joi.string().valid('http', 'https'),
url: Joi.string().uri({ scheme: ['http', 'https'] }),
operationId: Joi.string().valid(...apiOperationIds),
tag: Joi.string().valid(...apiTags),
encrypted: Joi.boolean(),
logger: Joi.any(),
request: Joi.any(),
results: Joi.any(),
requestTimer: Joi.any(),
};
const RequestContextModel = buildModel('RequestContext', contextSchema);
class RequestContext extends RequestContextModel {
constructor(request) {
const host = request.headers.host || 'localhost';
const protocol = RequestContext._determineProtocol(request);
const encrypted = protocol === 'https';
const url = `${protocol}://${host}${request.url}`;
const tag = request.swagger.operation['x-router-controller'];
const { operationId } = request.swagger.operation;
const requestTimer = tag !== 'internal'
? httpRequestDurationSeconds.startTimer({ action: operationId })
: null;
request.logger.logger.addDefaultFields({
tag,
operationId,
service: 'utapi',
});
super({
request,
host,
url,
protocol,
operationId,
tag,
encrypted,
results: new ResponseContainer(),
logger: request.logger,
requestTimer,
});
}
static _determineProtocol(request) {
// Respect the X-Forwarded-Proto header if set
if (request.headers['x-forwarded-proto']) {
return request.headers['x-forwarded-proto'] === 'https' ? 'https' : 'http';
}
// Use req.connection.encrypted for fallback
return request.connection.encrypted !== undefined
&& request.connection.encrypted ? 'https' : 'http';
}
}
module.exports = RequestContext;

libV2/models/ResponseContainer.js Normal file

@@ -0,0 +1,31 @@
const Joi = require('@hapi/joi');
const { buildModel } = require('./Base');
const orNull = schema => Joi.alternatives(schema, Joi.any().valid(null));
const responseSchema = {
body: Joi.any(),
statusCode: orNull(Joi.number().min(100).max(599)),
redirect: orNull(Joi.string().uri({ scheme: ['http', 'https'], allowRelative: true })),
};
const ResponseContainerModel = buildModel('ResponseContainer', responseSchema);
class ResponseContainer extends ResponseContainerModel {
constructor() {
super({ body: null, statusCode: null, redirect: null });
}
hasBody() {
return this.body !== null;
}
hasStatusCode() {
return this.statusCode !== null;
}
hasRedirect() {
return this.redirect !== null;
}
}
module.exports = ResponseContainer;

libV2/models/UtapiMetric.js Normal file

@@ -0,0 +1,22 @@
const Joi = require('@hapi/joi');
const { operations } = require('../constants');
const { buildModel } = require('./Base');
const metricSchema = {
operationId: Joi.string().valid(...operations),
uuid: Joi.string(),
timestamp: Joi.number(),
bucket: Joi.string(),
object: Joi.string(),
versionId: Joi.string(),
account: Joi.string(),
user: Joi.string(),
location: Joi.string(),
objectDelta: Joi.number(),
sizeDelta: Joi.number(),
incomingBytes: Joi.number(),
outgoingBytes: Joi.number(),
};
module.exports = buildModel('UtapiMetric', metricSchema);

libV2/models/UtapiRecord.js Normal file

@@ -0,0 +1,13 @@
const Joi = require('@hapi/joi');
const { buildModel } = require('./Base');
const recordSchema = {
timestamp: Joi.number(),
objectDelta: Joi.number(),
sizeDelta: Joi.number(),
incomingBytes: Joi.number(),
outgoingBytes: Joi.number(),
operations: Joi.object(),
};
module.exports = buildModel('UtapiRecord', recordSchema);

libV2/models/index.js Normal file

@@ -0,0 +1,13 @@
const BaseModel = require('./Base');
const UtapiMetric = require('./UtapiMetric');
const UtapiRecord = require('./UtapiRecord');
const RequestContext = require('./RequestContext');
const ResponseContainer = require('./ResponseContainer');
module.exports = {
BaseModel,
UtapiMetric,
RequestContext,
ResponseContainer,
UtapiRecord,
};

libV2/process.js Normal file

@@ -0,0 +1,45 @@
const { EventEmitter } = require('events');
const os = require('os');
const { Command } = require('commander');
const { logger } = require('./utils');
class Process extends EventEmitter {
constructor(...options) {
super(...options);
this._program = null;
}
async setup() {
const cleanUpFunc = this.join.bind(this);
['SIGINT', 'SIGQUIT', 'SIGTERM'].forEach(eventName => {
process.on(eventName, cleanUpFunc);
});
process.on('uncaughtException', error => {
logger.error('uncaught exception',
{ error, stack: error.stack.split(os.EOL) });
cleanUpFunc();
});
this._program = new Command();
await this._setup();
}
async start() {
this._program.parse(process.argv);
await this._start();
}
async join() {
this.emit('exit');
await this._join();
}
/* eslint-disable class-methods-use-this,no-empty-function */
async _setup() {}
async _start() {}
async _join() {}
/* eslint-enable class-methods-use-this,no-empty-function */
}
module.exports = Process;

libV2/redis.js Normal file

@@ -0,0 +1,211 @@
const EventEmitter = require('events');
const { callbackify, promisify } = require('util');
const IORedis = require('ioredis');
const { jsutil } = require('arsenal');
const BackOff = require('backo');
const { whilst } = require('async');
const errors = require('./errors');
const { LoggerContext } = require('./utils/log');
const { asyncOrCallback } = require('./utils/func');
const moduleLogger = new LoggerContext({
module: 'redis',
});
const COMMAND_TIMEOUT = 10000;
const CONNECTION_TIMEOUT = 30000;
/**
* Redis client wrapper supporting both standalone and sentinel deployments
* @param {object} options - redis configuration
* @param {string} options.host - redis host
* @param {number} options.port - redis port
* @param {string} [options.password] - redis password (optional)
* @param {string} [options.sentinelPassword] - sentinel password (optional)
* @param {Array<Object>} [options.sentinels] - sentinel nodes; when set, per-command timeouts are enabled
*/
class RedisClient extends EventEmitter {
constructor(options) {
super();
this._redisOptions = options;
this._redis = null;
// Controls the use of additional command timeouts
// Only use if connecting to a sentinel cluster
this._useTimeouts = options.sentinels !== undefined;
this._inFlightTimeouts = this._useTimeouts ? new Set() : null;
this._runningRedisProbe = null;
this._isConnected = false;
this._isReady = false;
}
connect(callback) {
this._initClient(false);
if (callback) {
process.nextTick(callback);
}
}
disconnect(callback) {
return asyncOrCallback(async () => {
if (this._useTimeouts) {
this._inFlightTimeouts.forEach(clearTimeout);
}
if (this._redis !== null) {
await this._redis.quit();
this._redis = null;
}
}, callback);
}
get isReady() {
return this._isConnected && this._isReady;
}
_initClient(startProbe = true) {
moduleLogger.debug('initializing redis client');
if (this._redis !== null) {
this._redis.off('connect', this._onConnect);
this._redis.off('ready', this._onReady);
this._redis.off('error', this._onError);
this._redis.disconnect();
}
this._isConnected = false;
this._isReady = false;
this._redis = new IORedis(this._redisOptions);
this._redis.on('connect', this._onConnect.bind(this));
this._redis.on('ready', this._onReady.bind(this));
this._redis.on('error', this._onError.bind(this));
if (startProbe && this._runningRedisProbe === null) {
this._runningRedisProbe = setInterval(this._probeRedis.bind(this), CONNECTION_TIMEOUT);
}
}
_probeRedis() {
if (this.isReady) {
moduleLogger.debug('redis client is ready, clearing reinitialize interval');
clearInterval(this._runningRedisProbe);
this._runningRedisProbe = null;
} else {
moduleLogger.warn('redis client has failed to become ready, reinitializing');
this._initClient();
}
}
_onConnect() {
this._isConnected = true;
this.emit('connect');
}
_onReady() {
this._isReady = true;
this.emit('ready');
}
_onError(error) {
this._isReady = false;
moduleLogger.error('error connecting to redis', { error });
if (this.listenerCount('error') > 0) {
this.emit('error', error);
}
}
_createCommandTimeout() {
let timer;
let onTimeout;
const cancelTimeout = jsutil.once(() => {
clearTimeout(timer);
this.off('timeout', onTimeout);
this._inFlightTimeouts.delete(timer);
});
const timeout = new Promise((_, reject) => {
timer = setTimeout(this.emit.bind(this, 'timeout'), COMMAND_TIMEOUT);
this._inFlightTimeouts.add(timer);
onTimeout = () => {
moduleLogger.warn('redis command timed out');
cancelTimeout();
this._initClient();
reject(errors.OperationTimedOut);
};
this.once('timeout', onTimeout);
});
return { timeout, cancelTimeout };
}
async _call(asyncFunc) {
const start = Date.now();
const { connectBackoff } = this._redisOptions.retry || {};
const backoff = new BackOff(connectBackoff);
const timeoutMs = (connectBackoff || {}).deadline || 2000;
let retried = false;
return new Promise((resolve, reject) => {
whilst(
next => { // WARNING: test is asynchronous in `async` v3
if (!connectBackoff && !this.isReady) {
moduleLogger.warn('redis not ready and backoff is not configured');
}
process.nextTick(next, null, !!connectBackoff && !this.isReady);
},
next => {
retried = true;
if ((Date.now() - start) > timeoutMs) {
moduleLogger.error('redis still not ready after max wait, giving up', { timeoutMs });
return next(errors.InternalError.customizeDescription(
'redis client is not ready',
));
}
const backoffDurationMs = backoff.duration();
moduleLogger.error('redis not ready, retrying', { backoffDurationMs });
return setTimeout(next, backoffDurationMs);
},
err => {
if (err) {
return reject(err);
}
if (retried) {
moduleLogger.info('redis connection recovered', {
recoveryOverheadMs: Date.now() - start,
});
}
const funcPromise = asyncFunc(this._redis);
if (!this._useTimeouts) {
// If timeouts are disabled simply return the Promise
return resolve(funcPromise);
}
const { timeout, cancelTimeout } = this._createCommandTimeout();
try {
// timeout always rejects so we can just return
return resolve(Promise.race([funcPromise, timeout]));
} finally {
cancelTimeout();
}
},
);
});
}
call(func, callback) {
if (callback !== undefined) {
// If a callback is provided `func` is assumed to also take a callback
// and is converted to a promise using promisify
return callbackify(this._call.bind(this))(promisify(func), callback);
}
return this._call(func);
}
}
module.exports = RedisClient;
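
A usage sketch of RedisClient.call, which takes either an async function over the underlying ioredis instance or, when a callback is supplied, a callback-style function (key names made up):

const RedisClient = require('./redis');

const client = new RedisClient({ host: '127.0.0.1', port: 6379 });
client.connect();

// Promise style: the function receives the ioredis instance.
client.call(redis => redis.incr('utapi:example:counter'))
    .then(value => console.log('counter is now', value));

// Callback style: the wrapped function must itself take a callback.
client.call((redis, cb) => redis.get('utapi:example:counter', cb), (err, value) => {
    if (!err) {
        console.log('counter is', value);
    }
});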

libV2/server/API/internal.js Normal file

@@ -0,0 +1,5 @@
const ApiController = require('../controller');
const controller = new ApiController('internal');
module.exports = controller.buildMap();

libV2/server/API/internal/healthcheck.js Normal file

@@ -0,0 +1,6 @@
async function healthcheck(ctx) {
// eslint-disable-next-line no-param-reassign
ctx.results.statusCode = 200;
}
module.exports = healthcheck;

libV2/server/API/internal/prometheusMetrics.js Normal file

@@ -0,0 +1,14 @@
const { collectDefaultMetrics, register } = require('prom-client');
collectDefaultMetrics({
timeout: 10000,
gcDurationBuckets: [0.001, 0.01, 0.1, 1, 2, 5],
});
async function prometheusMetrics(ctx) {
// eslint-disable-next-line no-param-reassign
ctx.results.statusCode = 200;
ctx.results.body = await register.metrics();
}
module.exports = prometheusMetrics;

libV2/server/API/metrics.js Normal file

@@ -0,0 +1,5 @@
const ApiController = require('../controller');
const controller = new ApiController('metrics');
module.exports = controller.buildMap();

libV2/server/API/metrics/getStorage.js Normal file

@@ -0,0 +1,63 @@
const errors = require('../../../errors');
const { serviceToWarp10Label } = require('../../../constants');
const { clients: warp10Clients } = require('../../../warp10');
const { client: cache } = require('../../../cache');
const { now, iterIfError } = require('../../../utils');
/**
*
* @param {RequestContext} ctx - request context
* @param {object} params - request parameters
* @param {string} params.level - metric level
* @param {string} params.resource - Id of the requested resource
* @returns {Promise<undefined>} -
*/
async function getStorage(ctx, params) {
const { level, resource } = params;
if (level !== 'accounts') {
throw errors.InvalidRequest
.customizeDescription(`Unsupported level "${level}". Only "accounts" is currently supported`);
}
const [counter, base] = await cache.fetchAccountSizeCounter(resource);
let storageUtilized;
if (base !== null) {
storageUtilized = counter + base;
} else {
const labelName = serviceToWarp10Label[params.level];
const labels = { [labelName]: resource };
const res = await iterIfError(warp10Clients, warp10 => {
const options = {
params: {
end: now(),
labels,
node: warp10.nodeId,
},
macro: 'utapi/getMetricsAt',
};
return warp10.exec(options);
}, error => ctx.logger.error('error while fetching metrics', { error }));
if (res.result.length === 0) {
ctx.logger.error('unable to retrieve metrics', { level, resource });
throw errors.InternalError;
}
const { sizeD: currentSize } = res.result[0];
await cache.updateAccountCounterBase(resource, currentSize);
storageUtilized = currentSize;
}
ctx.results.statusCode = 200;
ctx.results.body = {
storageUtilized: Math.max(storageUtilized, 0),
resource,
level,
};
}
module.exports = getStorage;
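
In effect the handler serves totals from Redis when a recent base value exists, and otherwise reconstructs them from Warp 10 and reseeds the cache; a worked example with made-up numbers:

// fetchAccountSizeCounter('acct1') -> [counter, base]
//   base    = 1048576  // total seeded from Warp 10 (expires after counterBaseValueExpiration, 24h)
//   counter = 512      // deltas applied by pushMetric since the base was written
// Hot path:  storageUtilized = 512 + 1048576 = 1049088, no Warp 10 round trip.
// Cold path (base === null): run the utapi/getMetricsAt macro, take its sizeD,
// reseed via updateAccountCounterBase, and return that value.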

libV2/server/API/metrics/ingestMetric.js Normal file

@@ -0,0 +1,27 @@
const errors = require('../../../errors');
const { UtapiMetric } = require('../../../models');
const { client: cacheClient } = require('../../../cache');
const { convertTimestamp } = require('../../../utils');
const { ingestionOpTranslationMap } = require('../../../constants');
async function ingestMetric(ctx, params) {
let metrics;
try {
metrics = params.body.map(m => new UtapiMetric({
...m,
timestamp: convertTimestamp(m.timestamp),
operationId: ingestionOpTranslationMap[m.operationId] || m.operationId,
}));
} catch (error) {
throw errors.InvalidRequest;
}
try {
await Promise.all(metrics.map(m => cacheClient.pushMetric(m)));
} catch (error) {
throw errors.ServiceUnavailable;
}
// eslint-disable-next-line no-param-reassign
ctx.results.statusCode = 200;
}
module.exports = ingestMetric;

libV2/server/API/metrics/listMetric.js Normal file

@@ -0,0 +1,113 @@
const errors = require('../../../errors');
const { serviceToWarp10Label, operationToResponse } = require('../../../constants');
const { convertTimestamp, iterIfError } = require('../../../utils');
const { clients: warp10Clients } = require('../../../warp10');
const emptyOperationsResponse = Object.values(operationToResponse)
.reduce((prev, key) => {
prev[key] = 0;
return prev;
}, {});
const metricResponseKeys = {
buckets: 'bucketName',
accounts: 'accountId',
users: 'userId',
service: 'serviceName',
};
function positiveOrZero(value) {
return Math.max(value, 0);
}
async function listMetric(ctx, params) {
const labelName = serviceToWarp10Label[params.level];
const resources = params.body[params.level];
let [start, end] = params.body.timeRange;
if (end === undefined) {
end = Date.now();
}
let results;
try {
// A separate request will be made to warp 10 per requested resource
results = await Promise.all(
resources.map(async ({ resource, id }) => {
const labels = { [labelName]: id };
const res = await iterIfError(warp10Clients, warp10 => {
const options = {
params: {
start: convertTimestamp(start).toString(),
end: convertTimestamp(end).toString(),
labels,
node: warp10.nodeId,
},
macro: 'utapi/getMetrics',
};
return warp10.exec(options);
}, error => ctx.logger.error('error during warp 10 request', {
error,
requestParams: {
start,
end,
labels,
},
}));
if (res.result.length === 0) {
ctx.logger.error('unable to retrieve metrics', { resource, type: params.level });
throw errors.InternalError;
}
const rawMetrics = JSON.parse(res.result[0]);
// Due to various error cases it is possible for metrics in utapi to go negative.
// As this is nonsensical to the user we replace any negative values with zero.
const metrics = {
storageUtilized: rawMetrics.storageUtilized.map(positiveOrZero),
numberOfObjects: rawMetrics.numberOfObjects.map(positiveOrZero),
incomingBytes: positiveOrZero(rawMetrics.incomingBytes),
outgoingBytes: positiveOrZero(rawMetrics.outgoingBytes),
operations: rawMetrics.operations,
};
return {
resource,
metrics,
};
}),
);
} catch (error) {
ctx.logger.error('error fetching metrics from warp10', { error });
throw errors.InternalError;
}
// Convert the results from warp10 into the expected response format
const resp = results
.map(result => {
const operations = Object.entries(result.metrics.operations)
.reduce((prev, [key, value]) => {
prev[operationToResponse[key]] = value;
return prev;
}, {});
const metric = {
...result.metrics,
timeRange: [start, end],
operations: {
...emptyOperationsResponse,
...operations,
},
};
metric[metricResponseKeys[params.level]] = result.resource;
return metric;
});
ctx.results.body = resp;
ctx.results.statusCode = 200;
}
module.exports = listMetric;
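
For a buckets-level query each assembled response entry looks like the following (metricResponseKeys selects the id field per level; the request shape and all numbers are illustrative):

[
    {
        bucketName: 'my-bucket',
        timeRange: [1598371200000, 1598457600000],
        storageUtilized: [0, 1048576], // values clamped to >= 0
        numberOfObjects: [0, 42],
        incomingBytes: 1048576,
        outgoingBytes: 2048,
        operations: {
            's3:PutObject': 42,
            's3:GetObject': 7,
            // every other s3:* operation is present with a 0 value
        },
    },
]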

libV2/server/controller.js Normal file

@@ -0,0 +1,188 @@
/* eslint-disable no-param-reassign */
const { apiOperations, apiOperationMiddleware } = require('./spec');
const { middleware: utapiMiddleware } = require('./middleware');
const RequestContext = require('../models/RequestContext');
const errors = require('../errors');
const { LoggerContext } = require('../utils');
const moduleLogger = new LoggerContext({
module: 'server.controller',
});
/**
* APIController
* @param {string} tag - controller tag to load; should match `x-router-controller` from the openapi spec
*/
class APIController {
constructor(tag) {
this._handlers = APIController._collectHandlers(tag);
this._middleware = APIController._collectHandlerMiddleware(tag);
}
static _safeRequire(path) {
try {
// eslint-disable-next-line import/no-dynamic-require, global-require
return require(path);
} catch (error) {
if (error.code !== 'MODULE_NOT_FOUND') {
moduleLogger
.with({ method: 'APIController::_safeRequire' })
.error(`error while loading handler from ${path}`);
throw error;
}
return null;
}
}
static _notImplementedHandler(tag, operationId) {
// eslint-disable-next-line no-unused-vars
return async (ctx, params) => {
throw errors.NotImplemented.customizeDescription(
`the operation "${tag}::${operationId}" has not been implemented`,
);
};
}
static _getOperationHandler(tag, operationId) {
const op = APIController._safeRequire(`./API/${tag}/${operationId}`);
if (op === null) {
moduleLogger
.with({ method: 'APIController::_getOperationHandler' })
.error(`no handler for ${tag}:${operationId} found, using notImplemented handler`);
return APIController._notImplementedHandler(tag, operationId);
}
return op;
}
static _collectHandlers(tag) {
return Array.from(apiOperations[tag]).reduce((handlers, id) => {
handlers[id] = APIController._getOperationHandler(tag, id);
return handlers;
}, {});
}
static _collectHandlerMiddleware(tag) {
return Object.entries(apiOperationMiddleware[tag])
.reduce((handlers, [id, handler]) => {
const middleware = [];
if (handler.iplimit) {
middleware.push(utapiMiddleware.clientIpLimitMiddleware);
}
if (handler.authv4) {
middleware.push(utapiMiddleware.authV4Middleware);
}
handlers[id] = middleware;
return handlers;
}, {});
}
static _extractParams(req) {
return Object.entries(req.swagger.params)
.reduce((params, [key, value]) => {
params[key] = value.value;
return params;
}, {});
}
static async _writeResult(results, response) {
// If no results have been set return a 500
if (
!results.hasRedirect()
&& !results.hasBody()
&& !results.hasStatusCode()
) {
throw errors.InternalError;
}
// If we have a redirect, do it
if (results.hasRedirect()) {
response.redirect(results.redirect);
// If we have both a body & status, send both
} else if (results.hasBody() && results.hasStatusCode()) {
response.status(results.statusCode).send(results.body);
// If all we have is a status code, then send it with an empty body
} else if (results.hasStatusCode() && !results.hasBody()) {
response.sendStatus(results.statusCode);
// If no status code is set, but we have a body, assume `200` and send
} else if (results.hasBody() && !results.hasStatusCode()) {
response.status(200).send(results.body);
}
}
static _buildRequestContext(req) {
return new RequestContext(req);
}
/**
* callOperation
*
* Constructs the request context, extracts operation parameters, calls the
* operation handler, and writes its result.
*
* @param {function} handler - Function returning a Promise implementing the operation
* @param {Request} request - Express request object
* @param {Response} response - Express response object
* @param {Object} params - Extracted request parameters
* @returns {undefined} -
*/
static async _callOperation(handler, request, response, params) {
try {
await handler(request.ctx, params);
} catch (error) {
request.logger.error('error during operation', { error });
throw error;
}
request.logger.debug('writing operation result');
try {
await APIController._writeResult(request.ctx.results, response);
} catch (error) {
request.logger.error(
'error while writing operation result',
{ error },
);
throw error;
}
}
static async _callMiddleware(middleware, request, response, params) {
await middleware.reduce(
(chain, mw) => (chain
? chain.then(() => mw(request, response, params))
: mw(request, response, params)),
null,
);
}
static callOperation(operationId, handler, middleware, request, response, done) {
request.ctx = APIController._buildRequestContext(request);
const requestParams = APIController._extractParams(request);
request.logger.debug(`calling middleware for ${operationId}`);
APIController._callMiddleware(middleware, request, response, requestParams)
.then(() => {
request.logger.debug(`calling operation ${operationId}`);
return APIController._callOperation(handler, request, response, requestParams);
})
.then(
done,
done,
);
}
/**
* buildMap
*
* Constructs an object mapping each `operationId` to its `callOperation` wrapper, for use as a controller with oas-tools
* @returns {Object} - Map of operationIds to handler
*/
buildMap() {
return Object.entries(this._handlers)
.reduce((ops, [id, handler]) => {
ops[id] = (request, response, done) =>
APIController.callOperation(id, handler, this._middleware[id], request, response, done);
return ops;
}, {});
}
}
module.exports = APIController;

libV2/server/index.js Normal file

@@ -0,0 +1,101 @@
const http = require('http');
const https = require('https');
const express = require('express');
const bodyParser = require('body-parser');
const { ciphers, dhparam } = require('arsenal').https;
const Process = require('../process');
const config = require('../config');
const { initializeOasTools, middleware } = require('./middleware');
const { spec: apiSpec } = require('./spec');
const { client: cacheClient } = require('../cache');
const { LoggerContext } = require('../utils');
const moduleLogger = new LoggerContext({
module: 'server',
});
class UtapiServer extends Process {
constructor() {
super();
this._app = null;
this._server = null;
}
static async _createApp(spec) {
const app = express();
app.use(bodyParser.json({ strict: false }));
app.use(middleware.loggerMiddleware);
await initializeOasTools(spec, app);
app.use(middleware.errorMiddleware);
app.use(middleware.httpMetricsMiddleware);
app.use(middleware.responseLoggerMiddleware);
return app;
}
static _createHttpsAgent() {
const conf = {
ciphers: ciphers.ciphers,
dhparam: dhparam.dhparam,
cert: config.tls.cert,
key: config.tls.key,
ca: config.tls.ca ? [config.tls.ca] : null,
requestCert: false,
rejectUnauthorized: true,
};
const agent = new https.Agent(conf);
conf.agent = agent;
return conf;
}
static async _createServer(app) {
if (config.tls) {
return https.createServer(UtapiServer._createHttpsAgent(), app);
}
return http.createServer(app);
}
static async _startServer(server) {
moduleLogger
.with({
method: 'UtapiServer::_startServer',
cacheBackend: config.cacheBackend,
})
.info(`Server listening on ${config.port}`);
await server.listen(config.port);
}
async _setup() {
this._app = await UtapiServer._createApp(apiSpec);
this._server = await UtapiServer._createServer(this._app);
}
async _start() {
await cacheClient.connect();
await UtapiServer._startServer(this._server);
}
async _join() {
await this._server.close();
await cacheClient.disconnect();
}
}
async function startServer(conf) {
const server = new UtapiServer(conf);
try {
await server.setup();
await server.start();
} catch (error) {
moduleLogger
.with({ method: 'UtapiServer::startServer' })
.error('Unhandled Error!', { error: error.message });
await server.join();
throw error;
}
}
module.exports = {
UtapiServer,
startServer,
};

libV2/server/metrics.js Normal file

@@ -0,0 +1,20 @@
const promClient = require('prom-client');
const httpRequestsTotal = new promClient.Counter({
name: 's3_utapi_http_requests_total',
help: 'Total number of HTTP requests',
labelNames: ['action', 'code'],
});
const httpRequestDurationSeconds = new promClient.Histogram({
name: 's3_utapi_http_request_duration_seconds',
help: 'Duration of HTTP requests in seconds',
labelNames: ['action', 'code'],
// buckets for response time from 0.1ms to 60s
buckets: [0.0001, 0.005, 0.015, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 1.0, 5.0, 15.0, 30.0, 60.0],
});
module.exports = {
httpRequestDurationSeconds,
httpRequestsTotal,
};

Some files were not shown because too many files have changed in this diff.