Compare commits

...

139 Commits

Author SHA1 Message Date
Vitaliy Filippov facc276e8b move away require libv2/config from libv2/redis 2024-08-13 02:17:04 +03:00
Vitaliy Filippov c8e3999fb3 Require defaults.json instead of fs.readFileSync 2024-08-13 01:14:02 +03:00
Vitaliy Filippov 9fa777cdba Split require utils to help webpack remove libV2 2024-08-13 01:10:22 +03:00
Vitaliy Filippov e6d48f3b47 Make vault client optional / support receiving its instance from outside 2024-07-23 19:22:54 +03:00
Vitaliy Filippov 0050625f81 Change git dependency URLs 2024-07-21 18:12:40 +03:00
Vitaliy Filippov 0a66c57a0a Remove yarn lock 2024-07-21 17:34:07 +03:00
Vitaliy Filippov 6711c4241a Forget LFS object 2024-07-21 17:34:07 +03:00
Jonathan Gramain 3800e4b185 Merge remote-tracking branch 'origin/w/7.70/bugfix/UTAPI-105-useListOfSentinelNodes' into w/8.1/bugfix/UTAPI-105-useListOfSentinelNodes 2024-06-27 10:09:15 -07:00
Jonathan Gramain 20667ff741 Merge remote-tracking branch 'origin/bugfix/UTAPI-105-useListOfSentinelNodes' into w/7.70/bugfix/UTAPI-105-useListOfSentinelNodes 2024-06-27 10:06:43 -07:00
Jonathan Gramain 88d18f3eb6 UTAPI-105 bump version 2024-06-25 15:10:02 -07:00
Jonathan Gramain 426dfd0860 bf: UTAPI-105 UtapiReindex: use list of redis sentinels
Use a list of Redis sentinels that are running on stateful nodes only,
instead of localhost.

Previously, a stateless-only node wouldn't have a local sentinel node
running, causing UtapiReindex to fail.

Added a failover mechanism in case of connection error on the current
sentinel, to try each of the remaining sentinels in turn (a minimal sketch
of this retry loop follows the commit list).
2024-06-25 15:10:02 -07:00
bert-e ac4fd2c5f5 Merge branch 'improvement/UTAPI-103/support_reindex_by_account' into tmp/octopus/w/8.1/improvement/UTAPI-103/support_reindex_by_account 2024-06-12 18:28:11 +00:00
Taylor McKinnon 69b94c57aa impr(UTAPI-103): Remove undeclared variable from log message 2024-06-12 11:27:16 -07:00
Taylor McKinnon f5262b7875 impr(UTAPI-103): Support reindexing by account 2024-06-12 11:27:16 -07:00
Taylor McKinnon ee1c0fcd1b impr(UTAPI-103): Support multiple specified buckets and prep for account support 2024-06-12 11:27:16 -07:00
Taylor McKinnon 5efb70dc63 impr(UTAPI-103): Add --dry-run option 2024-06-12 11:27:16 -07:00
Taylor McKinnon 210ba2fd82 impr(UTAPI-103): Add BucketDClient.get_bucket_md() 2024-06-06 12:10:40 -07:00
Taylor McKinnon 34af848b93 impr(UTAPI-103): Add BucketNotFound Exception for _get_bucket_attributes 2024-06-06 12:08:40 -07:00
Taylor McKinnon 402fd406e3 impr(UTAPI-103): Add small LRU cache to BucketDClient._get_bucket_attributes 2024-06-06 12:06:46 -07:00
bert-e f9ae694c0c Merge branch 'w/7.70/bugfix/UTAPI-101/fix_release_workflow' into tmp/octopus/w/8.1/bugfix/UTAPI-101/fix_release_workflow 2024-05-16 17:16:03 +00:00
bert-e 960d990e89 Merge branch 'bugfix/UTAPI-101/fix_release_workflow' into tmp/octopus/w/7.70/bugfix/UTAPI-101/fix_release_workflow 2024-05-16 17:16:03 +00:00
Taylor McKinnon 7fde3488b9 impr(UTAPI-101): Remove secrets: inherit from release workflow 2024-05-15 10:32:38 -07:00
Taylor McKinnon 79c2ff0c72 Merge remote-tracking branch 'origin/w/7.70/bugfix/UTAPI-100/utapi_python_version_fix' into w/8.1/bugfix/UTAPI-100/utapi_python_version_fix 2024-05-07 10:56:37 -07:00
Taylor McKinnon ae904b89bf Merge remote-tracking branch 'origin/bugfix/UTAPI-100/utapi_python_version_fix' into w/7.70/bugfix/UTAPI-100/utapi_python_version_fix 2024-05-07 10:55:23 -07:00
Taylor McKinnon 60db367054 bf(UTAPI-100): Bump version 2024-05-06 11:20:17 -07:00
Taylor McKinnon c9ba521b6d bf(UTAPI-100): Remove use of 3.7+ only parameter 2024-05-06 11:16:58 -07:00
Francois Ferrand ce89418788 Update Release.md for ghcr migration
Issue: UTAPI-99
2024-04-18 15:55:13 +02:00
Francois Ferrand 5faaf493a5 Merge branch 'w/7.70/improvement/VAULT-567' into w/8.1/improvement/VAULT-567 2024-04-18 15:54:58 +02:00
Francois Ferrand da143dba67 Merge branch 'w/7.10/improvement/VAULT-567' into w/7.70/improvement/VAULT-567 2024-04-18 15:54:35 +02:00
Francois Ferrand 6e0ec16f00 Fix caching of python packages
Issue: UTAPI-99
2024-04-18 15:54:04 +02:00
Francois Ferrand 4449f44c9a Bump github actions
- docker-build@v2
- checkout@v4
- setup-buildx@v3
- setup-node@v4
- setup-python@v5
- login@v3
- build-push@v5
- gh-release@v2
- ssh-to-runner@1.7.0

Issue: UTAPI-99
2024-04-18 15:53:26 +02:00
Francois Ferrand c4e786d6cd Migrate to ghcr
Issue: UTAPI-99
2024-04-18 15:53:20 +02:00
Francois Ferrand bdb483e6b4 Merge branch 'improvement/UTAPI-99' into w/7.10/improvement/VAULT-567 2024-04-18 15:52:47 +02:00
Francois Ferrand 20916c6f0e Fix caching of python packages
Issue: UTAPI-99
2024-04-18 15:47:05 +02:00
Francois Ferrand 5976018d0e Bump github actions
- checkout@v4
- setup-qemu@v3
- setup-buildx@v3
- setup-node@v4
- setup-python@v5
- login@v3
- build-push@v5
- gh-release@v2

Issue: UTAPI-99
2024-04-17 15:02:44 +02:00
Francois Ferrand 9e1f14ed17 Migrate to ghcr
Issue: UTAPI-99
2024-04-17 14:42:58 +02:00
bert-e 34699432ee Merge branch 'w/7.70/improvement/UTAPI-98/bump-redis' into tmp/octopus/w/8.1/improvement/UTAPI-98/bump-redis 2024-01-22 15:39:38 +00:00
bert-e 438a25982d Merge branch 'improvement/UTAPI-98/bump-redis' into tmp/octopus/w/7.70/improvement/UTAPI-98/bump-redis 2024-01-22 15:39:37 +00:00
Nicolas Humbert 8804e9ff69 UTAPI-98 Bump Redis version 2024-01-22 16:36:01 +01:00
Taylor McKinnon 27e1c44829 Merge remote-tracking branch 'origin/w/7.70/improvement/UTAPI-97/reindex_only_latest_for_olock_buckets_option' into w/8.1/improvement/UTAPI-97/reindex_only_latest_for_olock_buckets_option 2023-12-11 09:38:41 -08:00
Taylor McKinnon e8882a28cc Merge remote-tracking branch 'origin/improvement/UTAPI-97/reindex_only_latest_for_olock_buckets_option' into w/7.70/improvement/UTAPI-97/reindex_only_latest_for_olock_buckets_option 2023-12-11 09:37:25 -08:00
Taylor McKinnon b93998118c impr(UTAPI-97): Bump version 2023-12-11 09:25:01 -08:00
Taylor McKinnon 9195835f70 impr(UTAPI-97): Add config option to reindex only latest version in object locked buckets 2023-12-11 09:25:01 -08:00
bert-e 8dfb06cdbc Merge branch 'w/7.70/improvement/UTAPI-96/switch_to_scality_ssh_action' into tmp/octopus/w/8.1/improvement/UTAPI-96/switch_to_scality_ssh_action 2023-10-09 16:32:55 +00:00
bert-e 934136635e Merge branch 'improvement/UTAPI-96/switch_to_scality_ssh_action' into tmp/octopus/w/7.70/improvement/UTAPI-96/switch_to_scality_ssh_action 2023-10-09 16:32:55 +00:00
Taylor McKinnon 9f36624799 impr(UTAPI-96): Switch to scality/actions/action-ssh-to-runner 2023-10-09 09:30:34 -07:00
Taylor McKinnon 59aa9b9ab9 Merge remote-tracking branch 'origin/w/7.70/bugfix/UTAPI-92/bump_utapi_version' into w/8.1/bugfix/UTAPI-92/bump_utapi_version 2023-05-31 13:45:38 -07:00
Taylor McKinnon 9eecef0a24 Merge remote-tracking branch 'origin/bugfix/UTAPI-92/bump_utapi_version' into w/7.70/bugfix/UTAPI-92/bump_utapi_version 2023-05-31 13:44:34 -07:00
Taylor McKinnon c29af16e46 UTAPI-92: Bump version 2023-05-31 13:43:04 -07:00
bert-e 8757ac8bb0 Merge branch 'w/7.70/bugfix/UTAPI-92/fix_redis_password_config' into tmp/octopus/w/8.1/bugfix/UTAPI-92/fix_redis_password_config 2023-05-26 17:39:02 +00:00
bert-e 34ceac8563 Merge branch 'bugfix/UTAPI-92/fix_redis_password_config' into tmp/octopus/w/7.70/bugfix/UTAPI-92/fix_redis_password_config 2023-05-26 17:39:02 +00:00
Taylor McKinnon 7f9c9aa202 bf(UTAPI-92): Fix redis password loading 2023-05-25 15:03:36 -07:00
Taylor McKinnon 41b690aa5d Merge remote-tracking branch 'origin/w/7.70/bugfix/UTAPI-88/bump_version_7_10_13' into w/8.1/bugfix/UTAPI-88/bump_version_7_10_13 2023-04-11 16:10:46 -07:00
Taylor McKinnon 3f08327fe6 Merge remote-tracking branch 'origin/bugfix/UTAPI-88/bump_version_7_10_13' into w/7.70/bugfix/UTAPI-88/bump_version_7_10_13 2023-04-11 16:09:35 -07:00
Taylor McKinnon 84bc7e180f bf(UTAPI-88): Release 7.10.13 2023-04-11 16:07:23 -07:00
bert-e e328095606 Merge branches 'w/8.1/bugfix/UTAPI-88-do-not-error-500-in-case-of-negative-metric' and 'q/1279/7.70/bugfix/UTAPI-88-do-not-error-500-in-case-of-negative-metric' into tmp/octopus/q/8.1 2023-04-10 23:34:43 +00:00
bert-e cb9d2b8d2b Merge branches 'w/7.70/bugfix/UTAPI-88-do-not-error-500-in-case-of-negative-metric' and 'q/1279/7.10/bugfix/UTAPI-88-do-not-error-500-in-case-of-negative-metric' into tmp/octopus/q/7.70 2023-04-10 23:34:42 +00:00
bert-e de73fe9ee0 Merge branch 'bugfix/UTAPI-88-do-not-error-500-in-case-of-negative-metric' into q/7.10 2023-04-10 23:34:42 +00:00
bert-e 0d33f81e35 Merge branch 'w/7.70/bugfix/UTAPI-88-do-not-error-500-in-case-of-negative-metric' into tmp/octopus/w/8.1/bugfix/UTAPI-88-do-not-error-500-in-case-of-negative-metric 2023-04-10 23:28:07 +00:00
bert-e 13fb668d94 Merge branch 'bugfix/UTAPI-88-do-not-error-500-in-case-of-negative-metric' into tmp/octopus/w/7.70/bugfix/UTAPI-88-do-not-error-500-in-case-of-negative-metric 2023-04-10 23:28:07 +00:00
scality-gelbart 0fc08f3d7d bf(UTAPI-88): Replace transient state API error with info log message and 200 response 2023-04-10 16:27:21 -07:00
Naren 334c4c26a1 Merge remote-tracking branch 'origin/improvement/UTAPI-91-release-7-70-0' into w/8.1/improvement/UTAPI-91-release-7-70-0 2023-03-28 18:36:52 -07:00
Naren 5319a24704 impr: UTAPI-91 bump version to 7.70.0 2023-03-28 18:05:13 -07:00
Naren ed3628ef01 impr: UTAPI-90 bump version to 8.1.10 2023-03-15 11:20:42 -07:00
Naren 34e881f0e9 impr: UTAPI-90 upgrade bucketclient and vaultclient 2023-03-15 11:19:21 -07:00
Naren 13befbd535 Merge remote-tracking branch 'origin/improvement/UTAPI-90-upgrade-prom-client' into w/8.1/improvement/UTAPI-90-upgrade-prom-client 2023-03-15 11:12:08 -07:00
Naren 347cf3c1cb impr: UTAPI-90 bump version to 7.10.12 2023-03-15 11:03:06 -07:00
Naren 9b5fe56f48 impr: UTAPI-90 upgrade bucketclient and vaultclient 2023-03-15 11:02:22 -07:00
Naren 988f478957 impr: UTAPI-90 upgrade arsenal for prom-client upgrade 2023-03-14 18:54:16 -07:00
bert-e 5f24e749ea Merge branch 'improvement/UTAPI-89-update-metric-names' into tmp/octopus/w/8.1/improvement/UTAPI-89-update-metric-names 2023-02-28 16:56:12 +00:00
Naren 480bde079b impr UTAPI-89 update metric names 2023-02-28 08:54:25 -08:00
Taylor McKinnon 0ba5a02ba7 Bump version to 8.1.9 2022-10-26 11:47:44 -07:00
Taylor McKinnon e75ce33f35 Merge remote-tracking branch 'origin/bugfix/UTAPI-87/handle_zero_byte_objs_in_ver_susp_buck' into w/8.1/bugfix/UTAPI-87/handle_zero_byte_objs_in_ver_susp_buck 2022-10-25 13:36:06 -07:00
Taylor McKinnon 3ec818bca1 bf(UTAPI-87): Bump version to 7.10.11 2022-10-25 13:34:21 -07:00
Taylor McKinnon c3111dfadf bf(UTAPI-87): Handle deleting zero byte objects in version suspended buckets 2022-10-25 13:34:21 -07:00
Taylor McKinnon 451f88d27e Merge remote-tracking branch 'origin/bugfix/UTAPI-85/bump_version' into w/8.1/bugfix/UTAPI-85/bump_version 2022-10-17 14:47:06 -07:00
Taylor McKinnon 71f162169d bf(UTAPI-85): Bump version to 7.10.10 2022-10-17 14:45:26 -07:00
bert-e c0abf3e53f Merge branches 'w/8.1/bugfix/UTAPI-85/allow_host_port_override' and 'q/1271/7.10/bugfix/UTAPI-85/allow_host_port_override' into tmp/octopus/q/8.1 2022-10-17 21:24:40 +00:00
bert-e 3e740a2f6a Merge branch 'bugfix/UTAPI-85/allow_host_port_override' into q/7.10 2022-10-17 21:24:40 +00:00
bert-e 93134e6ccb Merge branches 'w/8.1/bugfix/UTAPI-82-v1-delete-inconsistency' and 'q/1267/7.10/bugfix/UTAPI-82-v1-delete-inconsistency' into tmp/octopus/q/8.1 2022-10-15 00:09:09 +00:00
bert-e b4b52c0de7 Merge branch 'bugfix/UTAPI-82-v1-delete-inconsistency' into q/7.10 2022-10-15 00:09:09 +00:00
Artem Bakalov 4faac178ef Merge remote-tracking branch 'origin/bugfix/UTAPI-82-v1-delete-inconsistency' into w/8.1/bugfix/UTAPI-82-v1-delete-inconsistency 2022-10-14 17:01:02 -07:00
Artem Bakalov 193d1a5d92 UTAPI-82 fix delete inconsistency 2022-10-14 16:55:16 -07:00
bert-e f90213d3d5 Merge branch 'bugfix/UTAPI-85/allow_host_port_override' into tmp/octopus/w/8.1/bugfix/UTAPI-85/allow_host_port_override 2022-10-13 18:24:33 +00:00
Taylor McKinnon 7eb35d51f4 bf(UTAPI-85): Allow host and port to be overridden 2022-10-13 11:02:31 -07:00
bert-e 2e04a5cc44 Merge branch 'improvement/UTAPI-83/provide_warp10_image' into tmp/octopus/w/8.1/improvement/UTAPI-83/provide_warp10_image 2022-10-05 20:17:37 +00:00
Taylor McKinnon 52520e4de1 impr(UTAPI-83): Add warp 10 release workflow 2022-10-05 13:17:05 -07:00
Taylor McKinnon 3391130d43 Merge remote-tracking branch 'origin/bugfix/UTAPI-84/fix_nodesvc_base_config' into w/8.1/bugfix/UTAPI-84/fix_nodesvc_base_config 2022-10-03 15:34:56 -07:00
Taylor McKinnon f3a9a57f58 bf(UTAPI-84): Fix nodesvc-base image config 2022-10-03 15:33:31 -07:00
Taylor McKinnon c0aa52beab Merge remote-tracking branch 'origin/feature/UTAPI-71/add_nodesvc_based_image_and_release_workflow' into w/8.1/feature/UTAPI-71/add_nodesvc_based_image_and_release_workflow 2022-09-23 10:46:41 -07:00
Taylor McKinnon 0ae108f15e ft(UTAPI-71): Rework release workflow to support S3C releases 2022-09-23 10:45:16 -07:00
Taylor McKinnon 2f99e1ddd5 ft(UTAPI-71): Split v2 tests into with/without sensision enabled 2022-09-23 10:45:16 -07:00
Taylor McKinnon cbeae49d47 ft(UTAPI-71): Fix sensision inside warp 10 image 2022-09-23 10:45:16 -07:00
Taylor McKinnon 64d3ecb10f ft(UTAPI-71): Call build-ci from tests 2022-09-23 10:45:16 -07:00
Taylor McKinnon df57f68b9a ft(UTAPI-71): Add build workflows 2022-09-23 10:45:16 -07:00
Taylor McKinnon db5a43f412 ft(UTAPI-71): Backport Dockerfile from development/8.1 branch 2022-09-22 10:52:20 -07:00
Taylor McKinnon 116a2108b0 ft(UTAPI-71): Add nodesvc-base based image 2022-09-22 10:52:20 -07:00
Taylor McKinnon 750cabc565 Merge remote-tracking branch 'origin/bugfix/UTAPI-81/add_bucket_tagging_methods' into w/8.1/bugfix/UTAPI-81/add_bucket_tagging_methods 2022-08-04 12:53:02 -07:00
Taylor McKinnon 469b862a69 bf(UTAPI-81): Add bucket tagging operations 2022-08-04 12:49:23 -07:00
Taylor McKinnon 62bf4d86e6 Merge remote-tracking branch 'origin/improvement/UTAPI-80/release_7_10_7' into w/8.1/improvement/UTAPI-80/release_7_10_7 2022-07-22 11:19:18 -07:00
Taylor McKinnon a072535050 impr(UTAPI-80): Release 7.10.7 2022-07-22 11:17:56 -07:00
bert-e 29b52a0346 Merge branch 'bugfix/UTAPI-78/fix_user_auth_with_no_resources' into tmp/octopus/w/8.1/bugfix/UTAPI-78/fix_user_auth_with_no_resources 2022-07-21 16:38:05 +00:00
Taylor McKinnon 1168720f98 bf(UTAPI-78): Fix second stage user auth with no resources 2022-07-20 09:37:34 -07:00
Jonathan Gramain ff5a75bb11 Merge remote-tracking branch 'origin/bugfix/UTAPI-77-bumpOasTools' into w/8.1/bugfix/UTAPI-77-bumpOasTools 2022-06-20 15:10:40 -07:00
Jonathan Gramain 84a025d430 bugfix: UTAPI-77 bump oas-tools to 2.2.2
Bump the dependency version of oas-tools to version 2.2.2, to fix a
vulnerability with mpath@0.5.0
2022-06-20 13:32:26 -07:00
bert-e 65726f6d0b Merge branches 'w/8.1/feature/UTAPI-76/breakout_leveldb_and_datalog' and 'q/1251/7.10/feature/UTAPI-76/breakout_leveldb_and_datalog' into tmp/octopus/q/8.1 2022-06-08 21:59:45 +00:00
bert-e 4fbcd109a7 Merge branch 'feature/UTAPI-76/breakout_leveldb_and_datalog' into q/7.10 2022-06-08 21:59:45 +00:00
bert-e eed137768d Merge branches 'w/8.1/feature/UTAPI-75/Add_metrics_for_latest_check_snapshot_timestamps' and 'q/1249/7.10/feature/UTAPI-75/Add_metrics_for_latest_check_snapshot_timestamps' into tmp/octopus/q/8.1 2022-06-07 22:32:44 +00:00
bert-e a71e4d48d0 Merge branch 'feature/UTAPI-75/Add_metrics_for_latest_check_snapshot_timestamps' into q/7.10 2022-06-07 22:32:44 +00:00
bert-e 0257e97bc2 Merge branch 'feature/UTAPI-76/breakout_leveldb_and_datalog' into tmp/octopus/w/8.1/feature/UTAPI-76/breakout_leveldb_and_datalog 2022-06-07 22:32:07 +00:00
Taylor McKinnon 7e596598fb ft(UTAPI-76): Breakout disk usage for leveldb and datalog 2022-06-07 15:31:27 -07:00
bert-e 55b640faba Merge branch 'feature/UTAPI-75/Add_metrics_for_latest_check_snapshot_timestamps' into tmp/octopus/w/8.1/feature/UTAPI-75/Add_metrics_for_latest_check_snapshot_timestamps 2022-06-03 16:43:12 +00:00
Taylor McKinnon fd5bea5301 ft(UTAPI-75): Add metrics for latest checkpoint and snapshot 2022-06-03 09:40:27 -07:00
bert-e 54516db267 Merge branch 'feature/UTAPI-70/add_metrics_to_http_server' into tmp/octopus/w/8.1/feature/UTAPI-70/add_metrics_to_http_server 2022-05-26 16:50:41 +00:00
Taylor McKinnon 39eee54045 ft(UTAPI-70): Add http server metrics 2022-05-26 09:50:12 -07:00
bert-e c2f121d0d3 Merge branch 'feature/UTAPI-69/Add_async_task_metrics' into q/7.10 2022-05-26 16:32:49 +00:00
bert-e 1c6c159423 Merge branches 'w/8.1/feature/UTAPI-69/Add_async_task_metrics' and 'q/1239/7.10/feature/UTAPI-69/Add_async_task_metrics' into tmp/octopus/q/8.1 2022-05-26 16:32:49 +00:00
bert-e 22805fe7e7 Merge branch 'feature/UTAPI-69/Add_async_task_metrics' into tmp/octopus/w/8.1/feature/UTAPI-69/Add_async_task_metrics 2022-05-26 16:23:04 +00:00
Taylor McKinnon fbc7f3f442 ft(UTAPI-69): Add metrics for async tasks 2022-05-26 09:22:42 -07:00
bert-e 9e1761b0a4 Merge branch 'feature/UTAPI-67/Add_base_prometheus_framework' into q/7.10 2022-05-24 17:46:30 +00:00
bert-e ca82189fd7 Merge branches 'w/8.1/feature/UTAPI-67/Add_base_prometheus_framework' and 'q/1235/7.10/feature/UTAPI-67/Add_base_prometheus_framework' into tmp/octopus/q/8.1 2022-05-24 17:46:30 +00:00
bert-e 2f26d380f6 Merge branch 'feature/UTAPI-67/Add_base_prometheus_framework' into tmp/octopus/w/8.1/feature/UTAPI-67/Add_base_prometheus_framework 2022-05-24 17:12:06 +00:00
Taylor McKinnon 50a3ba2f18 ft(UTAPI-67): Add metrics framework to BaseTask 2022-05-24 10:07:48 -07:00
Taylor McKinnon 9f1552488c impr(UTAPI-66): Update Dockerfile with --network-concurrency 2022-05-18 10:06:08 -07:00
bert-e bf366e9472 Merge branch 'improvement/UTAPI-66/migrate_to_arsenal_7_10_18' into tmp/octopus/w/8.1/improvement/UTAPI-66/migrate_to_arsenal_7_10_18 2022-05-18 16:34:32 +00:00
Taylor McKinnon 5352f8467d remove unused require 2022-05-18 09:34:26 -07:00
bert-e 002a7ad1ca Merge branch 'improvement/UTAPI-66/migrate_to_arsenal_7_10_18' into tmp/octopus/w/8.1/improvement/UTAPI-66/migrate_to_arsenal_7_10_18 2022-05-18 16:33:37 +00:00
Taylor McKinnon a8f54966bc f 2022-05-18 09:33:32 -07:00
bert-e c2bff35bc6 Merge branch 'improvement/UTAPI-66/migrate_to_arsenal_7_10_18' into tmp/octopus/w/8.1/improvement/UTAPI-66/migrate_to_arsenal_7_10_18 2022-05-18 16:30:07 +00:00
Taylor McKinnon b92102eb65 Apply suggestions from code review
Co-authored-by: Jonathan Gramain <jonathan.gramain@scality.com>
2022-05-18 09:30:02 -07:00
Taylor McKinnon 280c4bae3a Merge remote-tracking branch 'origin/improvement/UTAPI-66/migrate_to_arsenal_7_10_18' into w/8.1/improvement/UTAPI-66/migrate_to_arsenal_7_10_18 2022-05-17 13:39:36 -07:00
Taylor McKinnon d9901609ae impr(UTAPI-66): Convert v2 code 2022-05-17 11:03:23 -07:00
Taylor McKinnon 4448f79088 impr(UTAPI-66): Convert v1 code 2022-05-17 11:01:30 -07:00
Taylor McKinnon 40fa94f0d7 impr(UTAPI-66): Update arsenal to 7.10.24 2022-05-17 10:56:15 -07:00
bert-e 3c09767315 Merge branch 'bugfix/UTAPI-72/add_missing_await_to_pushMetric' into tmp/octopus/w/8.1/bugfix/UTAPI-72/add_missing_await_to_pushMetric 2022-05-06 20:13:26 +00:00
Taylor McKinnon 43ca83cab7 bf(UTAPI-72): Fix CacheClient.pushMetric() to `await` this._cacheBackend.addToShard() 2022-05-06 12:46:35 -07:00
Erwan Bernard 2c1d25a50e Merge remote-tracking branch 'origin/w/7.10/feature/RELENG-5645/patch-usage-of-action-gh-release' into w/8.1/feature/RELENG-5645/patch-usage-of-action-gh-release 2022-04-01 17:01:05 +02:00
bert-e 7dd49ca418 Merge branch 'feature/RELENG-5645/patch-usage-of-action-gh-release' into tmp/octopus/w/7.10/feature/RELENG-5645/patch-usage-of-action-gh-release 2022-04-01 14:58:01 +00:00
Erwan Bernard 87cba51d75 [RELENG-5645] Patch usage of actions gh release 2022-04-01 15:56:31 +02:00
67 changed files with 1768 additions and 5791 deletions
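The UTAPI-105 failover described in the bugfix commit above reduces to a retry loop over the configured sentinels: the reindex subprocess signals a sentinel connection error with a dedicated exit code, and the runner then moves on to the next sentinel. A minimal sketch with illustrative names (the repository's actual logic is the `_runScriptWithSentinels` method in the UtapiReindex diff below):

```js
// Minimal sketch of the UTAPI-105 failover, not the repository's exact code.
// A subprocess exit code of 100 means "could not reach this sentinel", so we
// retry with the next sentinel in the list until one works or none are left.
const EXIT_CODE_SENTINEL_CONNECTION = 100;

function runWithFailover(sentinels, runScript, done) {
    const [current, ...rest] = sentinels;
    runScript(current, code => {
        if (code === EXIT_CODE_SENTINEL_CONNECTION && rest.length > 0) {
            return runWithFailover(rest, runScript, done); // try the next sentinel
        }
        return done(code); // success, or no sentinels left to try
    });
}
```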


@@ -1,8 +1,7 @@
FROM registry.scality.com/vault-dev/vault:c2607856
FROM ghcr.io/scality/vault:c2607856
ENV VAULT_DB_BACKEND LEVELDB
RUN chmod 400 tests/utils/keyfile
ENTRYPOINT yarn start

.github/workflows/build-ci.yaml (new file, 65 lines)

@@ -0,0 +1,65 @@
name: build-ci-images
on:
  workflow_call:
jobs:
  warp10-ci:
    uses: scality/workflows/.github/workflows/docker-build.yaml@v2
    secrets:
      REGISTRY_LOGIN: ${{ github.repository_owner }}
      REGISTRY_PASSWORD: ${{ github.token }}
    with:
      name: warp10-ci
      context: .
      file: images/warp10/Dockerfile
      lfs: true
  redis-ci:
    uses: scality/workflows/.github/workflows/docker-build.yaml@v2
    secrets:
      REGISTRY_LOGIN: ${{ github.repository_owner }}
      REGISTRY_PASSWORD: ${{ github.token }}
    with:
      name: redis-ci
      context: .
      file: images/redis/Dockerfile
  redis-replica-ci:
    uses: scality/workflows/.github/workflows/docker-build.yaml@v2
    needs:
      - redis-ci
    secrets:
      REGISTRY_LOGIN: ${{ github.repository_owner }}
      REGISTRY_PASSWORD: ${{ github.token }}
    with:
      name: redis-replica-ci
      context: .github/docker/redis-replica
      build-args: |
        REDIS_IMAGE=ghcr.io/${{ github.repository }}/redis-ci:${{ github.sha }}
  vault-ci:
    runs-on: ubuntu-20.04
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          lfs: true
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Login to GitHub Registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ github.token }}
      - name: Build and push vault Image
        uses: docker/build-push-action@v5
        with:
          push: true
          context: .github/docker/vault
          tags: ghcr.io/${{ github.repository }}/vault-ci:${{ github.sha }}
          cache-from: type=gha,scope=vault
          cache-to: type=gha,mode=max,scope=vault

.github/workflows/build-dev.yaml (new file, 16 lines)

@@ -0,0 +1,16 @@
name: build-dev-image
on:
  push:
    branches-ignore:
      - 'development/**'
jobs:
  build-dev:
    uses: scality/workflows/.github/workflows/docker-build.yaml@v2
    secrets:
      REGISTRY_LOGIN: ${{ github.repository_owner }}
      REGISTRY_PASSWORD: ${{ github.token }}
    with:
      namespace: ${{ github.repository_owner }}
      name: ${{ github.event.repository.name }}

.github/workflows/release-warp10.yaml (new file, 39 lines)

@@ -0,0 +1,39 @@
name: release-warp10
on:
  workflow_dispatch:
    inputs:
      tag:
        type: string
        description: 'Tag to be released'
        required: true
      create-github-release:
        type: boolean
        description: Create a tag and matching Github release.
        required: false
        default: true
jobs:
  build:
    uses: scality/workflows/.github/workflows/docker-build.yaml@v2
    secrets: inherit
    with:
      name: warp10
      context: .
      file: images/warp10/Dockerfile
      tag: ${{ github.event.inputs.tag }}
      lfs: true
  release:
    if: ${{ inputs.create-github-release }}
    runs-on: ubuntu-latest
    needs: build
    steps:
      - uses: softprops/action-gh-release@v2
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          name: Release utapi/warp10:${{ github.event.inputs.tag }}-warp10
          tag_name: ${{ github.event.inputs.tag }}-warp10
          generate_release_notes: false
          target_commitish: ${{ github.sha }}


@@ -3,42 +3,43 @@ name: release
on:
workflow_dispatch:
inputs:
dockerfile:
description: Dockerfile to build image from
type: choice
options:
- images/nodesvc-base/Dockerfile
- Dockerfile
required: true
tag:
type: string
description: 'Tag to be released'
required: true
create-github-release:
type: boolean
description: Create a tag and matching Github release.
required: false
default: false
jobs:
build:
uses: scality/workflows/.github/workflows/docker-build.yaml@v2
with:
namespace: ${{ github.repository_owner }}
name: ${{ github.event.repository.name }}
context: .
file: ${{ github.event.inputs.dockerfile}}
tag: ${{ github.event.inputs.tag }}
release:
if: ${{ inputs.create-github-release }}
runs-on: ubuntu-latest
needs: build
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v1
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to Registry
uses: docker/login-action@v1
with:
registry: registry.scality.com
username: ${{ secrets.REGISTRY_LOGIN }}
password: ${{ secrets.REGISTRY_PASSWORD }}
- name: Build and push utapi image
uses: docker/build-push-action@v2
with:
context: .
push: true
tags: "registry.scality.com/utapi/utapi:${{ github.event.inputs.tag }}"
- name: Create Release
uses: softprops/action-gh-release@v1
- uses: softprops/action-gh-release@v2
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
GITHUB_TOKEN: ${{ github.token }}
with:
name: Release ${{ github.event.inputs.tag }}
tag_name: ${{ github.event.inputs.tag }}
release_name: Release ${{ github.event.inputs.tag }}
generate_release_notes: true
target_commitish: ${{ github.sha }}


@@ -4,104 +4,45 @@ name: tests
on:
push:
branches-ignore:
- 'development/**'
- 'development/**'
workflow_dispatch:
inputs:
debug:
description: Debug (enable the ability to SSH to runners)
type: boolean
required: false
default: 'false'
connection-timeout-m:
type: number
required: false
description: Timeout for ssh connection to worker (minutes)
default: 30
jobs:
build:
runs-on: ubuntu-20.04
steps:
- name: Checkout
uses: actions/checkout@v2.3.4
with:
lfs: true
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1.6.0
- name: Login to GitHub Registry
uses: docker/login-action@v1.10.0
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Login to Scality Registry
uses: docker/login-action@v1.10.0
with:
registry: registry.scality.com
username: ${{ secrets.REGISTRY_LOGIN }}
password: ${{ secrets.REGISTRY_PASSWORD }}
- name: Build and push redis CI image
uses: docker/build-push-action@v2.7.0
with:
push: true
file: images/redis/Dockerfile
context: '.'
tags: ghcr.io/${{ github.repository }}/redis-ci:${{ github.sha }}
cache-from: type=gha,scope=redis
cache-to: type=gha,mode=max,scope=redis
- name: Build and push redis replica CI image
uses: docker/build-push-action@v2.7.0
with:
push: true
context: .github/docker/redis-replica
build-args: |
REDIS_IMAGE=ghcr.io/${{ github.repository }}/redis-ci:${{ github.sha }}
tags: ghcr.io/${{ github.repository }}/redis-replica-ci:${{ github.sha }}
cache-from: type=gha,scope=redis-replica
cache-to: type=gha,mode=max,scope=redis-replica
- name: Build and push warp10 Image
uses: docker/build-push-action@v2.7.0
with:
push: true
file: images/warp10/Dockerfile
context: '.'
tags: ghcr.io/${{ github.repository }}/warp10-ci:${{ github.sha }}
cache-from: type=gha,scope=warp10
cache-to: type=gha,mode=max,scope=warp10
- name: Build and push vault Image
uses: docker/build-push-action@v2.7.0
with:
push: true
context: '.github/docker/vault'
tags: ghcr.io/${{ github.repository }}/vault-ci:${{ github.sha }}
cache-from: type=gha,scope=vault
cache-to: type=gha,mode=max,scope=vault
- name: Build and push utapi Image
uses: docker/build-push-action@v2.7.0
with:
push: true
context: '.'
tags: registry.scality.com/utapi-dev/utapi:${{ github.sha }}
cache-from: type=gha,scope=utapi
cache-to: type=gha,mode=max,scope=utapi
build-ci:
uses: ./.github/workflows/build-ci.yaml
lint:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
uses: actions/checkout@v4
with:
lfs: true
- uses: actions/setup-node@v2
- uses: actions/setup-node@v4
with:
node-version: '16.13.2'
cache: yarn
- name: install dependencies
run: yarn install --frozen-lockfile
run: yarn install --frozen-lockfile --network-concurrency 1
- name: run static analysis tools on markdown
run: yarn run lint_md
- name: run static analysis tools on code
run: yarn run lint
tests:
needs: build
tests-v1:
needs:
- build-ci
runs-on: ubuntu-latest
env:
REINDEX_PYTHON_INTERPRETER: python3
@@ -112,17 +53,18 @@ jobs:
test:
- name: run unit tests
command: yarn test
env: {}
- name: run client tests
env:
UTAPI_METRICS_ENABLED: 'true'
- name: run v1 client tests
command: bash ./.github/scripts/run_ft_tests.bash false ft_test:client
env: {}
- name: run server tests
- name: run v1 server tests
command: bash ./.github/scripts/run_ft_tests.bash false ft_test:server
env: {}
- name: run cron tests
- name: run v1 cron tests
command: bash ./.github/scripts/run_ft_tests.bash false ft_test:cron
env: {}
- name: run interval tests
- name: run v1 interval tests
command: bash ./.github/scripts/run_ft_tests.bash true ft_test:interval
env: {}
services:
@@ -146,7 +88,7 @@ jobs:
--health-timeout 5s
--health-retries 5
redis-sentinel:
image: bitnami/redis-sentinel:6.2
image: bitnami/redis-sentinel:7.2.4
env:
REDIS_MASTER_SET: scality-s3
REDIS_SENTINEL_PORT_NUMBER: '16379'
@@ -177,59 +119,31 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v2
uses: actions/checkout@v4
with:
lfs: true
- uses: actions/setup-node@v2
- uses: actions/setup-node@v4
with:
node-version: '16.13.2'
cache: yarn
- uses: actions/setup-python@v2
- uses: actions/setup-python@v5
with:
python-version: '3.9'
- uses: actions/cache@v2
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip
cache: pip
- name: Install python deps
run: |
pip install requests
pip install redis
run: pip install -r requirements.txt
- name: install dependencies
run: yarn install --frozen-lockfile
run: yarn install --frozen-lockfile --network-concurrency 1
- name: ${{ matrix.test.name }}
run: ${{ matrix.test.command }}
env: ${{ matrix.test.env }}
tests-with-vault:
needs: build
tests-v2-with-vault:
needs:
- build-ci
runs-on: ubuntu-latest
env:
REINDEX_PYTHON_INTERPRETER: python3
name: ${{ matrix.test.name }}
strategy:
fail-fast: false
matrix:
test:
- name: run v2 functional tests
command: bash ./.github/scripts/run_ft_tests.bash true ft_test:v2
env:
UTAPI_CACHE_BACKEND: redis
UTAPI_SERVICE_USER_ENABLED: 'true'
UTAPI_LOG_LEVEL: trace
SETUP_CMD: "run start_v2:server"
- name: run v2 soft limit test
command: bash ./.github/scripts/run_ft_tests.bash true ft_test:softLimit
env:
UTAPI_CACHE_BACKEND: redis
UTAPI_LOG_LEVEL: trace
SETUP_CMD: "run start_v2:server"
- name: run v2 hard limit test
command: bash ./.github/scripts/run_ft_tests.bash true ft_test:hardLimit
env:
UTAPI_CACHE_BACKEND: redis
UTAPI_LOG_LEVEL: trace
SETUP_CMD: "run start_v2:server"
services:
redis:
image: ghcr.io/${{ github.repository }}/redis-ci:${{ github.sha }}
@ -251,7 +165,7 @@ jobs:
--health-timeout 5s
--health-retries 5
redis-sentinel:
image: bitnami/redis-sentinel:6.2
image: bitnami/redis-sentinel:7.2.4
env:
REDIS_MASTER_SET: scality-s3
REDIS_SENTINEL_PORT_NUMBER: '16379'
@@ -294,31 +208,154 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v2
uses: actions/checkout@v4
with:
lfs: true
- uses: actions/setup-node@v2
- uses: actions/setup-node@v4
with:
node-version: '16.13.2'
cache: yarn
- uses: actions/setup-python@v2
- uses: actions/setup-python@v5
with:
python-version: '3.9'
- uses: actions/cache@v2
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip
cache: pip
- name: Install python deps
run: |
pip install requests
pip install redis
run: pip install -r requirements.txt
- name: install dependencies
run: yarn install --frozen-lockfile
run: yarn install --frozen-lockfile --network-concurrency 1
- name: Wait for warp10 for 60 seconds
run: sleep 60
- name: run v2 functional tests
run: bash ./.github/scripts/run_ft_tests.bash true ft_test:v2
env:
UTAPI_CACHE_BACKEND: redis
UTAPI_SERVICE_USER_ENABLED: 'true'
UTAPI_LOG_LEVEL: trace
SETUP_CMD: "run start_v2:server"
- name: 'Debug: SSH to runner'
uses: scality/actions/action-ssh-to-runner@1.7.0
timeout-minutes: ${{ fromJSON(github.event.inputs.connection-timeout-m) }}
continue-on-error: true
with:
tmate-server-host: ${{ secrets.TMATE_SERVER_HOST }}
tmate-server-port: ${{ secrets.TMATE_SERVER_PORT }}
tmate-server-rsa-fingerprint: ${{ secrets.TMATE_SERVER_RSA_FINGERPRINT }}
tmate-server-ed25519-fingerprint: ${{ secrets.TMATE_SERVER_ED25519_FINGERPRINT }}
if: ${{ ( github.event.inputs.debug == true || github.event.inputs.debug == 'true' ) }}
tests-v2-without-sensision:
needs:
- build-ci
runs-on: ubuntu-latest
env:
REINDEX_PYTHON_INTERPRETER: python3
name: ${{ matrix.test.name }}
strategy:
fail-fast: false
matrix:
test:
- name: run v2 soft limit test
command: bash ./.github/scripts/run_ft_tests.bash true ft_test:softLimit
env:
UTAPI_CACHE_BACKEND: redis
UTAPI_LOG_LEVEL: trace
SETUP_CMD: "run start_v2:server"
- name: run v2 hard limit test
command: bash ./.github/scripts/run_ft_tests.bash true ft_test:hardLimit
env:
UTAPI_CACHE_BACKEND: redis
UTAPI_LOG_LEVEL: trace
SETUP_CMD: "run start_v2:server"
services:
redis:
image: ghcr.io/${{ github.repository }}/redis-ci:${{ github.sha }}
ports:
- 6379:6379
- 9121:9121
options: >-
--health-cmd "redis-cli ping"
--health-interval 10s
--health-timeout 5s
--health-retries 5
redis-replica:
image: ghcr.io/${{ github.repository }}/redis-replica-ci:${{ github.sha }}
ports:
- 6380:6380
options: >-
--health-cmd "redis-cli -p 6380 ping"
--health-interval 10s
--health-timeout 5s
--health-retries 5
redis-sentinel:
image: bitnami/redis-sentinel:7.2.4
env:
REDIS_MASTER_SET: scality-s3
REDIS_SENTINEL_PORT_NUMBER: '16379'
REDIS_SENTINEL_QUORUM: '1'
ports:
- 16379:16379
options: >-
--health-cmd "redis-cli -p 16379 ping"
--health-interval 10s
--health-timeout 5s
--health-retries 5
warp10:
image: ghcr.io/${{ github.repository }}/warp10-ci:${{ github.sha }}
env:
standalone.port: '4802'
warpscript.maxops: '10000000'
ports:
- 4802:4802
- 8082:8082
- 9718:9718
options: >-
--health-cmd "curl localhost:4802/api/v0/check"
--health-interval 10s
--health-timeout 5s
--health-retries 10
--health-start-period 60s
vault:
image: ghcr.io/${{ github.repository }}/vault-ci:${{ github.sha }}
ports:
- 8500:8500
- 8600:8600
- 8700:8700
- 8800:8800
options: >-
--health-cmd "curl http://localhost:8500/_/healthcheck"
--health-interval 10s
--health-timeout 5s
--health-retries 10
steps:
- name: Checkout
uses: actions/checkout@v4
with:
lfs: true
- uses: actions/setup-node@v4
with:
node-version: '16.13.2'
cache: yarn
- uses: actions/setup-python@v5
with:
python-version: '3.9'
cache: pip
- name: Install python deps
run: pip install -r requirements.txt
- name: install dependencies
run: yarn install --frozen-lockfile --network-concurrency 1
- name: Wait for warp10 a little bit
run: sleep 60
- name: ${{ matrix.test.name }}
run: ${{ matrix.test.command }}
env: ${{ matrix.test.env }}
- name: Setup tmate session
uses: mxschmitt/action-tmate@v3
if: failure()
- name: 'Debug: SSH to runner'
uses: scality/actions/action-ssh-to-runner@1.7.0
timeout-minutes: ${{ fromJSON(github.event.inputs.connection-timeout-m) }}
continue-on-error: true
with:
tmate-server-host: ${{ secrets.TMATE_SERVER_HOST }}
tmate-server-port: ${{ secrets.TMATE_SERVER_PORT }}
tmate-server-rsa-fingerprint: ${{ secrets.TMATE_SERVER_RSA_FINGERPRINT }}
tmate-server-ed25519-fingerprint: ${{ secrets.TMATE_SERVER_ED25519_FINGERPRINT }}
if: ${{ ( github.event.inputs.debug == true || github.event.inputs.debug == 'true' ) }}


@@ -15,7 +15,7 @@ RUN curl -sS http://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - \
RUN apt-get update \
&& apt-get install -y jq git python3 build-essential yarn --no-install-recommends \
&& yarn cache clean \
&& yarn install --frozen-lockfile --production --ignore-optional \
&& yarn install --frozen-lockfile --production --ignore-optional --network-concurrency=1 \
&& apt-get autoremove --purge -y python3 git build-essential \
&& rm -rf /var/lib/apt/lists/* \
&& yarn cache clean \


@@ -27,7 +27,7 @@ x-models:
services:
redis-0:
image: redis:5
image: redis:7.2.4
command: redis-server --port 6379 --slave-announce-ip "${EXTERNAL_HOST}"
ports:
- 6379:6379
@@ -35,7 +35,7 @@ services:
- HOST_IP="${EXTERNAL_HOST}"
redis-1:
image: redis:5
image: redis:7.2.4
command: redis-server --port 6380 --slaveof "${EXTERNAL_HOST}" 6379 --slave-announce-ip "${EXTERNAL_HOST}"
ports:
- 6380:6380
@@ -43,7 +43,7 @@ services:
- HOST_IP="${EXTERNAL_HOST}"
redis-sentinel-0:
image: redis:5
image: redis:7.2.4
command: |-
bash -c 'cat > /tmp/sentinel.conf <<EOF
port 16379


@@ -2,11 +2,10 @@
## Docker Image Generation
Docker images are hosted on [registry.scality.com](registry.scality.com).
Utapi has two namespaces there:
Docker images are hosted on [ghcr.io](https://github.com/orgs/scality/packages).
Utapi has one namespace there:
* Production Namespace: registry.scality.com/utapi
* Dev Namespace: registry.scality.com/utapi-dev
* Namespace: ghcr.io/scality/utapi
With every CI build, the CI will push images, tagging the
content with the developer branch's short SHA-1 commit hash.
@@ -18,8 +17,8 @@ Tagged versions of utapi will be stored in the production namespace.
## How to Pull Docker Images
```sh
docker pull registry.scality.com/utapi-dev/utapi:<commit hash>
docker pull registry.scality.com/utapi/utapi:<tag>
docker pull ghcr.io/scality/utapi:<commit hash>
docker pull ghcr.io/scality/utapi:<tag>
```
## Release Process


@@ -0,0 +1,20 @@
FROM ghcr.io/scality/federation/nodesvc-base:7.10.5.0
ENV UTAPI_CONFIG_FILE=${CONF_DIR}/config.json
WORKDIR ${HOME_DIR}/utapi
COPY ./package.json ./yarn.lock ${HOME_DIR}/utapi
# Remove when gitcache is sorted out
RUN rm /root/.gitconfig
RUN yarn install --production --frozen-lockfile --network-concurrency 1
COPY . ${HOME_DIR}/utapi
RUN chown -R ${USER} ${HOME_DIR}/utapi
USER ${USER}
CMD bash -c "source ${CONF_DIR}/env && export && supervisord -c ${CONF_DIR}/${SUPERVISORD_CONF}"


@@ -1 +0,0 @@
*.jar filter=lfs diff=lfs merge=lfs -text


@@ -0,0 +1,2 @@
standalone.host = 0.0.0.0
standalone.port = 4802


@@ -13,7 +13,7 @@ RUN apk add zip unzip build-base \
&& cd .. \
&& go build -a -o /usr/local/go/warp10_sensision_exporter
FROM registry.scality.com/utapi/warp10:2.8.1-95-g73e7de80
FROM ghcr.io/scality/utapi/warp10:2.8.1-95-g73e7de80
# Override baked in version
# Remove when updating to a numbered release
@@ -27,8 +27,6 @@ ENV SENSISION_DATA_DIR /data/sensision
ENV SENSISION_PORT 8082
# Modify Warp 10 default config
ENV standalone.host 0.0.0.0
ENV standalone.port 4802
ENV standalone.home /opt/warp10
ENV warpscript.repository.directory /usr/local/share/warpscript
ENV warp.token.file /static.tokens
@@ -53,6 +51,6 @@ COPY --from=builder /usr/local/go/warp10_sensision_exporter /usr/local/bin/warp1
ADD ./images/warp10/s6 /etc
ADD ./warpscript /usr/local/share/warpscript
ADD ./images/warp10/static.tokens /
ADD ./images/warp10/90-default-host-port.conf $WARP10_CONF_TEMPLATES/90-default-host-port.conf
CMD /init


@@ -3,9 +3,8 @@
JAVA="/usr/bin/java"
JAVA_OPTS=""
VERSION=1.0.23
SENSISION_CONFIG=${SENSISION_DATA_DIR}/conf/sensision.conf
SENSISION_JAR=${SENSISION_HOME}/bin/sensision-${VERSION}.jar
SENSISION_JAR=${SENSISION_HOME}/bin/sensision-${SENSISION_VERSION}.jar
SENSISION_CP=${SENSISION_HOME}/etc:${SENSISION_JAR}
SENSISION_CLASS=io.warp10.sensision.Main
export MALLOC_ARENA_MAX=1


@@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:389d2135867c399a389901460c5f2cc09f4857d0c6d08632c2638c25fb150c46
size 15468553


@@ -1,35 +1,13 @@
/* eslint-disable no-bitwise */
const assert = require('assert');
const fs = require('fs');
const path = require('path');
/**
* Reads from a config file and returns the content as a config object
*/
class Config {
constructor() {
/*
* By default, the config file is "config.json" at the root.
* It can be overridden using the UTAPI_CONFIG_FILE environment var.
*/
this._basePath = path.resolve(__dirname, '..');
this.path = `${this._basePath}/config.json`;
if (process.env.UTAPI_CONFIG_FILE !== undefined) {
this.path = process.env.UTAPI_CONFIG_FILE;
}
// Read config automatically
this._getConfig();
}
_getConfig() {
let config;
try {
const data = fs.readFileSync(this.path, { encoding: 'utf-8' });
config = JSON.parse(data);
} catch (err) {
throw new Error(`could not parse config file: ${err.message}`);
}
constructor(config) {
this.component = config.component;
this.port = 9500;
if (config.port !== undefined) {
@@ -115,18 +93,26 @@ class Config {
}
}
this.vaultd = {};
if (config.vaultd) {
if (config.vaultd.port !== undefined) {
assert(Number.isInteger(config.vaultd.port)
&& config.vaultd.port > 0,
'bad config: vaultd port must be a positive integer');
this.vaultd.port = config.vaultd.port;
}
if (config.vaultd.host !== undefined) {
assert.strictEqual(typeof config.vaultd.host, 'string',
'bad config: vaultd host must be a string');
this.vaultd.host = config.vaultd.host;
if (config.vaultclient) {
// Instance passed from outside
this.vaultclient = config.vaultclient;
this.vaultd = null;
} else {
// Connection data
this.vaultclient = null;
this.vaultd = {};
if (config.vaultd) {
if (config.vaultd.port !== undefined) {
assert(Number.isInteger(config.vaultd.port)
&& config.vaultd.port > 0,
'bad config: vaultd port must be a positive integer');
this.vaultd.port = config.vaultd.port;
}
if (config.vaultd.host !== undefined) {
assert.strictEqual(typeof config.vaultd.host, 'string',
'bad config: vaultd host must be a string');
this.vaultd.host = config.vaultd.host;
}
}
}
@@ -141,12 +127,11 @@ class Config {
const { key, cert, ca } = config.certFilePaths
? config.certFilePaths : {};
if (key && cert) {
const keypath = (key[0] === '/') ? key : `${this._basePath}/${key}`;
const certpath = (cert[0] === '/')
? cert : `${this._basePath}/${cert}`;
const keypath = key;
const certpath = cert;
let capath;
if (ca) {
capath = (ca[0] === '/') ? ca : `${this._basePath}/${ca}`;
capath = ca;
assert.doesNotThrow(() => fs.accessSync(capath, fs.F_OK | fs.R_OK),
`File not found or unreachable: ${capath}`);
}
@@ -172,8 +157,13 @@ class Config {
+ 'expireMetrics must be a boolean');
this.expireMetrics = config.expireMetrics;
}
return config;
if (config.onlyCountLatestWhenObjectLocked !== undefined) {
assert(typeof config.onlyCountLatestWhenObjectLocked === 'boolean',
'bad config: onlyCountLatestWhenObjectLocked must be a boolean');
this.onlyCountLatestWhenObjectLocked = config.onlyCountLatestWhenObjectLocked;
}
}
}
module.exports = new Config();
module.exports = Config;

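The effect of this Config rework is easiest to see from the caller's side: the module now exports the class instead of a pre-built singleton, and a vault client instance can be injected directly, in which case the vaultd connection settings are skipped. A minimal usage sketch (the caller-side names are illustrative, not from this diff):

```js
// Hypothetical consumer of the reworked Config. Previously,
// require('./Config') returned a singleton that read config.json itself;
// now the caller loads the configuration and constructs an instance.
const Config = require('./lib/Config');

const config = new Config(require('./config.json'));

// Alternatively, inject an existing vault client instance; the constructor
// then leaves this.vaultd as null, as shown in the hunk above.
const embedded = new Config({
    component: 's3',
    vaultclient: myVaultClient, // assumed: created by the embedding application
});
```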

@@ -6,8 +6,6 @@ const async = require('async');
const { errors } = require('arsenal');
const { getMetricFromKey, getKeys, generateStateKey } = require('./schema');
const s3metricResponseJSON = require('../models/s3metricResponse');
const config = require('./Config');
const Vault = require('./Vault');
const MAX_RANGE_MS = (((1000 * 60) * 60) * 24) * 30; // One month.
@@ -23,7 +21,6 @@ class ListMetrics {
constructor(metric, component) {
this.metric = metric;
this.service = component;
this.vault = new Vault(config);
}
/**
@@ -83,9 +80,10 @@ class ListMetrics {
const resources = validator.get(this.metric);
const timeRange = validator.get('timeRange');
const datastore = utapiRequest.getDatastore();
const vault = utapiRequest.getVault();
// map account ids to canonical ids
if (this.metric === 'accounts') {
return this.vault.getCanonicalIds(resources, log, (err, list) => {
return vault.getCanonicalIds(resources, log, (err, list) => {
if (err) {
return cb(err);
}
@@ -124,10 +122,11 @@ class ListMetrics {
const fifteenMinutes = 15 * 60 * 1000; // In milliseconds
const timeRange = [start - fifteenMinutes, end];
const datastore = utapiRequest.getDatastore();
const vault = utapiRequest.getVault();
// map account ids to canonical ids
if (this.metric === 'accounts') {
return this.vault.getCanonicalIds(resources, log, (err, list) => {
return vault.getCanonicalIds(resources, log, (err, list) => {
if (err) {
return cb(err);
}
@@ -313,11 +312,10 @@ class ListMetrics {
});
if (!areMetricsPositive) {
return cb(errors.InternalError.customizeDescription(
'Utapi is in a transient state for this time period as '
+ 'metrics are being collected. Please try again in a few '
+ 'minutes.',
));
log.info('negative metric value found', {
error: resource,
method: 'ListMetrics.getMetrics',
});
}
/**
* Batch result is of the format


@@ -548,7 +548,9 @@ class UtapiClient {
if (this._isCounterEnabled(counterAction)) {
cmds.push(['incr', generateKey(p, counterAction, timestamp)]);
}
cmds.push(['zrangebyscore', generateStateKey(p, 'storageUtilized'), timestamp, timestamp]);
});
return this.ds.batch(cmds, (err, results) => {
if (err) {
log.error('error pushing metric', {
@@ -582,13 +584,48 @@ class UtapiClient {
// empty.
actionCounter = Number.isNaN(actionCounter)
|| actionCounter < 0 ? 1 : actionCounter;
if (Number.isInteger(params.byteLength)) {
/* byteLength is passed in from cloudserver under the following conditions:
* - bucket versioning is suspended
* - object version id is null
* - the content length of the object exists
* In this case, the master key is deleted and replaced with a delete marker.
* The decrement accounts for the deletion of the master key when utapi reports
* on the number of objects.
*/
actionCounter -= 1;
}
const key = generateStateKey(p, 'numberOfObjects');
const byteArr = results[index + commandsGroupSize - 1][1];
const oldByteLength = byteArr ? parseInt(byteArr[0], 10) : 0;
const newByteLength = member.serialize(Math.max(0, oldByteLength - params.byteLength));
cmds2.push(
['zremrangebyscore', key, timestamp, timestamp],
['zadd', key, timestamp, member.serialize(actionCounter)],
);
if (Number.isInteger(params.byteLength)) {
cmds2.push(
['decr', generateCounter(p, 'numberOfObjectsCounter')],
['decrby', generateCounter(p, 'storageUtilizedCounter'), params.byteLength],
);
}
if (byteArr) {
cmds2.push(
['zremrangebyscore', generateStateKey(p, 'storageUtilized'), timestamp, timestamp],
['zadd', generateStateKey(p, 'storageUtilized'), timestamp, newByteLength],
);
}
return true;
});
if (noErr) {
return this.ds.batch(cmds2, cb);
}

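The comment block above pins down when cloudserver passes `byteLength`; a tiny worked example of the resulting bookkeeping, with hypothetical values:

```js
// Worked example (hypothetical values) for the delete-marker accounting
// above: deleting the only 10-byte object in a version-suspended bucket
// replaces the master key with a delete marker, so both counters roll back.
const byteLength = 10;          // content-length reported by cloudserver
let numberOfObjects = 1;
let storageUtilized = 10;

numberOfObjects -= 1;           // the 'decr' on numberOfObjectsCounter
storageUtilized -= byteLength;  // the 'decrby' on storageUtilizedCounter
// numberOfObjects === 0, storageUtilized === 0
```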

@@ -16,15 +16,19 @@ const REINDEX_PYTHON_INTERPRETER = process.env.REINDEX_PYTHON_INTERPRETER !== un
? process.env.REINDEX_PYTHON_INTERPRETER
: 'python3.7';
const EXIT_CODE_SENTINEL_CONNECTION = 100;
class UtapiReindex {
constructor(config) {
this._enabled = false;
this._schedule = REINDEX_SCHEDULE;
this._sentinel = {
host: '127.0.0.1',
port: 16379,
this._redis = {
name: 'scality-s3',
sentinelPassword: '',
sentinels: [{
host: '127.0.0.1',
port: 16379,
}],
};
this._bucketd = {
host: '127.0.0.1',
@@ -42,14 +46,13 @@ class UtapiReindex {
if (config && config.password) {
this._password = config.password;
}
if (config && config.sentinel) {
if (config && config.redis) {
const {
host, port, name, sentinelPassword,
} = config.sentinel;
this._sentinel.host = host || this._sentinel.host;
this._sentinel.port = port || this._sentinel.port;
this._sentinel.name = name || this._sentinel.name;
this._sentinel.sentinelPassword = sentinelPassword || this._sentinel.sentinelPassword;
name, sentinelPassword, sentinels,
} = config.redis;
this._redis.name = name || this._redis.name;
this._redis.sentinelPassword = sentinelPassword || this._redis.sentinelPassword;
this._redis.sentinels = sentinels || this._redis.sentinels;
}
if (config && config.bucketd) {
const { host, port } = config.bucketd;
@@ -61,17 +64,16 @@ class UtapiReindex {
this._log = new werelogs.Logger('UtapiReindex', { level, dump });
}
this._onlyCountLatestWhenObjectLocked = (config && config.onlyCountLatestWhenObjectLocked === true);
this._requestLogger = this._log.newRequestLogger();
}
_getRedisClient() {
const client = new RedisClient({
sentinels: [{
host: this._sentinel.host,
port: this._sentinel.port,
}],
name: this._sentinel.name,
sentinelPassword: this._sentinel.sentinelPassword,
sentinels: this._redis.sentinels,
name: this._redis.name,
sentinelPassword: this._redis.sentinelPassword,
password: this._password,
});
client.connect();
@@ -86,17 +88,18 @@ class UtapiReindex {
return this.ds.del(REINDEX_LOCK_KEY);
}
_buildFlags() {
_buildFlags(sentinel) {
const flags = {
/* eslint-disable camelcase */
sentinel_ip: this._sentinel.host,
sentinel_port: this._sentinel.port,
sentinel_cluster_name: this._sentinel.name,
sentinel_ip: sentinel.host,
sentinel_port: sentinel.port,
sentinel_cluster_name: this._redis.name,
bucketd_addr: `http://${this._bucketd.host}:${this._bucketd.port}`,
};
if (this._sentinel.sentinelPassword) {
flags.redis_password = this._sentinel.sentinelPassword;
if (this._redis.sentinelPassword) {
flags.redis_password = this._redis.sentinelPassword;
}
/* eslint-enable camelcase */
const opts = [];
Object.keys(flags)
@@ -105,11 +108,15 @@ class UtapiReindex {
opts.push(name);
opts.push(flags[flag]);
});
if (this._onlyCountLatestWhenObjectLocked) {
opts.push('--only-latest-when-locked');
}
return opts;
}
_runScript(path, done) {
const flags = this._buildFlags();
_runScriptWithSentinels(path, remainingSentinels, done) {
const flags = this._buildFlags(remainingSentinels.shift());
this._requestLogger.debug(`launching subprocess ${path} with flags: ${flags}`);
const process = childProcess.spawn(REINDEX_PYTHON_INTERPRETER, [path, ...flags]);
process.stdout.on('data', data => {
@@ -136,6 +143,17 @@ class UtapiReindex {
statusCode: code,
script: path,
});
if (code === EXIT_CODE_SENTINEL_CONNECTION) {
if (remainingSentinels.length > 0) {
this._requestLogger.info('retrying with next sentinel host', {
script: path,
});
return this._runScriptWithSentinels(path, remainingSentinels, done);
}
this._requestLogger.error('no more sentinel host to try', {
script: path,
});
}
} else {
this._requestLogger.info('script exited successfully', {
statusCode: code,
@@ -146,6 +164,11 @@ class UtapiReindex {
});
}
_runScript(path, done) {
const remainingSentinels = [...this._redis.sentinels];
this._runScriptWithSentinels(path, remainingSentinels, done);
}
_attemptLock(job) {
this._requestLogger.info('attempting to acquire the lock to begin job');
this._lock()


@@ -14,6 +14,15 @@ class UtapiRequest {
        this._datastore = null;
        this._requestQuery = null;
        this._requestPath = null;
        this._vault = null;
    }

    getVault() {
        return this._vault;
    }

    setVault(vault) {
        // store the externally provided vault client on the request
        this._vault = vault;
    }

    /**

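Together with the ListMetrics change earlier in this diff, these accessors let the HTTP layer own the Vault client and hand it to each request instead of ListMetrics building its own from the global config. A rough wiring sketch (illustrative only; it also assumes the `setVault(vault)` setter shape shown above):

```js
// Illustrative wiring, not code from this diff: build one Vault client at
// startup, attach it to each incoming UtapiRequest, and let ListMetrics
// fetch it back with getVault() as the hunks above show.
const utapiRequest = new UtapiRequest();
utapiRequest.setVault(vault); // `vault` assumed to be constructed by the server
// ... later, inside ListMetrics:
const v = utapiRequest.getVault();
```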

@@ -1,16 +1,21 @@
import requests
import redis
import json
import argparse
import ast
import sys
import time
import urllib
from concurrent.futures import ThreadPoolExecutor
import json
import logging
import re
import redis
import requests
import sys
from threading import Thread
from concurrent.futures import ThreadPoolExecutor
import time
import urllib
import argparse
logging.basicConfig(level=logging.INFO)
_log = logging.getLogger('utapi-reindex:reporting')
SENTINEL_CONNECT_TIMEOUT_SECONDS = 10
EXIT_CODE_SENTINEL_CONNECTION_ERROR = 100
def get_options():
parser = argparse.ArgumentParser()
@@ -29,8 +34,19 @@ class askRedis():
def __init__(self, ip="127.0.0.1", port="16379", sentinel_cluster_name="scality-s3", password=None):
self._password = password
r = redis.Redis(host=ip, port=port, db=0, password=password)
self._ip, self._port = r.sentinel_get_master_addr_by_name(sentinel_cluster_name)
r = redis.Redis(
host=ip,
port=port,
db=0,
password=password,
socket_connect_timeout=SENTINEL_CONNECT_TIMEOUT_SECONDS
)
try:
self._ip, self._port = r.sentinel_get_master_addr_by_name(sentinel_cluster_name)
except (redis.exceptions.ConnectionError, redis.exceptions.TimeoutError) as e:
_log.error(f'Failed to connect to redis sentinel at {ip}:{port}: {e}')
# use a specific error code to hint on retrying with another sentinel node
sys.exit(EXIT_CODE_SENTINEL_CONNECTION_ERROR)
def read(self, resource, name):
r = redis.Redis(host=self._ip, port=self._port, db=0, password=self._password)


@@ -1,5 +1,6 @@
import argparse
import concurrent.futures as futures
import functools
import itertools
import json
import logging
@@ -8,9 +9,9 @@ import re
import sys
import time
import urllib
from pathlib import Path
from collections import defaultdict, namedtuple
from concurrent.futures import ThreadPoolExecutor
from pprint import pprint
import redis
import requests
@@ -24,6 +25,9 @@ MPU_SHADOW_BUCKET_PREFIX = 'mpuShadowBucket'
ACCOUNT_UPDATE_CHUNKSIZE = 100
SENTINEL_CONNECT_TIMEOUT_SECONDS = 10
EXIT_CODE_SENTINEL_CONNECTION_ERROR = 100
def get_options():
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--sentinel-ip", default='127.0.0.1', help="Sentinel IP")
@@ -32,9 +36,38 @@ def get_options():
parser.add_argument("-n", "--sentinel-cluster-name", default='scality-s3', help="Redis cluster name")
parser.add_argument("-s", "--bucketd-addr", default='http://127.0.0.1:9000', help="URL of the bucketd server")
parser.add_argument("-w", "--worker", default=10, type=int, help="Number of workers")
parser.add_argument("-b", "--bucket", default=None, help="Bucket to be processed")
parser.add_argument("-r", "--max-retries", default=2, type=int, help="Max retries before failing a bucketd request")
return parser.parse_args()
parser.add_argument("--only-latest-when-locked", action='store_true', help="Only index the latest version of a key when the bucket has a default object lock policy")
parser.add_argument("--debug", action='store_true', help="Enable debug logging")
parser.add_argument("--dry-run", action="store_true", help="Do not update redis")
group = parser.add_mutually_exclusive_group()
group.add_argument("-a", "--account", default=[], help="account canonical ID (all account buckets will be processed)", action="append", type=nonempty_string('account'))
group.add_argument("--account-file", default=None, help="file containing account canonical IDs, one ID per line", type=existing_file)
group.add_argument("-b", "--bucket", default=[], help="bucket name", action="append", type=nonempty_string('bucket'))
group.add_argument("--bucket-file", default=None, help="file containing bucket names, one bucket name per line", type=existing_file)
options = parser.parse_args()
if options.bucket_file:
with open(options.bucket_file) as f:
options.bucket = [line.strip() for line in f if line.strip()]
elif options.account_file:
with open(options.account_file) as f:
options.account = [line.strip() for line in f if line.strip()]
return options
def nonempty_string(flag):
def inner(value):
if not value.strip():
raise argparse.ArgumentTypeError("%s: value must not be empty"%flag)
return value
return inner
def existing_file(path):
path = Path(path).resolve()
if not path.exists():
raise argparse.ArgumentTypeError("File does not exist: %s"%path)
return path
def chunks(iterable, size):
it = iter(iterable)
@@ -49,7 +82,7 @@ def _encoded(func):
return urllib.parse.quote(val.encode('utf-8'))
return inner
Bucket = namedtuple('Bucket', ['userid', 'name'])
Bucket = namedtuple('Bucket', ['userid', 'name', 'object_lock_enabled'])
MPU = namedtuple('MPU', ['bucket', 'key', 'upload_id'])
BucketContents = namedtuple('BucketContents', ['bucket', 'obj_count', 'total_size'])
@@ -61,15 +94,21 @@ class InvalidListing(Exception):
def __init__(self, bucket):
super().__init__('Invalid contents found while listing bucket %s'%bucket)
class BucketNotFound(Exception):
def __init__(self, bucket):
super().__init__('Bucket %s not found'%bucket)
class BucketDClient:
'''Performs Listing calls against bucketd'''
__url_format = '{addr}/default/bucket/{bucket}'
__url_attribute_format = '{addr}/default/attributes/{bucket}'
__url_bucket_format = '{addr}/default/bucket/{bucket}'
__headers = {"x-scal-request-uids": "utapi-reindex-list-buckets"}
def __init__(self, bucketd_addr=None, max_retries=2):
def __init__(self, bucketd_addr=None, max_retries=2, only_latest_when_locked=False):
self._bucketd_addr = bucketd_addr
self._max_retries = max_retries
self._only_latest_when_locked = only_latest_when_locked
self._session = requests.Session()
def _do_req(self, url, check_500=True, **kwargs):
@@ -101,7 +140,7 @@ class BucketDClient:
parameters value. On the first request the function will be called with
`None` and should return its initial value. Return `None` for the param to be excluded.
'''
url = self.__url_format.format(addr=self._bucketd_addr, bucket=bucket)
url = self.__url_bucket_format.format(addr=self._bucketd_addr, bucket=bucket)
static_params = {k: v for k, v in kwargs.items() if not callable(v)}
dynamic_params = {k: v for k, v in kwargs.items() if callable(v)}
is_truncated = True # Set to True for first loop
@@ -114,6 +153,9 @@ class BucketDClient:
_log.debug('listing bucket bucket: %s params: %s'%(
bucket, ', '.join('%s=%s'%p for p in params.items())))
resp = self._do_req(url, params=params)
if resp.status_code == 404:
_log.debug('Bucket not found bucket: %s'%bucket)
return
if resp.status_code == 200:
payload = resp.json()
except ValueError as e:
@@ -135,7 +177,37 @@ class BucketDClient:
else:
is_truncated = len(payload) > 0
def list_buckets(self, name = None):
@functools.lru_cache(maxsize=16)
def _get_bucket_attributes(self, name):
url = self.__url_attribute_format.format(addr=self._bucketd_addr, bucket=name)
try:
resp = self._do_req(url)
if resp.status_code == 200:
return resp.json()
else:
_log.error('Error getting bucket attributes bucket:%s status_code:%s'%(name, resp.status_code))
raise BucketNotFound(name)
except ValueError as e:
_log.exception(e)
_log.error('Invalid attributes response body! bucket:%s'%name)
raise
except MaxRetriesReached:
_log.error('Max retries reached getting bucket attributes bucket:%s'%name)
raise
except Exception as e:
_log.exception(e)
_log.error('Unhandled exception getting bucket attributes bucket:%s'%name)
raise
def get_bucket_md(self, name):
md = self._get_bucket_attributes(name)
canonId = md.get('owner')
if canonId is None:
_log.error('No owner found for bucket %s'%name)
raise InvalidListing(name)
return Bucket(canonId, name, md.get('objectLockEnabled', False))
def list_buckets(self, account=None):
def get_next_marker(p):
if p is None:
@ -147,19 +219,24 @@ class BucketDClient:
'maxKeys': 1000,
'marker': get_next_marker
}
if account is not None:
params['prefix'] = '%s..|..' % account
for _, payload in self._list_bucket(USERS_BUCKET, **params):
buckets = []
for result in payload['Contents']:
for result in payload.get('Contents', []):
match = re.match("(\w+)..\|..(\w+.*)", result['key'])
bucket = Bucket(*match.groups())
if name is None or bucket.name == name:
buckets.append(bucket)
bucket = Bucket(*match.groups(), False)
# We need to get the attributes for each bucket to determine if it is locked
if self._only_latest_when_locked:
bucket_attrs = self._get_bucket_attributes(bucket.name)
object_lock_enabled = bucket_attrs.get('objectLockEnabled', False)
bucket = bucket._replace(object_lock_enabled=object_lock_enabled)
buckets.append(bucket)
if buckets:
yield buckets
if name is not None:
# Break on the first matching bucket if a name is given
break
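The regex above parses the users-bucket key layout, where the owner's canonical ID and the bucket name are joined by the literal separator ..|.. ; filtering by account works by turning a canonical ID into a listing prefix. A sketch with invented values:

import re

key = '79a59df900b949e55d96a1e698fbaced..|..my-bucket'
owner, name = re.match(r'(\w+)..\|..(\w+.*)', key).groups()
# owner == '79a59df900b949e55d96a1e698fbaced', name == 'my-bucket'
# list_buckets(account=owner) then lists with prefix '79a59df900b949e55d96a1e698fbaced..|..'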
def list_mpus(self, bucket):
_bucket = MPU_SHADOW_BUCKET_PREFIX + bucket.name
@ -196,18 +273,12 @@ class BucketDClient:
upload_id=key['value']['UploadId']))
return keys
def _sum_objects(self, bucket, listing):
def _sum_objects(self, bucket, listing, only_latest_when_locked = False):
count = 0
total_size = 0
last_master = None
last_size = None
for status_code, payload in listing:
contents = payload['Contents'] if isinstance(payload, dict) else payload
if contents is None:
_log.error('Invalid contents in listing. bucket:%s status_code:%s'%(bucket, status_code))
raise InvalidListing(bucket)
for obj in contents:
count += 1
last_key = None
try:
for obj in listing:
if isinstance(obj['value'], dict):
# bucketd v6 returns a dict:
data = obj.get('value', {})
@ -216,39 +287,51 @@ class BucketDClient:
# bucketd v7 returns an encoded string
data = json.loads(obj['value'])
size = data.get('content-length', 0)
is_latest = obj['key'] != last_key
last_key = obj['key']
if only_latest_when_locked and bucket.object_lock_enabled and not is_latest:
_log.debug('Skipping versioned key: %s'%obj['key'])
continue
count += 1
total_size += size
# If versioned, subtract the size of the master to avoid double counting
if last_master is not None and obj['key'].startswith(last_master + '\x00'):
_log.debug('Detected versioned key: %s - subtracting master size: %i'% (
obj['key'],
last_size,
))
total_size -= last_size
count -= 1
last_master = None
# Only save master versions
elif '\x00' not in obj['key']:
last_master = obj['key']
last_size = size
except InvalidListing:
_log.error('Invalid contents in listing. bucket:%s'%bucket.name)
raise InvalidListing(bucket.name)
return count, total_size
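The is_latest check above assumes the Versions listing returns every version of an object consecutively, repeating the object key with the current version first. Under that assumption, a toy walk-through of the only_latest_when_locked mode (sizes invented):

listing = [
    {'key': 'photo.jpg', 'value': {'content-length': 10}},  # current version
    {'key': 'photo.jpg', 'value': {'content-length': 7}},   # older version, skipped when locked
    {'key': 'notes.txt', 'value': {'content-length': 3}},   # next key, current version
]
# Object lock enabled and only_latest_when_locked=True: count == 2, total_size == 13
# Otherwise every version is billed: count == 3, total_size == 20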
def _extract_listing(self, key, listing):
for status_code, payload in listing:
contents = payload[key] if isinstance(payload, dict) else payload
if contents is None:
raise InvalidListing('')
for obj in contents:
yield obj
def count_bucket_contents(self, bucket):
def get_next_marker(p):
if p is None or len(p) == 0:
def get_key_marker(p):
if p is None:
return ''
return p[-1].get('key', '')
return p.get('NextKeyMarker', '')
def get_vid_marker(p):
if p is None:
return ''
return p.get('NextVersionIdMarker', '')
params = {
'listingType': 'Basic',
'listingType': 'DelimiterVersions',
'maxKeys': 1000,
'gt': get_next_marker,
'keyMarker': get_key_marker,
'versionIdMarker': get_vid_marker,
}
count, total_size = self._sum_objects(bucket.name, self._list_bucket(bucket.name, **params))
listing = self._list_bucket(bucket.name, **params)
count, total_size = self._sum_objects(bucket, self._extract_listing('Versions', listing), self._only_latest_when_locked)
return BucketContents(
bucket=bucket,
obj_count=count,
@ -256,7 +339,8 @@ class BucketDClient:
)
def count_mpu_parts(self, mpu):
_bucket = MPU_SHADOW_BUCKET_PREFIX + mpu.bucket.name
shadow_bucket_name = MPU_SHADOW_BUCKET_PREFIX + mpu.bucket.name
shadow_bucket = mpu.bucket._replace(name=shadow_bucket_name)
def get_prefix(p):
if p is None:
@ -276,13 +360,31 @@ class BucketDClient:
'listingType': 'Delimiter',
}
count, total_size = self._sum_objects(_bucket, self._list_bucket(_bucket, **params))
listing = self._list_bucket(shadow_bucket_name, **params)
count, total_size = self._sum_objects(shadow_bucket, self._extract_listing('Contents', listing))
return BucketContents(
bucket=mpu.bucket._replace(name=_bucket),
bucket=shadow_bucket,
obj_count=0, # MPU parts are not counted towards numberOfObjects
total_size=total_size
)
def list_all_buckets(bucket_client):
return bucket_client.list_buckets()
def list_specific_accounts(bucket_client, accounts):
for account in accounts:
yield from bucket_client.list_buckets(account=account)
def list_specific_buckets(bucket_client, buckets):
batch = []
for bucket in buckets:
try:
batch.append(bucket_client.get_bucket_md(bucket))
except BucketNotFound:
_log.error('Failed to list bucket %s. Removing from results.'%bucket)
continue
yield batch
def index_bucket(client, bucket):
'''
@ -322,9 +424,16 @@ def get_redis_client(options):
host=options.sentinel_ip,
port=options.sentinel_port,
db=0,
password=options.redis_password
password=options.redis_password,
socket_connect_timeout=SENTINEL_CONNECT_TIMEOUT_SECONDS
)
ip, port = sentinel.sentinel_get_master_addr_by_name(options.sentinel_cluster_name)
try:
ip, port = sentinel.sentinel_get_master_addr_by_name(options.sentinel_cluster_name)
except (redis.exceptions.ConnectionError, redis.exceptions.TimeoutError) as e:
_log.error(f'Failed to connect to redis sentinel at {options.sentinel_ip}:{options.sentinel_port}: {e}')
# use a specific error code to hint that the caller should retry with another sentinel node
sys.exit(EXIT_CODE_SENTINEL_CONNECTION_ERROR)
return redis.Redis(
host=ip,
port=port,
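The dedicated exit code lets a supervising process fail over between sentinels, as the UTAPI-105 change describes. A hypothetical wrapper loop (script name, flag, and exit-code value are assumptions):

import subprocess
import sys

EXIT_CODE_SENTINEL_CONNECTION_ERROR = 100  # must match the script's constant (value assumed)

for sentinel_ip in ['10.0.0.1', '10.0.0.2', '10.0.0.3']:  # stateful-node sentinels (invented)
    rc = subprocess.call([sys.executable, 'reindex.py', '--sentinel-ip', sentinel_ip])
    if rc != EXIT_CODE_SENTINEL_CONNECTION_ERROR:
        break  # success, or a failure that another sentinel will not fix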
@ -358,16 +467,24 @@ def log_report(resource, name, obj_count, total_size):
if __name__ == '__main__':
options = get_options()
if options.bucket is not None and not options.bucket.strip():
print('You must provide a bucket name with the --bucket flag')
sys.exit(1)
bucket_client = BucketDClient(options.bucketd_addr, options.max_retries)
if options.debug:
_log.setLevel(logging.DEBUG)
bucket_client = BucketDClient(options.bucketd_addr, options.max_retries, options.only_latest_when_locked)
redis_client = get_redis_client(options)
account_reports = {}
observed_buckets = set()
failed_accounts = set()
if options.account:
batch_generator = list_specific_accounts(bucket_client, options.account)
elif options.bucket:
batch_generator = list_specific_buckets(bucket_client, options.bucket)
else:
batch_generator = list_all_buckets(bucket_client)
with ThreadPoolExecutor(max_workers=options.worker) as executor:
for batch in bucket_client.list_buckets(options.bucket):
for batch in batch_generator:
bucket_reports = {}
jobs = { executor.submit(index_bucket, bucket_client, b): b for b in batch }
for job in futures.as_completed(jobs.keys()):
@ -386,51 +503,84 @@ if __name__ == '__main__':
update_report(account_reports, total.bucket.userid, total.obj_count, total.total_size)
# Bucket reports can be updated as we get them
pipeline = redis_client.pipeline(transaction=False) # No transaction to reduce redis load
for bucket, report in bucket_reports.items():
update_redis(pipeline, 'buckets', bucket, report['obj_count'], report['total_size'])
log_report('buckets', bucket, report['obj_count'], report['total_size'])
pipeline.execute()
if options.dry_run:
for bucket, report in bucket_reports.items():
_log.info(
"DryRun: resource buckets [%s] would be updated with obj_count %i and total_size %i" % (
bucket, report['obj_count'], report['total_size']
)
)
else:
pipeline = redis_client.pipeline(transaction=False) # No transaction to reduce redis load
for bucket, report in bucket_reports.items():
update_redis(pipeline, 'buckets', bucket, report['obj_count'], report['total_size'])
log_report('buckets', bucket, report['obj_count'], report['total_size'])
pipeline.execute()
stale_buckets = set()
recorded_buckets = set(get_resources_from_redis(redis_client, 'buckets'))
if options.bucket is None:
stale_buckets = recorded_buckets.difference(observed_buckets)
elif observed_buckets and options.bucket not in recorded_buckets:
# The provided bucket does not exist, so clean up any metrics
stale_buckets = { options.bucket }
if options.bucket:
stale_buckets = { b for b in options.bucket if b not in observed_buckets }
elif options.account:
_log.warning('Stale buckets will not be cleared when using the --account or --account-file flags')
else:
stale_buckets = set()
stale_buckets = recorded_buckets.difference(observed_buckets)
_log.info('Found %s stale buckets' % len(stale_buckets))
for chunk in chunks(stale_buckets, ACCOUNT_UPDATE_CHUNKSIZE):
pipeline = redis_client.pipeline(transaction=False) # No transaction to reduce redis load
for bucket in chunk:
update_redis(pipeline, 'buckets', bucket, 0, 0)
log_report('buckets', bucket, 0, 0)
pipeline.execute()
if options.dry_run:
_log.info("DryRun: not updating stale buckets")
else:
for chunk in chunks(stale_buckets, ACCOUNT_UPDATE_CHUNKSIZE):
pipeline = redis_client.pipeline(transaction=False) # No transaction to reduce redis load
for bucket in chunk:
update_redis(pipeline, 'buckets', bucket, 0, 0)
log_report('buckets', bucket, 0, 0)
pipeline.execute()
# Account metrics are not updated if a bucket is specified
if options.bucket is None:
if options.bucket:
_log.warning('Account metrics will not be updated when using the --bucket or --bucket-file flags')
else:
# Don't update any accounts with failed listings
without_failed = filter(lambda x: x[0] not in failed_accounts, account_reports.items())
# Update total account reports in chunks
for chunk in chunks(without_failed, ACCOUNT_UPDATE_CHUNKSIZE):
pipeline = redis_client.pipeline(transaction=False) # No transaction to reduce redis load
for userid, report in chunk:
update_redis(pipeline, 'accounts', userid, report['obj_count'], report['total_size'])
log_report('accounts', userid, report['obj_count'], report['total_size'])
pipeline.execute()
if options.dry_run:
for userid, report in account_reports.items():
_log.info(
"DryRun: resource account [%s] would be updated with obj_count %i and total_size %i" % (
userid, report['obj_count'], report['total_size']
)
)
else:
# Update total account reports in chunks
for chunk in chunks(without_failed, ACCOUNT_UPDATE_CHUNKSIZE):
pipeline = redis_client.pipeline(transaction=False) # No transaction to reduce redis load
for userid, report in chunk:
update_redis(pipeline, 'accounts', userid, report['obj_count'], report['total_size'])
log_report('accounts', userid, report['obj_count'], report['total_size'])
pipeline.execute()
if options.account:
for account in options.account:
if account in failed_accounts:
_log.error("No metrics updated for account %s, one or more buckets failed" % account)
# Include failed_accounts in observed_accounts to avoid clearing metrics
observed_accounts = failed_accounts.union(set(account_reports.keys()))
recorded_accounts = set(get_resources_from_redis(redis_client, 'accounts'))
# Stale accounts and buckets are ones that do not appear in the listing, but have recorded values
stale_accounts = recorded_accounts.difference(observed_accounts)
if options.account:
stale_accounts = { a for a in options.account if a not in observed_accounts }
else:
# Stale accounts and buckets are ones that do not appear in the listing, but have recorded values
stale_accounts = recorded_accounts.difference(observed_accounts)
_log.info('Found %s stale accounts' % len(stale_accounts))
for chunk in chunks(stale_accounts, ACCOUNT_UPDATE_CHUNKSIZE):
pipeline = redis_client.pipeline(transaction=False) # No transaction to reduce redis load
for account in chunk:
update_redis(pipeline, 'accounts', account, 0, 0)
log_report('accounts', account, 0, 0)
pipeline.execute()
if options.dry_run:
_log.info("DryRun: not updating stale accounts")
else:
for chunk in chunks(stale_accounts, ACCOUNT_UPDATE_CHUNKSIZE):
pipeline = redis_client.pipeline(transaction=False) # No transaction to reduce redis load
for account in chunk:
update_redis(pipeline, 'accounts', account, 0, 0)
log_report('accounts', account, 0, 0)
pipeline.execute()
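Taken together, the stale-metrics pass zeroes any resource Redis remembers but the current run did not observe, except when an account filter makes the listing intentionally partial. A condensed sketch of the bucket-side decision (hypothetical helper, not part of the script):

def stale_buckets_for(options, recorded, observed):
    if options.bucket:       # explicit buckets: only those can go stale
        return {b for b in options.bucket if b not in observed}
    if options.account:      # account filter: listing is partial, skip cleanup
        return set()
    return recorded - observed  # full run: zero anything no longer listed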

View File

@ -7,7 +7,6 @@ const { Clustering, errors, ipCheck } = require('arsenal');
const arsenalHttps = require('arsenal').https;
const { Logger } = require('werelogs');
const config = require('./Config');
const routes = require('../router/routes');
const Route = require('../router/Route');
const Router = require('../router/Router');
@ -28,7 +27,12 @@ class UtapiServer {
constructor(worker, port, datastore, logger, config) {
this.worker = worker;
this.port = port;
this.router = new Router(config);
this.vault = config.vaultclient;
if (!this.vault) {
const Vault = require('./Vault');
this.vault = new Vault(config);
}
this.router = new Router(config, this.vault);
this.logger = logger;
this.datastore = datastore;
this.server = null;
@ -71,6 +75,7 @@ class UtapiServer {
req.socket.setNoDelay();
const { query, path, pathname } = url.parse(req.url, true);
const utapiRequest = new UtapiRequest()
.setVault(this.vault)
.setRequest(req)
.setLog(this.logger.newRequestLogger())
.setResponse(res)
@ -214,8 +219,7 @@ class UtapiServer {
* @property {object} params.log - logger configuration
* @return {undefined}
*/
function spawn(params) {
Object.assign(config, params);
function spawn(config) {
const {
workers, redis, log, port,
} = config;

View File

@ -23,7 +23,7 @@ class CacheClient {
async pushMetric(metric) {
const shard = shardFromTimestamp(metric.timestamp);
if (!this._cacheBackend.addToShard(shard, metric)) {
if (!(await this._cacheBackend.addToShard(shard, metric))) {
return false;
}
await this._counterBackend.updateCounters(metric);

View File

@ -23,10 +23,6 @@
"healthChecks": {
"allowFrom": ["127.0.0.1/8", "::1"]
},
"vaultd": {
"host": "127.0.0.1",
"port": 8500
},
"cacheBackend": "memory",
"development": false,
"nodeId": "single_node",
@ -54,5 +50,15 @@
"filter": {
"allow": {},
"deny": {}
},
"metrics" : {
"enabled": false,
"host": "localhost",
"ingestPort": 10902,
"checkpointPort": 10903,
"snapshotPort": 10904,
"diskUsagePort": 10905,
"reindexPort": 10906,
"repairPort": 10907
}
}

View File

@ -2,6 +2,8 @@ const fs = require('fs');
const path = require('path');
const Joi = require('@hapi/joi');
const assert = require('assert');
const defaults = require('./defaults.json');
const werelogs = require('werelogs');
const {
truthy, envNamespace, allowedFilterFields, allowedFilterStates,
@ -71,7 +73,6 @@ class Config {
constructor(overrides) {
this._basePath = path.join(__dirname, '../../');
this._configPath = _loadFromEnv('CONFIG_FILE', defaultConfigPath);
this._defaultsPath = path.join(__dirname, 'defaults.json');
this.host = undefined;
this.port = undefined;
@ -89,6 +90,11 @@ class Config {
parsedConfig = this._recursiveUpdate(parsedConfig, overrides);
}
Object.assign(this, parsedConfig);
werelogs.configure({
level: Config.logging.level,
dump: Config.logging.dumpLevel,
});
}
static _readFile(path, encoding = 'utf-8') {
@ -113,7 +119,7 @@ class Config {
}
_loadDefaults() {
return Config._readJSON(this._defaultsPath);
return defaults;
}
_loadUserConfig() {
@ -192,6 +198,10 @@ class Config {
`${prefix}_SENTINEL_PASSWORD`,
config.sentinelPassword,
);
redisConf.password = _loadFromEnv(
`${prefix}_PASSWORD`,
config.password,
);
} else {
redisConf.host = _loadFromEnv(
`${prefix}_HOST`,
@ -375,6 +385,17 @@ class Config {
parsedConfig.filter = Config._parseResourceFilters(config.filter);
parsedConfig.metrics = {
enabled: _loadFromEnv('METRICS_ENABLED', config.metrics.enabled, _typeCasts.bool),
host: _loadFromEnv('METRICS_HOST', config.metrics.host),
ingestPort: _loadFromEnv('METRICS_PORT_INGEST', config.metrics.ingestPort, _typeCasts.int),
checkpointPort: _loadFromEnv('METRICS_PORT_CHECKPOINT', config.metrics.checkpointPort, _typeCasts.int),
snapshotPort: _loadFromEnv('METRICS_PORT_SNAPSHOT', config.metrics.snapshotPort, _typeCasts.int),
diskUsagePort: _loadFromEnv('METRICS_PORT_DISK_USAGE', config.metrics.diskUsagePort, _typeCasts.int),
reindexPort: _loadFromEnv('METRICS_PORT_REINDEX', config.metrics.reindexPort, _typeCasts.int),
repairPort: _loadFromEnv('METRICS_PORT_REPAIR', config.metrics.repairPort, _typeCasts.int),
};
return parsedConfig;
}

View File

@ -115,7 +115,16 @@ const schema = Joi.object({
return filterObj;
}, {},
)),
metrics: {
enabled: Joi.boolean(),
host: Joi.string(),
ingestPort: Joi.number().port(),
checkpointPort: Joi.number().port(),
snapshotPort: Joi.number().port(),
diskUsagePort: Joi.number().port(),
reindexPort: Joi.number().port(),
repairPort: Joi.number().port(),
},
});
module.exports = schema;

View File

@ -22,6 +22,7 @@ const constants = {
'deleteBucketEncryption',
'deleteBucketLifecycle',
'deleteBucketReplication',
'deleteBucketTagging',
'deleteBucketWebsite',
'deleteObject',
'deleteObjectTagging',
@ -34,6 +35,7 @@ const constants = {
'getBucketObjectLock',
'getBucketReplication',
'getBucketVersioning',
'getBucketTagging',
'getBucketWebsite',
'getObject',
'getObjectAcl',
@ -55,6 +57,7 @@ const constants = {
'putBucketObjectLock',
'putBucketReplication',
'putBucketVersioning',
'putBucketTagging',
'putBucketWebsite',
'putDeleteMarkerObject',
'putObject',

View File

@ -1,5 +1,5 @@
const BucketClientInterface = require('arsenal/lib/storage/metadata/bucketclient/BucketClientInterface');
const bucketclient = require('bucketclient');
const { BucketClientInterface } = require('arsenal').storage.metadata.bucketclient;
const config = require('../config');
const { LoggerContext } = require('../utils');

View File

@ -108,7 +108,7 @@ function bucketExists(bucket) {
bucket,
logger.newRequestLogger(),
err => {
if (err && !err.NoSuchBucket) {
if (err && (!err.is || !err.is.NoSuchBucket)) {
reject(err);
return;
}

View File

@ -3,6 +3,7 @@ const Joi = require('@hapi/joi');
const { buildModel } = require('./Base');
const { apiOperations } = require('../server/spec');
const ResponseContainer = require('./ResponseContainer');
const { httpRequestDurationSeconds } = require('../server/metrics');
const apiTags = Object.keys(apiOperations);
const apiOperationIds = Object.values(apiOperations)
@ -21,6 +22,7 @@ const contextSchema = {
logger: Joi.any(),
request: Joi.any(),
results: Joi.any(),
requestTimer: Joi.any(),
};
const RequestContextModel = buildModel('RequestContext', contextSchema);
@ -34,6 +36,10 @@ class RequestContext extends RequestContextModel {
const tag = request.swagger.operation['x-router-controller'];
const { operationId } = request.swagger.operation;
const requestTimer = tag !== 'internal'
? httpRequestDurationSeconds.startTimer({ action: operationId })
: null;
request.logger.logger.addDefaultFields({
tag,
operationId,
@ -50,6 +56,7 @@ class RequestContext extends RequestContextModel {
encrypted,
results: new ResponseContainer(),
logger: request.logger,
requestTimer,
});
}

View File

@ -6,7 +6,8 @@ const BackOff = require('backo');
const { whilst } = require('async');
const errors = require('./errors');
const { LoggerContext, asyncOrCallback } = require('./utils');
const { LoggerContext } = require('./utils/log');
const { asyncOrCallback } = require('./utils/func');
const moduleLogger = new LoggerContext({
module: 'redis',

View File

@ -96,7 +96,7 @@ async function listMetric(ctx, params) {
const metric = {
...result.metrics,
timeRange: [ start, end ],
timeRange: [start, end],
operations: {
...emptyOperationsResponse,
...operations,

View File

@ -28,6 +28,7 @@ class UtapiServer extends Process {
app.use(middleware.loggerMiddleware);
await initializeOasTools(spec, app);
app.use(middleware.errorMiddleware);
app.use(middleware.httpMetricsMiddleware);
app.use(middleware.responseLoggerMiddleware);
return app;
}

libV2/server/metrics.js Normal file
View File

@ -0,0 +1,20 @@
const promClient = require('prom-client');
const httpRequestsTotal = new promClient.Counter({
name: 's3_utapi_http_requests_total',
help: 'Total number of HTTP requests',
labelNames: ['action', 'code'],
});
const httpRequestDurationSeconds = new promClient.Histogram({
name: 's3_utapi_http_request_duration_seconds',
help: 'Duration of HTTP requests in seconds',
labelNames: ['action', 'code'],
// buckets for response time from 0.1ms to 60s
buckets: [0.0001, 0.005, 0.015, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 1.0, 5.0, 15.0, 30.0, 60.0],
});
module.exports = {
httpRequestDurationSeconds,
httpRequestsTotal,
};

View File

@ -6,6 +6,7 @@ const config = require('../config');
const { logger, buildRequestLogger } = require('../utils');
const errors = require('../errors');
const { translateAndAuthorize } = require('../vault');
const metricHandlers = require('./metrics');
const oasOptions = {
controllers: path.join(__dirname, './API/'),
@ -55,6 +56,23 @@ function responseLoggerMiddleware(req, res, next) {
}
}
function httpMetricsMiddleware(request, response, next) {
// If the request.ctx is undefined then this is an internal oasTools request (/_/docs)
// No metrics should be pushed
if (config.metrics.enabled && request.ctx && request.ctx.tag !== 'internal') {
metricHandlers.httpRequestsTotal
.labels({
action: request.ctx.operationId,
code: response.statusCode,
}).inc(1);
request.ctx.requestTimer({ code: response.statusCode });
}
if (next) {
next();
}
}
// next is purposely not called as all error responses are handled here
// eslint-disable-next-line no-unused-vars
function errorMiddleware(err, req, res, next) {
@ -82,7 +100,7 @@ function errorMiddleware(err, req, res, next) {
code,
message,
});
responseLoggerMiddleware(req, res);
responseLoggerMiddleware(req, res, () => httpMetricsMiddleware(req, res));
}
// eslint-disable-next-line no-unused-vars
@ -119,7 +137,7 @@ async function authV4Middleware(request, response, params) {
} catch (error) {
request.logger.error('error during authentication', { error });
// rethrow any access denied errors
if (error.AccessDenied) {
if ((error.is && error.is.AccessDenied) || (error.utapiError && error.AccessDenied)) {
throw error;
}
throw errors.InternalError;
@ -158,5 +176,6 @@ module.exports = {
responseLoggerMiddleware,
authV4Middleware,
clientIpLimitMiddleware,
httpMetricsMiddleware,
},
};

View File

@ -1,10 +1,12 @@
const assert = require('assert');
const cron = require('node-schedule');
const cronparser = require('cron-parser');
const promClient = require('prom-client');
const { DEFAULT_METRICS_ROUTE } = require('arsenal').network.probe.ProbeServer;
const { client: cacheClient } = require('../cache');
const Process = require('../process');
const { LoggerContext, iterIfError } = require('../utils');
const { LoggerContext, iterIfError, startProbeServer } = require('../utils');
const logger = new LoggerContext({
module: 'BaseTask',
@ -22,6 +24,11 @@ class BaseTask extends Process {
this._scheduler = null;
this._defaultSchedule = Now;
this._defaultLag = 0;
this._enableMetrics = options.enableMetrics || false;
this._metricsHost = options.metricsHost || 'localhost';
this._metricsPort = options.metricsPort || 9001;
this._metricsHandlers = null;
this._probeServer = null;
}
async _setup(includeDefaultOpts = true) {
@ -39,6 +46,75 @@ class BaseTask extends Process {
.option('-l, --lag <lag>', 'Set a custom lag time in seconds', v => parseInt(v, 10))
.option('-n, --node-id <id>', 'Set a custom node id');
}
if (this._enableMetrics) {
promClient.collectDefaultMetrics({
timeout: 10000,
gcDurationBuckets: [0.001, 0.01, 0.1, 1, 2, 5],
});
this._metricsHandlers = {
...this._registerDefaultMetricHandlers(),
...this._registerMetricHandlers(),
};
await this._createProbeServer();
}
}
_registerDefaultMetricHandlers() {
const taskName = this.constructor.name;
// Get the name of our subclass in snake case format eg BaseClass => _base_class
const taskNameSnake = taskName.replace(/[A-Z]/g, letter => `_${letter.toLowerCase()}`);
const executionDuration = new promClient.Gauge({
name: `s3_utapi${taskNameSnake}_duration_seconds`,
help: `Execution time of the ${taskName} task`,
labelNames: ['origin', 'containerName'],
});
const executionAttempts = new promClient.Counter({
name: `s3_utapi${taskNameSnake}_attempts_total`,
help: `Total number of attempts to execute the ${taskName} task`,
labelNames: ['origin', 'containerName'],
});
const executionFailures = new promClient.Counter({
name: `s3_utapi${taskNameSnake}_failures_total`,
help: `Total number of failures executing the ${taskName} task`,
labelNames: ['origin', 'containerName'],
});
return {
executionDuration,
executionAttempts,
executionFailures,
};
}
// eslint-disable-next-line class-methods-use-this
_registerMetricHandlers() {
return {};
}
async _createProbeServer() {
this._probeServer = await startProbeServer({
bindAddress: this._metricsHost,
port: this._metricsPort,
});
this._probeServer.addHandler(
DEFAULT_METRICS_ROUTE,
(res, log) => {
log.debug('metrics requested');
res.writeHead(200, {
'Content-Type': promClient.register.contentType,
});
promClient.register.metrics().then(metrics => {
res.end(metrics);
});
},
);
}
get schedule() {
@ -79,12 +155,23 @@ class BaseTask extends Process {
}
async execute() {
let endTimer;
if (this._enableMetrics) {
endTimer = this._metricsHandlers.executionDuration.startTimer();
this._metricsHandlers.executionAttempts.inc(1);
}
try {
const timestamp = new Date() * 1000; // Timestamp in microseconds;
const laggedTimestamp = timestamp - (this.lag * 1000000);
await this._execute(laggedTimestamp);
} catch (error) {
logger.error('Error during task execution', { error });
this._metricsHandlers.executionFailures.inc(1);
}
if (this._enableMetrics) {
endTimer();
}
}
@ -94,6 +181,9 @@ class BaseTask extends Process {
}
async _join() {
if (this._probeServer !== null) {
this._probeServer.stop();
}
return this._cache.disconnect();
}

View File

@ -1,3 +1,4 @@
const promClient = require('prom-client');
const BaseTask = require('./BaseTask');
const config = require('../config');
const { checkpointLagSecs, indexedEventFields } = require('../constants');
@ -9,11 +10,88 @@ const logger = new LoggerContext({
class CreateCheckpoint extends BaseTask {
constructor(options) {
super(options);
super({
enableMetrics: config.metrics.enabled,
metricsHost: config.metrics.host,
metricsPort: config.metrics.checkpointPort,
...options,
});
this._defaultSchedule = config.checkpointSchedule;
this._defaultLag = checkpointLagSecs;
}
// eslint-disable-next-line class-methods-use-this
_registerMetricHandlers() {
const created = new promClient.Counter({
name: 's3_utapi_create_checkpoint_created_total',
help: 'Total number of checkpoints created',
labelNames: ['origin', 'containerName'],
});
const getLastCheckpoint = this._getLastCheckpoint.bind(this);
const lastCheckpoint = new promClient.Gauge({
name: 's3_utapi_create_checkpoint_last_checkpoint_seconds',
help: 'Timestamp of the last successfully created checkpoint',
labelNames: ['origin', 'containerName'],
async collect() {
try {
const timestamp = await getLastCheckpoint();
if (timestamp !== null) {
this.set(timestamp);
}
} catch (error) {
logger.error('error during metric collection', { error });
}
},
});
return {
created,
lastCheckpoint,
};
}
/**
* Metrics for CreateCheckpoint
* @typedef {Object} CreateCheckpointMetrics
* @property {number} created - Number of checkpoints created
*/
/**
*
* @param {CreateCheckpointMetrics} metrics - Metric values to push
* @returns {undefined}
*/
_pushMetrics(metrics) {
if (!this._enableMetrics) {
return;
}
if (metrics.created !== undefined) {
this._metricsHandlers.created.inc(metrics.created);
}
}
async _getLastCheckpoint() {
const resp = await this.withWarp10(async warp10 => warp10.fetch({
className: 'utapi.checkpoint.master',
labels: {
node: warp10.nodeId,
},
start: 'now',
stop: -1,
}));
if (!resp.result || (resp.result.length === 0 || resp.result[0] === '' || resp.result[0] === '[]')) {
return null;
}
const result = JSON.parse(resp.result[0])[0];
const timestamp = result.v[0][0];
return timestamp / 1000000;// Convert timestamp from microseconds to seconds
}
async _execute(timestamp) {
logger.debug('creating checkpoints', { checkpointTimestamp: timestamp });
const status = await this.withWarp10(async warp10 => {
@ -29,6 +107,7 @@ class CreateCheckpoint extends BaseTask {
});
if (status.result[0]) {
logger.info(`created ${status.result[0] || 0} checkpoints`);
this._pushMetrics({ created: status.result[0] });
}
}
}

View File

@ -1,3 +1,4 @@
const promClient = require('prom-client');
const BaseTask = require('./BaseTask');
const config = require('../config');
const { snapshotLagSecs } = require('../constants');
@ -9,11 +10,88 @@ const logger = new LoggerContext({
class CreateSnapshot extends BaseTask {
constructor(options) {
super(options);
super({
enableMetrics: config.metrics.enabled,
metricsHost: config.metrics.host,
metricsPort: config.metrics.snapshotPort,
...options,
});
this._defaultSchedule = config.snapshotSchedule;
this._defaultLag = snapshotLagSecs;
}
// eslint-disable-next-line class-methods-use-this
_registerMetricHandlers() {
const created = new promClient.Counter({
name: 's3_utapi_create_snapshot_created_total',
help: 'Total number of snapshots created',
labelNames: ['origin', 'containerName'],
});
const getLastSnapshot = this._getLastSnapshot.bind(this);
const lastSnapshot = new promClient.Gauge({
name: 's3_utapi_create_snapshot_last_snapshot_seconds',
help: 'Timestamp of the last successfully created snapshot',
labelNames: ['origin', 'containerName'],
async collect() {
try {
const timestamp = await getLastSnapshot();
if (timestamp !== null) {
this.set(timestamp);
}
} catch (error) {
logger.error('error during metric collection', { error });
}
},
});
return {
created,
lastSnapshot,
};
}
/**
* Metrics for CreateSnapshot
* @typedef {Object} CreateSnapshotMetrics
* @property {number} created - Number of snapshots created
*/
/**
*
* @param {CreateSnapshotMetrics} metrics - Metric values to push
* @returns {undefined}
*/
_pushMetrics(metrics) {
if (!this._enableMetrics) {
return;
}
if (metrics.created !== undefined) {
this._metricsHandlers.created.inc(metrics.created);
}
}
async _getLastSnapshot() {
const resp = await this.withWarp10(async warp10 => warp10.fetch({
className: 'utapi.snapshot.master',
labels: {
node: warp10.nodeId,
},
start: 'now',
stop: -1,
}));
if (!resp.result || (resp.result.length === 0 || resp.result[0] === '' || resp.result[0] === '[]')) {
return null;
}
const result = JSON.parse(resp.result[0])[0];
const timestamp = result.v[0][0];
return timestamp / 1000000;// Convert timestamp from microseconds to seconds
}
async _execute(timestamp) {
logger.debug('creating snapshots', { snapshotTimestamp: timestamp });
@ -29,6 +107,7 @@ class CreateSnapshot extends BaseTask {
});
if (status.result[0]) {
logger.info(`created ${status.result[0]} snapshots`);
this._pushMetrics({ created: status.result[0] });
}
}
}

View File

@ -1,4 +1,7 @@
const async = require('async');
const Path = require('path');
const fs = require('fs');
const promClient = require('prom-client');
const BaseTask = require('./BaseTask');
const config = require('../config');
const { expirationChunkDuration } = require('../constants');
@ -16,9 +19,13 @@ const ACTION_THRESHOLD = 0.95;
class MonitorDiskUsage extends BaseTask {
constructor(options) {
super(
options,
);
super({
enableMetrics: config.metrics.enabled,
metricsHost: config.metrics.host,
metricsPort: config.metrics.diskUsagePort,
...options,
});
this._defaultSchedule = config.diskUsageSchedule;
this._defaultLag = 0;
this._path = config.diskUsage.path;
@ -42,6 +49,88 @@ class MonitorDiskUsage extends BaseTask {
);
}
// eslint-disable-next-line class-methods-use-this
_registerMetricHandlers() {
const isLocked = new promClient.Gauge({
name: 's3_utapi_monitor_disk_usage_is_locked',
help: 'Indicates whether the monitored warp 10 has had writes disabled',
labelNames: ['origin', 'containerName'],
});
const leveldbBytes = new promClient.Gauge({
name: 's3_utapi_monitor_disk_usage_leveldb_bytes',
help: 'Total bytes used by warp 10 leveldb',
labelNames: ['origin', 'containerName'],
});
const datalogBytes = new promClient.Gauge({
name: 's3_utapi_monitor_disk_usage_datalog_bytes',
help: 'Total bytes used by warp 10 datalog',
labelNames: ['origin', 'containerName'],
});
const hardLimitRatio = new promClient.Gauge({
name: 's3_utapi_monitor_disk_usage_hard_limit_ratio',
help: 'Percent of the hard limit used by warp 10',
labelNames: ['origin', 'containerName'],
});
const hardLimitSetting = new promClient.Gauge({
name: 's3_utapi_monitor_disk_usage_hard_limit_bytes',
help: 'The hard limit setting in bytes',
labelNames: ['origin', 'containerName'],
});
return {
isLocked,
leveldbBytes,
datalogBytes,
hardLimitRatio,
hardLimitSetting,
};
}
/**
* Metrics for MonitorDiskUsage
* @typedef {Object} MonitorDiskUsageMetrics
* @property {boolean} isLocked - Indicates if writes have been disabled for the monitored warp10
* @property {number} leveldbBytes - Total bytes used by warp 10 leveldb
* @property {number} datalogBytes - Total bytes used by warp 10 datalog
* @property {number} hardLimitRatio - Percent of the hard limit used by warp 10
* @property {number} hardLimitSetting - The hard limit setting in bytes
*/
/**
*
* @param {MonitorDiskUsageMetrics} metrics - Metric values to push
* @returns {undefined}
*/
_pushMetrics(metrics) {
if (!this._enableMetrics) {
return;
}
if (metrics.isLocked !== undefined) {
this._metricsHandlers.isLocked.set(metrics.isLocked ? 1 : 0);
}
if (metrics.leveldbBytes !== undefined) {
this._metricsHandlers.leveldbBytes.set(metrics.leveldbBytes);
}
if (metrics.datalogBytes !== undefined) {
this._metricsHandlers.datalogBytes.set(metrics.datalogBytes);
}
if (metrics.hardLimitRatio !== undefined) {
this._metricsHandlers.hardLimitRatio.set(metrics.hardLimitRatio);
}
if (metrics.hardLimitSetting !== undefined) {
this._metricsHandlers.hardLimitSetting.set(metrics.hardLimitSetting);
}
}
get isLeader() {
return this._program.leader !== undefined;
}
@ -54,9 +143,13 @@ class MonitorDiskUsage extends BaseTask {
return this._program.lock !== undefined;
}
_getUsage() {
moduleLogger.debug(`calculating disk usage for ${this._path}`);
return getFolderSize(this._path);
// eslint-disable-next-line class-methods-use-this
async _getUsage(path) {
moduleLogger.debug(`calculating disk usage for ${path}`);
if (!fs.existsSync(path)) {
throw Error(`failed to calculate usage for non-existent path ${path}`);
}
return getFolderSize(path);
}
async _expireMetrics(timestamp) {
@ -102,7 +195,7 @@ class MonitorDiskUsage extends BaseTask {
}
_checkHardLimit(size, nodeId) {
const hardPercentage = (size / this._hardLimit).toFixed(2);
const hardPercentage = parseFloat((size / this._hardLimit).toFixed(2));
const hardLimitHuman = formatDiskSize(this._hardLimit);
const hardLogger = moduleLogger.with({
size,
@ -113,6 +206,8 @@ class MonitorDiskUsage extends BaseTask {
nodeId,
});
this._pushMetrics({ hardLimitRatio: hardPercentage });
const msg = `Using ${hardPercentage * 100}% of the ${hardLimitHuman} hard limit on ${nodeId}`;
if (hardPercentage < WARN_THRESHOLD) {
@ -150,12 +245,14 @@ class MonitorDiskUsage extends BaseTask {
if (this.isManualUnlock) {
moduleLogger.info('manually unlocking warp 10', { nodeId: this.nodeId });
await this._enableWarp10Updates();
this._pushMetrics({ isLocked: false });
return;
}
if (this.isManualLock) {
moduleLogger.info('manually locking warp 10', { nodeId: this.nodeId });
await this._disableWarp10Updates();
this._pushMetrics({ isLocked: true });
return;
}
@ -170,16 +267,21 @@ class MonitorDiskUsage extends BaseTask {
return;
}
let size = null;
let leveldbBytes = null;
let datalogBytes = null;
try {
size = await this._getUsage();
leveldbBytes = await this._getUsage(Path.join(this._path, 'leveldb'));
datalogBytes = await this._getUsage(Path.join(this._path, 'datalog'));
} catch (error) {
moduleLogger.error(`error calculating disk usage for ${this._path}`, { error });
return;
}
this._pushMetrics({ leveldbBytes, datalogBytes });
const size = leveldbBytes + datalogBytes;
if (this._hardLimit !== null) {
moduleLogger.info(`warp 10 leveldb using ${formatDiskSize(size)} of disk space`, { usage: size });
moduleLogger.info(`warp 10 using ${formatDiskSize(size)} of disk space`, { leveldbBytes, datalogBytes });
const shouldLock = this._checkHardLimit(size, this.nodeId);
if (shouldLock) {
@ -190,6 +292,7 @@ class MonitorDiskUsage extends BaseTask {
{ nodeId: this.nodeId });
await this._enableWarp10Updates();
}
this._pushMetrics({ isLocked: shouldLock, hardLimitSetting: this._hardLimit });
}
}
}

View File

@ -1,5 +1,6 @@
const assert = require('assert');
const async = require('async');
const promClient = require('prom-client');
const BaseTask = require('./BaseTask');
const { UtapiMetric } = require('../models');
const config = require('../config');
@ -16,12 +17,88 @@ const checkpointLagMicroseconds = convertTimestamp(checkpointLagSecs);
class IngestShardTask extends BaseTask {
constructor(options) {
super(options);
super({
enableMetrics: config.metrics.enabled,
metricsHost: config.metrics.host,
metricsPort: config.metrics.ingestPort,
...options,
});
this._defaultSchedule = config.ingestionSchedule;
this._defaultLag = config.ingestionLagSeconds;
this._stripEventUUID = options.stripEventUUID !== undefined ? options.stripEventUUID : true;
}
// eslint-disable-next-line class-methods-use-this
_registerMetricHandlers() {
const ingestedTotal = new promClient.Counter({
name: 's3_utapi_ingest_shard_task_ingest_total',
help: 'Total number of metrics ingested',
labelNames: ['origin', 'containerName'],
});
const ingestedSlow = new promClient.Counter({
name: 's3_utapi_ingest_shard_task_slow_total',
help: 'Total number of slow metrics ingested',
labelNames: ['origin', 'containerName'],
});
const ingestedShards = new promClient.Counter({
name: 's3_utapi_ingest_shard_task_shard_ingest_total',
help: 'Total number of metric shards ingested',
labelNames: ['origin', 'containerName'],
});
const shardAgeTotal = new promClient.Counter({
name: 's3_utapi_ingest_shard_task_shard_age_total',
help: 'Total aggregated age of shards',
labelNames: ['origin', 'containerName'],
});
return {
ingestedTotal,
ingestedSlow,
ingestedShards,
shardAgeTotal,
};
}
/**
* Metrics for IngestShardTask
* @typedef {Object} IngestShardMetrics
* @property {number} ingestedTotal - Number of events ingested
* @property {number} ingestedSlow - Number of slow events ingested
* @property {number} ingestedShards - Number of metric shards ingested
* @property {number} shardAgeTotal - Aggregated age of shards
*/
/**
*
* @param {IngestShardMetrics} metrics - Metric values to push
* @returns {undefined}
*/
_pushMetrics(metrics) {
if (!this._enableMetrics) {
return;
}
if (metrics.ingestedTotal !== undefined) {
this._metricsHandlers.ingestedTotal.inc(metrics.ingestedTotal);
}
if (metrics.ingestedSlow !== undefined) {
this._metricsHandlers.ingestedSlow.inc(metrics.ingestedSlow);
}
if (metrics.ingestedShards !== undefined) {
this._metricsHandlers.ingestedShards.inc(metrics.ingestedShards);
}
if (metrics.shardAgeTotal !== undefined) {
this._metricsHandlers.shardAgeTotal.inc(metrics.shardAgeTotal);
}
}
_hydrateEvent(data, stripTimestamp = false) {
const event = JSON.parse(data);
if (this._stripEventUUID) {
@ -47,6 +124,8 @@ class IngestShardTask extends BaseTask {
return;
}
let shardAgeTotal = 0;
let ingestedShards = 0;
await async.eachLimit(toIngest, 10,
async shard => {
if (await this._cache.shardExists(shard)) {
@ -84,6 +163,13 @@ class IngestShardTask extends BaseTask {
assert.strictEqual(status, records.length);
await this._cache.deleteShard(shard);
logger.info(`ingested ${status} records from ${config.nodeId} into ${ingestedIntoNodeId}`);
shardAgeTotal += shardAge;
ingestedShards += 1;
this._pushMetrics({ ingestedTotal: records.length });
if (areSlowEvents) {
this._pushMetrics({ ingestedSlow: records.length });
}
} else {
logger.debug('No events found in shard, cleaning up');
}
@ -91,6 +177,8 @@ class IngestShardTask extends BaseTask {
logger.warn('shard does not exist', { shard });
}
});
const shardAgeTotalSecs = shardAgeTotal / 1000000;
this._pushMetrics({ shardAgeTotal: shardAgeTotalSecs, ingestedShards });
}
}

View File

@ -20,7 +20,13 @@ const logger = new LoggerContext({
class ReindexTask extends BaseTask {
constructor(options) {
super(options);
super({
enableMetrics: config.metrics.enabled,
metricsHost: config.metrics.host,
metricsPort: config.metrics.reindexPort,
...options,
});
this._defaultSchedule = config.reindexSchedule;
this._defaultLag = 0;
const eventFilters = (config && config.filter) || {};

View File

@ -1,3 +1,4 @@
const promClient = require('prom-client');
const BaseTask = require('./BaseTask');
const config = require('../config');
const { LoggerContext } = require('../utils');
@ -9,11 +10,51 @@ const logger = new LoggerContext({
class RepairTask extends BaseTask {
constructor(options) {
super(options);
super({
enableMetrics: config.metrics.enabled,
metricsHost: config.metrics.host,
metricsPort: config.metrics.repairPort,
...options,
});
this._defaultSchedule = config.repairSchedule;
this._defaultLag = repairLagSecs;
}
// eslint-disable-next-line class-methods-use-this
_registerMetricHandlers() {
const created = new promClient.Counter({
name: 's3_utapi_repair_task_created_total',
help: 'Total number of repair records created',
labelNames: ['origin', 'containerName'],
});
return {
created,
};
}
/**
* Metrics for RepairTask
* @typedef {Object} RepairMetrics
* @property {number} created - Number of repair records created
*/
/**
*
* @param {RepairMetrics} metrics - Metric values to push
* @returns {undefined}
*/
_pushMetrics(metrics) {
if (!this._enableMetrics) {
return;
}
if (metrics.created !== undefined) {
this._metricsHandlers.created.inc(metrics.created);
}
}
async _execute(timestamp) {
logger.debug('Checking for repairs', { timestamp, nodeId: this.nodeId });
@ -30,6 +71,7 @@ class RepairTask extends BaseTask {
});
if (status.result[0]) {
logger.info(`created ${status.result[0]} corrections`);
this._pushMetrics({ created: status.result[0] });
}
}
}

View File

@ -4,6 +4,7 @@ const timestamp = require('./timestamp');
const func = require('./func');
const disk = require('./disk');
const filter = require('./filter');
const probe = require('./probe');
module.exports = {
...log,
@ -12,4 +13,5 @@ module.exports = {
...func,
...disk,
...filter,
...probe,
};

View File

@ -1,14 +1,6 @@
const werelogs = require('werelogs');
const config = require('../config');
const { comprehend } = require('./func');
const loggerConfig = {
level: config.logging.level,
dump: config.logging.dumpLevel,
};
werelogs.configure(loggerConfig);
const rootLogger = new werelogs.Logger('Utapi');
class LoggerContext {
@ -78,8 +70,6 @@ class LoggerContext {
}
}
rootLogger.debug('logger initialized', { loggerConfig });
function buildRequestLogger(req) {
let reqUids = [];
if (req.headers['x-scal-request-uids'] !== undefined) {

libV2/utils/probe.js Normal file
View File

@ -0,0 +1,32 @@
const { ProbeServer } = require('arsenal').network.probe.ProbeServer;
/**
* Configure probe servers
* @typedef {Object} ProbeServerConfig
* @property {string} bindAddress - Address to bind probe server to
* @property {number} port - Port to bind probe server to
*/
/**
* Start an empty probe server
* @async
* @param {ProbeServerConfig} config - Configuration for probe server
* @returns {Promise<ProbeServer>} - Instance of ProbeServer
*/
async function startProbeServer(config) {
if (!config) {
throw new Error('configuration for probe server is missing');
}
return new Promise((resolve, reject) => {
const probeServer = new ProbeServer(config);
probeServer.onListening(() => resolve(probeServer));
probeServer.onError(err => reject(err));
probeServer.start();
});
}
module.exports = {
startProbeServer,
};

View File

@ -1,6 +1,5 @@
const assert = require('assert');
const { auth, policies } = require('arsenal');
const vaultclient = require('vaultclient');
const config = require('../config');
const errors = require('../errors');
/**
@ -9,9 +8,17 @@ const errors = require('../errors');
*/
class VaultWrapper extends auth.Vault {
create(config) {
if (config.vaultd.host) {
return new VaultWrapper(config);
}
return null;
}
constructor(options) {
let client;
const { host, port } = options.vaultd;
const vaultclient = require('vaultclient');
if (options.tls) {
const { key, cert, ca } = options.tls;
client = new vaultclient.Client(host, port, true, key, cert,
@ -69,7 +76,7 @@ class VaultWrapper extends auth.Vault {
request,
request.logger.logger,
(err, authInfo, authRes) => {
if (err && (err.InvalidAccessKeyId || err.AccessDenied)) {
if (err && err.is && (err.is.InvalidAccessKeyId || err.is.AccessDenied)) {
resolve({ authed: false });
return;
}
@ -119,7 +126,7 @@ class VaultWrapper extends auth.Vault {
}
}
const vault = new VaultWrapper(config);
const vault = VaultWrapper.create(config);
auth.setHandler(vault);
module.exports = {

View File

@ -73,6 +73,11 @@ async function authorizeUserAccessKey(authInfo, level, resources, log) {
log.trace('Authorizing IAM user', { resources });
// If no resources were authorized by Vault then no further checking is required
if (resources.length === 0) {
return [false, []];
}
// Get the parent account id from the user's arn
const parentAccountId = authInfo.getArn().split(':')[4];

View File

@ -3,7 +3,7 @@
"engines": {
"node": ">=16"
},
"version": "8.1.6",
"version": "8.1.15",
"description": "API for tracking resource utilization and reporting metrics",
"main": "index.js",
"repository": {
@ -19,13 +19,12 @@
"dependencies": {
"@hapi/joi": "^17.1.1",
"@senx/warp10": "^1.0.14",
"arsenal": "scality/Arsenal#8.1.39",
"arsenal": "git+https://git.yourcmc.ru/vitalif/zenko-arsenal.git#development/8.1",
"async": "^3.2.0",
"aws-sdk": "^2.1005.0",
"aws4": "^1.8.0",
"backo": "^1.1.0",
"body-parser": "^1.19.0",
"bucketclient": "scality/bucketclient#8.1.3",
"byte-size": "^7.0.0",
"commander": "^5.1.0",
"cron-parser": "^2.15.0",
@ -37,18 +36,17 @@
"level-mem": "^5.0.1",
"needle": "^2.5.0",
"node-schedule": "^1.3.2",
"oas-tools": "^2.2.1",
"prom-client": "^13.1.0",
"oas-tools": "^2.2.2",
"prom-client": "14.2.0",
"uuid": "^3.3.2",
"vaultclient": "scality/vaultclient#8.2.5",
"werelogs": "scality/werelogs#8.1.0"
"werelogs": "git+https://git.yourcmc.ru/vitalif/zenko-werelogs.git#development/8.1"
},
"devDependencies": {
"eslint": "6.0.1",
"eslint-config-airbnb": "17.1.0",
"eslint-config-scality": "scality/Guidelines#8.2.0",
"eslint": "^8.14.0",
"eslint-config-airbnb-base": "^15.0.0",
"eslint-config-scality": "git+https://git.yourcmc.ru/vitalif/zenko-eslint-config-scality.git",
"eslint-plugin-import": "^2.18.0",
"mocha": "^3.0.2",
"mocha": ">=3.1.2",
"nodemon": "^2.0.4",
"protobufjs": "^6.10.1",
"sinon": "^9.0.2"

requirements.txt Normal file
View File

@ -0,0 +1,2 @@
redis==5.0.3
requests==2.31.0

View File

@ -3,17 +3,16 @@ const assert = require('assert');
const url = require('url');
const { auth, errors, policies } = require('arsenal');
const safeJsonParse = require('../utils/safeJsonParse');
const Vault = require('../lib/Vault');
class Router {
/**
* @constructor
* @param {Config} config - Config instance
*/
constructor(config) {
constructor(config, vault) {
this._service = config.component;
this._routes = {};
this._vault = new Vault(config);
this._vault = vault;
}
/**

View File

@ -1,4 +1,21 @@
const config = require('./lib/Config');
const fs = require('fs');
const path = require('path');
const Config = require('./lib/Config');
const server = require('./lib/server');
server(Object.assign({}, config, { component: 's3' }));
/*
* By default, the config file is "config.json" at the root.
* It can be overridden using the UTAPI_CONFIG_FILE environment var.
*/
const cfgpath = process.env.UTAPI_CONFIG_FILE || (__dirname+'/config.json');
let cfg;
try {
cfg = JSON.parse(fs.readFileSync(cfgpath, { encoding: 'utf-8' }));
} catch (err) {
throw new Error(`could not parse config file: ${err.message}`);
}
cfg.component = 's3';
server(new Config(cfg));

View File

@ -1,11 +1,14 @@
const assert = require('assert');
const fs = require('fs');
const sinon = require('sinon');
const uuid = require('uuid');
const promClient = require('prom-client');
const { clients: warp10Clients } = require('../../../libV2/warp10');
const { MonitorDiskUsage } = require('../../../libV2/tasks');
const { fillDir } = require('../../utils/v2Data');
const { assertMetricValue } = require('../../utils/prom');
// eslint-disable-next-line func-names
describe('Test MonitorDiskUsage hard limit', function () {
@ -15,14 +18,18 @@ describe('Test MonitorDiskUsage hard limit', function () {
beforeEach(async () => {
path = `/tmp/diskusage-${uuid.v4()}`;
task = new MonitorDiskUsage({ warp10: warp10Clients });
fs.mkdirSync(`${path}/datalog`, { recursive: true });
promClient.register.clear();
task = new MonitorDiskUsage({ warp10: warp10Clients, enableMetrics: true });
await task.setup();
task._path = path;
task._enabled = true;
});
afterEach(async () => task.join());
it('should trigger a database lock if above the limit', async () => {
fillDir(path, { count: 1, size: 100 });
fillDir(`${path}/leveldb`, { count: 1, size: 100 });
task._hardLimit = 1;
const checkSpy = sinon.spy(task, '_checkHardLimit');
const lockSpy = sinon.spy(task, '_disableWarp10Updates');
@ -34,10 +41,11 @@ describe('Test MonitorDiskUsage hard limit', function () {
assert(lockSpy.calledOnce);
assert(unlockSpy.notCalled);
assert(execStub.calledOnce);
await assertMetricValue('s3_utapi_monitor_disk_usage_hard_limit_bytes', 1);
});
it('should trigger a database unlock if below the limit', async () => {
fillDir(path, { count: 1, size: 100 });
fillDir(`${path}/leveldb`, { count: 1, size: 100 });
task._hardLimit = 10240;
const checkSpy = sinon.spy(task, '_checkHardLimit');
const lockSpy = sinon.spy(task, '_disableWarp10Updates');
@ -50,7 +58,7 @@ describe('Test MonitorDiskUsage hard limit', function () {
});
it('should not throw when failing to calculate disk usage', async () => {
fillDir(path, { count: 1, size: 100 });
fillDir(`${path}/leveldb`, { count: 1, size: 100 });
task._hardLimit = 1;
sinon.stub(task, '_getUsage').throws();
const _task = task.execute();

View File

@ -0,0 +1,83 @@
const assert = require('assert');
const needle = require('needle');
const promClient = require('prom-client');
const sinon = require('sinon');
const { DEFAULT_METRICS_ROUTE } = require('arsenal').network.probe.ProbeServer;
const { BaseTask } = require('../../../../libV2/tasks');
const { clients: warp10Clients } = require('../../../../libV2/warp10');
const { getMetricValues } = require('../../../utils/prom');
const METRICS_SERVER_PORT = 10999;
class CustomTask extends BaseTask {
// eslint-disable-next-line class-methods-use-this
_registerMetricHandlers() {
const foo = new promClient.Gauge({
name: 's3_utapi_custom_task_foo_total',
help: 'Count of foos',
labelNames: ['origin', 'containerName'],
});
return { foo };
}
async _execute() {
this._metricsHandlers.foo.inc(1);
}
}
describe('Test BaseTask metrics', () => {
let task;
beforeEach(async () => {
task = new CustomTask({
enableMetrics: true,
metricsPort: METRICS_SERVER_PORT,
warp10: [warp10Clients[0]],
});
await task.setup();
});
afterEach(async () => {
await task.join();
promClient.register.clear();
});
it('should start a metrics server on the provided port', async () => {
const res = await needle(
'get',
`http://localhost:${METRICS_SERVER_PORT}${DEFAULT_METRICS_ROUTE}`,
);
const lines = res.body.split('\n');
const first = lines[0];
assert.strictEqual(res.statusCode, 200);
assert(first.startsWith('# HELP'));
});
it('should push metrics for a task execution', async () => {
await task.execute();
const timeValues = await getMetricValues('s3_utapi_custom_task_duration_seconds');
assert.strictEqual(timeValues.length, 1);
const attemptsValues = await getMetricValues('s3_utapi_custom_task_attempts_total');
assert.deepStrictEqual(attemptsValues, [{ value: 1, labels: {} }]);
const failuresValues = await getMetricValues('s3_utapi_custom_task_failures_total');
assert.deepStrictEqual(failuresValues, []);
});
it('should push metrics for a failed task execution', async () => {
sinon.replace(task, '_execute', sinon.fake.rejects('forced failure'));
await task.execute();
const failuresValues = await getMetricValues('s3_utapi_custom_task_failures_total');
assert.deepStrictEqual(failuresValues, [{ value: 1, labels: {} }]);
});
it('should allow custom handlers to be registered', async () => {
await task.execute();
const fooValues = await getMetricValues('s3_utapi_custom_task_foo_total');
assert.deepStrictEqual(fooValues, [{ value: 1, labels: {} }]);
});
});

View File

@ -1,4 +1,5 @@
const assert = require('assert');
const promClient = require('prom-client');
const uuid = require('uuid');
const { Warp10Client } = require('../../../../libV2/warp10');
@ -6,6 +7,7 @@ const { convertTimestamp } = require('../../../../libV2/utils');
const { CreateCheckpoint } = require('../../../../libV2/tasks');
const { generateCustomEvents, fetchRecords } = require('../../../utils/v2Data');
const { assertMetricValue } = require('../../../utils/prom');
const _now = Math.floor(new Date().getTime() / 1000);
const getTs = delta => convertTimestamp(_now + delta);
@ -49,10 +51,16 @@ describe('Test CreateCheckpoint', function () {
prefix = uuid.v4();
warp10 = new Warp10Client({ nodeId: prefix });
checkpointTask = new CreateCheckpoint({ warp10: [warp10] });
checkpointTask = new CreateCheckpoint({ warp10: [warp10], enableMetrics: true });
await checkpointTask.setup();
checkpointTask._program = { lag: 0, nodeId: prefix };
});
afterEach(async () => {
await checkpointTask.join();
promClient.register.clear();
});
it('should create checkpoints from events', async () => {
const start = getTs(-300);
const stop = getTs(-120);
@ -72,6 +80,7 @@ describe('Test CreateCheckpoint', function () {
assert.strictEqual(series.length, 3);
assertResults(totals, series);
await assertMetricValue('s3_utapi_create_checkpoint_created_total', series.length);
});
it('should only include events not in an existing checkpoint', async () => {

View File

@ -1,4 +1,5 @@
const assert = require('assert');
const promClient = require('prom-client');
const uuid = require('uuid');
const { Warp10Client } = require('../../../../libV2/warp10');
@ -6,6 +7,7 @@ const { convertTimestamp } = require('../../../../libV2/utils');
const { CreateCheckpoint, CreateSnapshot, RepairTask } = require('../../../../libV2/tasks');
const { generateCustomEvents, fetchRecords } = require('../../../utils/v2Data');
const { assertMetricValue } = require('../../../utils/prom');
const _now = Math.floor(new Date().getTime() / 1000);
const getTs = delta => convertTimestamp(_now + delta);
@ -54,13 +56,19 @@ describe('Test CreateSnapshot', function () {
checkpointTask = new CreateCheckpoint({ warp10: [warp10] });
checkpointTask._program = { lag: 0, nodeId: prefix };
snapshotTask = new CreateSnapshot({ warp10: [warp10] });
snapshotTask = new CreateSnapshot({ warp10: [warp10], enableMetrics: true });
await snapshotTask.setup();
snapshotTask._program = { lag: 0, nodeId: prefix };
repairTask = new RepairTask({ warp10: [warp10] });
repairTask._program = { lag: 0, nodeId: prefix };
});
afterEach(async () => {
await snapshotTask.join();
promClient.register.clear();
});
it('should create a snapshot from a checkpoint', async () => {
const start = getTs(-300);
const stop = getTs(-120);
@ -80,6 +88,7 @@ describe('Test CreateSnapshot', function () {
assert.strictEqual(series.length, 3);
assertResults(totals, series);
await assertMetricValue('s3_utapi_create_snapshot_created_total', series.length);
});
it('should create a snapshot from more than one checkpoint', async () => {

View File

@ -1,15 +1,19 @@
const assert = require('assert');
const fs = require('fs');
const promClient = require('prom-client');
const uuid = require('uuid');
const { MonitorDiskUsage } = require('../../../../libV2/tasks');
const { getFolderSize } = require('../../../../libV2/utils');
const { fillDir } = require('../../../utils/v2Data');
const { assertMetricValue } = require('../../../utils/prom');
class MonitorDiskUsageShim extends MonitorDiskUsage {
async _getUsage() {
this.usage = await super._getUsage();
return this.usage;
async _getUsage(path) {
const usage = await super._getUsage(path);
this.usage = (this.usage || 0) + usage;
return usage;
}
}
@ -43,17 +47,35 @@ describe('Test MonitorDiskUsage', () => {
beforeEach(async () => {
path = `/tmp/diskusage-${uuid.v4()}`;
task = new MonitorDiskUsageShim({ warp10: [] });
fs.mkdirSync(path);
task = new MonitorDiskUsageShim({ warp10: [], enableMetrics: true });
task._path = path;
task._enabled = true;
await task.setup();
});
testCases.map(testCase =>
afterEach(async () => {
await task.join();
promClient.register.clear();
});
testCases.map(testCase => {
it(`should calculate disk usage for ${testCase.count} files of ${testCase.size} bytes each`,
async () => {
fillDir(path, testCase);
fillDir(`${path}/leveldb`, testCase);
fillDir(`${path}/datalog`, testCase);
await task._execute();
assert.strictEqual(task.usage, testCase.expected + emptyDirSize + (emptyFileSize * testCase.count));
}));
const expectedSingleSize = emptyDirSize + testCase.expected + (emptyFileSize * testCase.count);
const expectedTotalSize = expectedSingleSize * 2;
assert.strictEqual(task.usage, expectedTotalSize);
// Both subpaths are filled identically, so each per-subpath metric reports half of the accumulated total
await assertMetricValue('s3_utapi_monitor_disk_usage_leveldb_bytes', expectedSingleSize);
await assertMetricValue('s3_utapi_monitor_disk_usage_datalog_bytes', expectedSingleSize);
});
});
it('should fail if a subpath does not exist', () => {
assert.doesNotThrow(() => task._execute());
assert.strictEqual(task.usage, undefined);
});
});
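
Reviewer note: read together with the shim above, the test now assumes MonitorDiskUsage._execute() measures the leveldb and datalog subpaths with separate _getUsage() calls while the shim folds both results into one running total. A minimal sketch of that interaction (the path is hypothetical, and the per-subpath call pattern is inferred from the asserts above):

    // Inside an async test body; sketch only, not the actual suite code
    const task = new MonitorDiskUsageShim({ warp10: [], enableMetrics: true });
    task._path = '/tmp/diskusage-example'; // assumed to contain leveldb/ and datalog/
    task._enabled = true;
    await task.setup();
    await task._execute(); // the shim's _getUsage runs once per subpath, summing into task.usage
    // task.usage === size(leveldb) + size(datalog), and each subpath also feeds its own
    // s3_utapi_monitor_disk_usage_<subpath>_bytes metric
    await task.join();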

View File

@@ -1,4 +1,5 @@
const assert = require('assert');
const promClient = require('prom-client');
const uuid = require('uuid');
const { CacheClient, backends: cacheBackends } = require('../../../../libV2/cache');
@@ -9,6 +10,7 @@ const config = require('../../../../libV2/config');
const { eventFieldsToWarp10 } = require('../../../../libV2/constants');
const { generateFakeEvents, fetchRecords } = require('../../../utils/v2Data');
const { assertMetricValue, getMetricValues } = require('../../../utils/prom');
const _now = Math.floor(new Date().getTime() / 1000);
const getTs = delta => convertTimestamp(_now + delta);
@@ -58,17 +60,21 @@ describe('Test IngestShards', function () {
await cacheClient.connect();
warp10 = new Warp10Client({ nodeId: prefix });
ingestTask = new IngestShard({ warp10: [warp10] });
ingestTask = new IngestShard({ warp10: [warp10], enableMetrics: true });
await ingestTask.setup();
ingestTask._cache._cacheBackend._prefix = prefix;
ingestTask._program = { lag: 0 };
await ingestTask._cache.connect();
});
this.afterEach(async () => {
afterEach(async () => {
await cacheClient.disconnect();
await ingestTask._cache.disconnect();
await ingestTask.join();
promClient.register.clear();
});
it('should ingest metrics from a single shard', async () => {
const start = shardFromTimestamp(getTs(-120));
const stop = start + 9000000;
@@ -86,6 +92,12 @@ describe('Test IngestShards', function () {
'@utapi/decodeEvent',
);
assertResults(events, series);
await assertMetricValue('s3_utapi_ingest_shard_task_ingest_total', events.length);
await assertMetricValue('s3_utapi_ingest_shard_task_shard_ingest_total', 1);
const metricValues = await getMetricValues('s3_utapi_ingest_shard_task_shard_age_total');
assert.strictEqual(metricValues.length, 1);
const [metric] = metricValues;
assert(metric.value > 0);
});
it('should ingest metrics for multiple shards', async () => {
@@ -106,6 +118,7 @@
);
assertResults(events, series);
await assertMetricValue('s3_utapi_ingest_shard_task_ingest_total', events.length);
});
it('should ingest old metrics as repair', async () => {
@@ -125,6 +138,7 @@
'@utapi/decodeEvent',
);
assertResults(events, series);
await assertMetricValue('s3_utapi_ingest_shard_task_slow_total', events.length);
});
it('should strip the event uuid during ingestion', async () => {
@@ -208,4 +222,3 @@ describe('Test IngestShards', function () {
], timestamps);
});
});

View File

@@ -1,4 +1,5 @@
const assert = require('assert');
const promClient = require('prom-client');
const uuid = require('uuid');
const { Warp10Client } = require('../../../../libV2/warp10');
@@ -6,6 +7,7 @@ const { convertTimestamp } = require('../../../../libV2/utils');
const { RepairTask } = require('../../../../libV2/tasks');
const { generateCustomEvents, fetchRecords } = require('../../../utils/v2Data');
const { assertMetricValue } = require('../../../utils/prom');
// Ten minutes in the past
const _now = Math.floor(new Date().getTime() / 1000) - (600);
@@ -49,10 +51,16 @@ describe('Test Repair', function () {
beforeEach(async () => {
prefix = uuid.v4();
warp10 = new Warp10Client({ nodeId: prefix });
repairTask = new RepairTask({ warp10: [warp10] });
repairTask = new RepairTask({ warp10: [warp10], enableMetrics: true });
await repairTask.setup();
repairTask._program = { lag: 0, nodeId: prefix };
});
afterEach(async () => {
await repairTask.join();
promClient.register.clear();
});
it('should create corrections from events', async () => {
const start = getTs(-300);
const stop = getTs(-120);
@@ -72,6 +80,7 @@ describe('Test Repair', function () {
assert.strictEqual(series.length, 3);
assertResults(totals, series);
await assertMetricValue('s3_utapi_repair_task_created_total', series.length);
});
it('should only include events not in an existing correction', async () => {

View File

@@ -1,6 +1,5 @@
/* eslint-disable implicit-arrow-linebreak */
const assert = require('assert');
const { errors } = require('arsenal');
const { Logger } = require('werelogs');
const MemoryBackend = require('../../lib/backend/Memory');
const Datastore = require('../../lib/Datastore');
@@ -40,7 +39,7 @@ function getMetricResponse(schemaKey) {
return response;
}
function assertMetrics(schemaKey, metricName, props, isNegativeValue, done) {
function assertMetrics(schemaKey, metricName, props, done) {
const timestamp = new Date().setMinutes(0, 0, 0);
const timeRange = [timestamp, timestamp];
const expectedRes = getMetricResponse(schemaKey);
@@ -52,17 +51,6 @@ function assertMetrics(schemaKey, metricName, props, isNegativeValue, done) {
datastore,
logger,
(err, res) => {
if (isNegativeValue) {
assert.deepStrictEqual(
err,
errors.InternalError.customizeDescription(
'Utapi is in a transient state for this time period as '
+ 'metrics are being collected. Please try again in a few '
+ 'minutes.',
),
);
return done();
}
assert.strictEqual(err, null);
// overwrite operations metrics
if (expectedResProps.operations) {
@@ -100,13 +88,12 @@ function testOps(schemaKey, keyIndex, metricindex, isNegativeValue, done) {
if (keyIndex === 'storageUtilized' || keyIndex === 'numberOfObjects') {
key = generateStateKey(schemaObject, keyIndex);
val = isNegativeValue ? -1024 : 1024;
props[metricindex] = [val, val];
props[metricindex] = isNegativeValue ? [0, 0] : [val, val];
memBackend.zadd(key, timestamp, val, () =>
assertMetrics(
schemaKey,
schemaObject[schemaKey],
props,
isNegativeValue,
done,
));
} else if (keyIndex === 'incomingBytes' || keyIndex === 'outgoingBytes') {
@@ -118,7 +105,6 @@ function testOps(schemaKey, keyIndex, metricindex, isNegativeValue, done) {
schemaKey,
schemaObject[schemaKey],
props,
isNegativeValue,
done,
));
} else {
@@ -131,7 +117,6 @@ function testOps(schemaKey, keyIndex, metricindex, isNegativeValue, done) {
schemaKey,
schemaObject[schemaKey],
props,
isNegativeValue,
done,
));
}
@@ -147,7 +132,6 @@ Object.keys(metricLevels).forEach(schemaKey => {
schemaKey,
resourceNames[schemaKey],
null,
false,
done,
));
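
Reviewer note: the refactor in this file encodes a behavior change worth spelling out. Seeding a negative storageUtilized or numberOfObjects value no longer produces Utapi's transient-state InternalError; the expectation is now a zeroed range, presumably because ListMetrics floors negative interval values to zero. Restated minimally with the file's own fixtures:

    // Before: memBackend.zadd(key, timestamp, -1024, ...) made listMetrics return InternalError.
    // After: the same seed is expected to list as a zeroed metric range.
    const val = isNegativeValue ? -1024 : 1024;
    props[metricindex] = isNegativeValue ? [0, 0] : [val, val];
    memBackend.zadd(key, timestamp, val, () =>
        assertMetrics(schemaKey, schemaObject[schemaKey], props, done));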

View File

@@ -273,7 +273,11 @@ tests.forEach(test => {
c.setDataStore(ds);
c.pushMetric(metric, REQUID, params, () => {
deserializeMemoryBackend(memoryBackend.data);
assert.deepStrictEqual(memoryBackend.data, expected);
Object.keys(expected).forEach(key => {
if (memoryBackend.data[key]) {
assert.deepStrictEqual(memoryBackend.data[key], expected[key]);
}
});
return cb();
});
}
@@ -516,6 +520,7 @@ tests.forEach(test => {
storageUtilized: '1024',
numberOfObjects: '1',
};
setMockData(data, timestamp, () => {
testMetric('deleteObject', params, expected, done);
});
@@ -693,6 +698,40 @@ tests.forEach(test => {
testMetric('putDeleteMarkerObject', metricTypes, expected, done);
});
it('should push putDeleteMarkerObject metrics and have correct bytes and number of objects', done => {
const expected = buildExpectedResult({
action: 'PutObject',
numberOfObjects: '1',
});
const metrics = {
bucket: '5741-repro',
keys: ['foo2'],
byteLength: undefined,
newByteLength: 258,
oldByteLength: null,
numberOfObjects: 1,
accountId: '79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be',
userId: undefined,
};
testMetric('putObject', Object.assign(metrics, metricTypes), expected, () => {
const expected = buildExpectedResult({
action: 'DeleteObject',
numberOfObjects: '1',
});
const metrics2 = {
bucket: '5741-repro',
keys: ['foo2'],
byteLength: 258,
newByteLength: undefined,
oldByteLength: undefined,
numberOfObjects: undefined,
accountId: '79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be',
userId: undefined,
};
testMetric('putDeleteMarkerObject', Object.assign(metrics2, metricTypes), expected, done);
});
});
it('should push putBucketReplication metrics', done => {
const expected = buildExpectedResult({
action: 'PutBucketReplication',

View File

@@ -10,6 +10,7 @@ const templateExpected = opts => ({
operationId: 'healthcheck',
tag: 'internal',
encrypted: false,
requestTimer: null,
...(opts || {}),
});

View File

@@ -1,8 +1,11 @@
const assert = require('assert');
const sinon = require('sinon');
const promClient = require('prom-client');
const { middleware } = require('../../../../libV2/server/middleware');
const { templateRequest, ExpressResponseStub } = require('../../../utils/v2Data');
const RequestContext = require('../../../../libV2/models/RequestContext');
const { getMetricValues, assertMetricValue } = require('../../../utils/prom');
describe('Test middleware', () => {
it('should build a request logger', next => {
@@ -86,4 +89,56 @@ describe('Test middleware', () => {
}));
});
});
describe('test httpMetricsMiddleware', () => {
let resp;
beforeEach(() => {
resp = new ExpressResponseStub();
resp.status(200);
});
afterEach(() => {
promClient.register.clear();
});
it('should increment the counter if not an internal route', async () => {
const req = templateRequest({
swagger: {
operation: {
'x-router-controller': 'metrics',
'operationId': 'listMetrics',
},
},
});
req.ctx = new RequestContext(req);
middleware.httpMetricsMiddleware(req, resp);
await assertMetricValue('s3_utapi_http_requests_total', 1);
const durationMetric = 's3_utapi_http_request_duration_seconds';
const duration = await getMetricValues(durationMetric);
// 14 defined buckets + 1 for Infinity
assert.strictEqual(
duration.filter(metric => metric.metricName === `${durationMetric}_bucket`).length,
15,
);
const count = duration.filter(metric => metric.metricName === `${durationMetric}_count`);
assert.deepStrictEqual(count, [{
labels: {
action: 'listMetrics',
code: 200,
},
metricName: `${durationMetric}_count`,
value: 1,
}]);
assert.strictEqual(count[0].value, 1);
});
it('should not increment the counter if an internal route', async () => {
const req = templateRequest();
req.ctx = new RequestContext(req);
middleware.httpMetricsMiddleware(req, resp);
assert.rejects(() => getMetricValues('s3_utapi_http_requests_total'));
});
});
});
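
Reviewer note: for context, the two metrics the middleware test asserts against would typically be registered with prom-client along these lines. This is a sketch of the usual pattern, not the actual libV2/server/middleware source; the bucket boundaries are placeholders, since the test only pins their count at 14 plus the implicit +Inf bucket:

    const promClient = require('prom-client');

    // Hypothetical registration mirroring the metric names asserted in the test
    const httpRequestsTotal = new promClient.Counter({
        name: 's3_utapi_http_requests_total',
        help: 'Total number of HTTP requests',
        labelNames: ['action', 'code'],
    });

    const httpRequestDuration = new promClient.Histogram({
        name: 's3_utapi_http_request_duration_seconds',
        help: 'HTTP request duration in seconds',
        labelNames: ['action', 'code'],
        // 14 finite buckets; prom-client appends +Inf, matching the 15 the test counts
        buckets: [0.001, 0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10, 30, 60],
    });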

View File

@@ -112,6 +112,17 @@ class BucketD {
return body;
}
_getBucketVersionResponse(bucketName) {
const body = {
CommonPrefixes: [],
IsTruncated: false,
Versions: (this._bucketContent[bucketName] || [])
// patch in a versionId to more closely match the real response
.map(entry => ({ ...entry, versionId: 'null' })),
};
return body;
}
_getShadowBucketOverviewResponse(bucketName) {
const mpus = (this._bucketContent[bucketName] || []).map(o => ({
key: o.key,
@@ -137,6 +148,8 @@ class BucketD {
|| req.query.listingType === 'Delimiter'
) {
req.body = this._getBucketResponse(bucketName);
} else if (req.query.listingType === 'DelimiterVersions') {
req.body = this._getBucketVersionResponse(bucketName);
}
// v2 reindex uses `Basic` listing type for everything
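
Reviewer note: shape-wise, the new DelimiterVersions branch makes the mock return a body like the following for a bucket seeded with one key (inferred from the map above; fields other than key and versionId depend on what the test seeded):

    // Hypothetical mock output for req.query.listingType === 'DelimiterVersions'
    const body = {
        CommonPrefixes: [],
        IsTruncated: false,
        Versions: [{ key: 'example-key', versionId: 'null' }],
    };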

tests/utils/prom.js Normal file
View File

@@ -0,0 +1,20 @@
const promClient = require('prom-client');
const assert = require('assert');
async function getMetricValues(name) {
const metric = await promClient.register.getSingleMetric(name);
const data = await metric.get();
return data.values;
}
async function assertMetricValue(name, value) {
const values = await getMetricValues(name);
assert.strictEqual(values.length, 1);
const [metric] = values;
assert.strictEqual(metric.value, value);
}
module.exports = {
getMetricValues,
assertMetricValue,
};
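
Reviewer note: since most of the reworked tests above funnel through these two helpers, here is a minimal self-contained usage sketch; the counter name and the surrounding mocha test are illustrative only, not part of the Utapi suite:

    const assert = require('assert');
    const promClient = require('prom-client');
    const { assertMetricValue, getMetricValues } = require('../utils/prom'); // path relative to a test file

    it('reads a registered metric through the helpers', async () => {
        // Hypothetical counter, registered only for this example
        const counter = new promClient.Counter({ name: 'example_requests_total', help: 'example' });
        counter.inc(3);
        await assertMetricValue('example_requests_total', 3); // exactly one series, value 3
        const [series] = await getMetricValues('example_requests_total');
        assert.strictEqual(series.value, 3);
        promClient.register.clear(); // mirror the suite's afterEach cleanup
    });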

View File

@@ -115,7 +115,16 @@
%> FOREACH
%>
<%
DROP 0 STOP
// If no new events were found
// - drop the empty list
// - write a new master checkpoint
// - return 0
DROP
NEWGTS $master_checkpoint_class RENAME
$endTimestamp NaN NaN NaN 0 ADDVALUE
{ 'node' $nodeId } RELABEL
$write_token UPDATE
0 STOP
%> IFTE
0 'checkpoints' STORE

yarn.lock

File diff suppressed because it is too large