diff --git a/README.md b/README.md index 5c93649..97fed2a 100644 --- a/README.md +++ b/README.md @@ -69,6 +69,8 @@ OPTIONS: Run modes in order. See NOTES for more info (default "cxipgdx") -n int Maximum number of objects <-1 for unlimited> (default -1) + -o string + Write CSV output to this file -op string Prefix for objects -r string @@ -106,63 +108,64 @@ Below is an example run of the benchmark using a 10s test duration, 10 threads, running on an Intel P3700 NVMe device. ``` -$ ./hsbench -a -s -u http://127.0.0.1:7480 -z 4K -d 10 -t 10 -b 10 -2019/08/15 18:32:36 Hotsauce S3 Benchmark Version 0.1 -2019/08/15 18:32:36 Parameters: -2019/08/15 18:32:36 url=http://127.0.0.1:7480 -2019/08/15 18:32:36 object_prefix= -2019/08/15 18:32:36 bucket_prefix=hotsauce_bench -2019/08/15 18:32:36 region=us-east-1 -2019/08/15 18:32:36 modes=cxipgdx -2019/08/15 18:32:36 object_count=-1 -2019/08/15 18:32:36 bucket_count=10 -2019/08/15 18:32:36 duration=10 -2019/08/15 18:32:36 threads=10 -2019/08/15 18:32:36 loops=1 -2019/08/15 18:32:36 size=4K -2019/08/15 18:32:36 interval=1.000000 -2019/08/15 18:32:36 Running Loop 0 BUCKET CLEAR TEST -2019/08/15 18:32:36 Loop: 0, Int: ALL, Dur(s): 0.0, Mode: BCLR, Ops: 0, MB/s: 0.00, IO/s: 0, Lat(ms): [ min: 0.0, avg: 0.0, 99%: 0.0, max: 0.0 ], Slowdowns: 0 -2019/08/15 18:32:36 Running Loop 0 BUCKET DELETE TEST -2019/08/15 18:32:36 Loop: 0, Int: ALL, Dur(s): 0.0, Mode: BDEL, Ops: 0, MB/s: 0.00, IO/s: 0, Lat(ms): [ min: 0.0, avg: 0.0, 99%: 0.0, max: 0.0 ], Slowdowns: 0 -2019/08/15 18:32:36 Running Loop 0 BUCKET INIT TEST -2019/08/15 18:32:36 Loop: 0, Int: ALL, Dur(s): 0.0, Mode: BINIT, Ops: 10, MB/s: 0.00, IO/s: 796, Lat(ms): [ min: 10.1, avg: 11.3, 99%: 12.4, max: 12.4 ], Slowdowns: 0 -2019/08/15 18:32:36 Running Loop 0 OBJECT PUT TEST -2019/08/15 18:32:37 Loop: 0, Int: 1, Dur(s): 1.0, Mode: PUT, Ops: 5201, MB/s: 20.32, IO/s: 5201, Lat(ms): [ min: 1.2, avg: 1.9, 99%: 3.7, max: 6.1 ], Slowdowns: 0 -2019/08/15 18:32:38 Loop: 0, Int: 2, Dur(s): 1.0, 
Mode: PUT, Ops: 5372, MB/s: 20.98, IO/s: 5372, Lat(ms): [ min: 1.3, avg: 1.9, 99%: 3.3, max: 4.3 ], Slowdowns: 0 -2019/08/15 18:32:39 Loop: 0, Int: 3, Dur(s): 1.0, Mode: PUT, Ops: 5270, MB/s: 20.59, IO/s: 5270, Lat(ms): [ min: 1.2, avg: 1.9, 99%: 3.4, max: 10.3 ], Slowdowns: 0 -2019/08/15 18:32:40 Loop: 0, Int: 4, Dur(s): 1.0, Mode: PUT, Ops: 5280, MB/s: 20.62, IO/s: 5280, Lat(ms): [ min: 1.2, avg: 1.9, 99%: 3.4, max: 9.4 ], Slowdowns: 0 -2019/08/15 18:32:41 Loop: 0, Int: 5, Dur(s): 1.0, Mode: PUT, Ops: 5356, MB/s: 20.92, IO/s: 5356, Lat(ms): [ min: 1.3, avg: 1.9, 99%: 3.2, max: 4.3 ], Slowdowns: 0 -2019/08/15 18:32:42 Loop: 0, Int: 6, Dur(s): 1.0, Mode: PUT, Ops: 5279, MB/s: 20.62, IO/s: 5279, Lat(ms): [ min: 1.3, avg: 1.9, 99%: 3.0, max: 11.8 ], Slowdowns: 0 -2019/08/15 18:32:43 Loop: 0, Int: 7, Dur(s): 1.0, Mode: PUT, Ops: 5296, MB/s: 20.69, IO/s: 5296, Lat(ms): [ min: 1.2, avg: 1.9, 99%: 3.0, max: 10.2 ], Slowdowns: 0 -2019/08/15 18:32:44 Loop: 0, Int: 8, Dur(s): 1.0, Mode: PUT, Ops: 5319, MB/s: 20.78, IO/s: 5319, Lat(ms): [ min: 1.3, avg: 1.9, 99%: 2.9, max: 3.7 ], Slowdowns: 0 -2019/08/15 18:32:45 Loop: 0, Int: 9, Dur(s): 1.0, Mode: PUT, Ops: 5161, MB/s: 20.16, IO/s: 5161, Lat(ms): [ min: 1.2, avg: 1.9, 99%: 3.1, max: 9.8 ], Slowdowns: 0 -2019/08/15 18:32:46 Loop: 0, Int: 10, Dur(s): 1.0, Mode: PUT, Ops: 4768, MB/s: 18.62, IO/s: 4768, Lat(ms): [ min: 1.2, avg: 2.1, 99%: 3.0, max: 89.3 ], Slowdowns: 0 -2019/08/15 18:32:46 Loop: 0, Int: ALL, Dur(s): 10.0, Mode: PUT, Ops: 52312, MB/s: 20.43, IO/s: 5230, Lat(ms): [ min: 1.2, avg: 1.9, 99%: 3.2, max: 89.3 ], Slowdowns: 0 -2019/08/15 18:32:46 Running Loop 0 OBJECT GET TEST -2019/08/15 18:32:47 Loop: 0, Int: 1, Dur(s): 1.0, Mode: GET, Ops: 1190, MB/s: 4.65, IO/s: 1190, Lat(ms): [ min: 0.6, avg: 0.8, 99%: 1.3, max: 1.6 ], Slowdowns: 0 -2019/08/15 18:32:48 Loop: 0, Int: 2, Dur(s): 1.0, Mode: GET, Ops: 1142, MB/s: 4.46, IO/s: 1142, Lat(ms): [ min: 0.6, avg: 0.8, 99%: 1.1, max: 1.7 ], Slowdowns: 0 -2019/08/15 18:32:49 
Loop: 0, Int: 3, Dur(s): 1.0, Mode: GET, Ops: 1111, MB/s: 4.34, IO/s: 1111, Lat(ms): [ min: 0.6, avg: 0.8, 99%: 1.1, max: 1.2 ], Slowdowns: 0 -2019/08/15 18:32:50 Loop: 0, Int: 4, Dur(s): 1.0, Mode: GET, Ops: 1113, MB/s: 4.35, IO/s: 1113, Lat(ms): [ min: 0.6, avg: 0.8, 99%: 1.1, max: 3.7 ], Slowdowns: 0 -2019/08/15 18:32:51 Loop: 0, Int: 5, Dur(s): 1.0, Mode: GET, Ops: 1083, MB/s: 4.23, IO/s: 1083, Lat(ms): [ min: 0.6, avg: 0.8, 99%: 1.2, max: 1.4 ], Slowdowns: 0 -2019/08/15 18:32:52 Loop: 0, Int: 6, Dur(s): 1.0, Mode: GET, Ops: 1089, MB/s: 4.25, IO/s: 1089, Lat(ms): [ min: 0.6, avg: 0.8, 99%: 1.1, max: 1.4 ], Slowdowns: 0 -2019/08/15 18:32:53 Loop: 0, Int: 7, Dur(s): 1.0, Mode: GET, Ops: 1122, MB/s: 4.38, IO/s: 1122, Lat(ms): [ min: 0.6, avg: 0.8, 99%: 1.2, max: 1.5 ], Slowdowns: 0 -2019/08/15 18:32:54 Loop: 0, Int: 8, Dur(s): 1.0, Mode: GET, Ops: 1089, MB/s: 4.25, IO/s: 1089, Lat(ms): [ min: 0.6, avg: 0.8, 99%: 1.1, max: 1.4 ], Slowdowns: 0 -2019/08/15 18:32:55 Loop: 0, Int: 9, Dur(s): 1.0, Mode: GET, Ops: 1059, MB/s: 4.14, IO/s: 1059, Lat(ms): [ min: 0.6, avg: 0.8, 99%: 1.1, max: 5.7 ], Slowdowns: 0 -2019/08/15 18:32:56 Loop: 0, Int: ALL, Dur(s): 10.0, Mode: GET, Ops: 11086, MB/s: 4.31, IO/s: 1104, Lat(ms): [ min: 0.6, avg: 0.8, 99%: 1.1, max: 5.7 ], Slowdowns: 0 -2019/08/15 18:32:56 Running Loop 0 OBJECT DELETE TEST -2019/08/15 18:32:57 Loop: 0, Int: 1, Dur(s): 1.0, Mode: DEL, Ops: 5716, MB/s: 22.33, IO/s: 5716, Lat(ms): [ min: 1.1, avg: 1.7, 99%: 2.7, max: 3.8 ], Slowdowns: 0 -2019/08/15 18:32:58 Loop: 0, Int: 2, Dur(s): 1.0, Mode: DEL, Ops: 5661, MB/s: 22.11, IO/s: 5661, Lat(ms): [ min: 1.1, avg: 1.8, 99%: 2.8, max: 9.6 ], Slowdowns: 0 -2019/08/15 18:32:59 Loop: 0, Int: 3, Dur(s): 1.0, Mode: DEL, Ops: 5616, MB/s: 21.94, IO/s: 5616, Lat(ms): [ min: 1.2, avg: 1.8, 99%: 2.9, max: 10.1 ], Slowdowns: 0 -2019/08/15 18:33:00 Loop: 0, Int: 4, Dur(s): 1.0, Mode: DEL, Ops: 5608, MB/s: 21.91, IO/s: 5608, Lat(ms): [ min: 1.2, avg: 1.8, 99%: 2.7, max: 7.7 ], Slowdowns: 0 
-2019/08/15 18:33:01 Loop: 0, Int: 5, Dur(s): 1.0, Mode: DEL, Ops: 5529, MB/s: 21.60, IO/s: 5529, Lat(ms): [ min: 1.2, avg: 1.8, 99%: 3.0, max: 9.3 ], Slowdowns: 0 -2019/08/15 18:33:02 Loop: 0, Int: 6, Dur(s): 1.0, Mode: DEL, Ops: 5438, MB/s: 21.24, IO/s: 5438, Lat(ms): [ min: 1.2, avg: 1.8, 99%: 3.0, max: 10.4 ], Slowdowns: 0 -2019/08/15 18:33:03 Loop: 0, Int: 7, Dur(s): 1.0, Mode: DEL, Ops: 5515, MB/s: 21.54, IO/s: 5515, Lat(ms): [ min: 1.3, avg: 1.8, 99%: 2.9, max: 7.5 ], Slowdowns: 0 -2019/08/15 18:33:04 Loop: 0, Int: 8, Dur(s): 1.0, Mode: DEL, Ops: 5481, MB/s: 21.41, IO/s: 5481, Lat(ms): [ min: 1.2, avg: 1.8, 99%: 3.0, max: 9.7 ], Slowdowns: 0 -2019/08/15 18:33:05 Loop: 0, Int: 9, Dur(s): 1.0, Mode: DEL, Ops: 5614, MB/s: 21.93, IO/s: 5614, Lat(ms): [ min: 1.2, avg: 1.8, 99%: 3.1, max: 8.0 ], Slowdowns: 0 -2019/08/15 18:33:05 Loop: 0, Int: ALL, Dur(s): 9.4, Mode: DEL, Ops: 52312, MB/s: 21.76, IO/s: 5571, Lat(ms): [ min: 1.1, avg: 1.8, 99%: 2.9, max: 10.4 ], Slowdowns: 0 -2019/08/15 18:33:05 Running Loop 0 BUCKET DELETE TEST -2019/08/15 18:33:05 Loop: 0, Int: ALL, Dur(s): 0.1, Mode: BDEL, Ops: 10, MB/s: 0.00, IO/s: 133, Lat(ms): [ min: 54.1, avg: 68.9, 99%: 75.2, max: 75.2 ], Slowdowns: 0 +$ ./hsbench -a 3JZ0SVK94Z55OZU5J1N0 -s OdzEPyDDZ0ls1haDUu1NVWkJDcnG74Lb7XylfXRM -u http://127.0.0.1:7480 -z 4K -d 10 -t 10 -b 10 -o test.csv +2019/08/19 08:18:51 Hotsauce S3 Benchmark Version 0.1 +2019/08/19 08:18:51 Parameters: +2019/08/19 08:18:51 url=http://127.0.0.1:7480 +2019/08/19 08:18:51 object_prefix= +2019/08/19 08:18:51 bucket_prefix=hotsauce_bench +2019/08/19 08:18:51 region=us-east-1 +2019/08/19 08:18:51 modes=cxipgdx +2019/08/19 08:18:51 object_count=-1 +2019/08/19 08:18:51 bucket_count=10 +2019/08/19 08:18:51 duration=10 +2019/08/19 08:18:51 threads=10 +2019/08/19 08:18:51 loops=1 +2019/08/19 08:18:51 size=4K +2019/08/19 08:18:51 interval=1.000000 +2019/08/19 08:18:51 Running Loop 0 BUCKET CLEAR TEST +2019/08/19 08:18:51 Loop: 0, Int: TOTAL, Dur(s): 0.0, Mode: 
BCLR, Ops: 0, MB/s: 0.00, IO/s: 0, Lat(ms): [ min: 0.0, avg: 0.0, 99%: 0.0, max: 0.0 ], Slowdowns: 0 +2019/08/19 08:18:51 Running Loop 0 BUCKET DELETE TEST +2019/08/19 08:18:51 Loop: 0, Int: TOTAL, Dur(s): 0.0, Mode: BDEL, Ops: 0, MB/s: 0.00, IO/s: 0, Lat(ms): [ min: 0.0, avg: 0.0, 99%: 0.0, max: 0.0 ], Slowdowns: 0 +2019/08/19 08:18:51 Running Loop 0 BUCKET INIT TEST +2019/08/19 08:18:51 Loop: 0, Int: TOTAL, Dur(s): 0.0, Mode: BINIT, Ops: 10, MB/s: 0.00, IO/s: 968, Lat(ms): [ min: 7.5, avg: 8.7, 99%: 10.2, max: 10.2 ], Slowdowns: 0 +2019/08/19 08:18:51 Running Loop 0 OBJECT PUT TEST +2019/08/19 08:18:52 Loop: 0, Int: 0, Dur(s): 1.0, Mode: PUT, Ops: 5255, MB/s: 20.53, IO/s: 5255, Lat(ms): [ min: 1.3, avg: 1.9, 99%: 3.4, max: 14.1 ], Slowdowns: 0 +2019/08/19 08:18:53 Loop: 0, Int: 1, Dur(s): 1.0, Mode: PUT, Ops: 5237, MB/s: 20.46, IO/s: 5237, Lat(ms): [ min: 1.2, avg: 1.9, 99%: 3.4, max: 11.1 ], Slowdowns: 0 +2019/08/19 08:18:54 Loop: 0, Int: 2, Dur(s): 1.0, Mode: PUT, Ops: 5454, MB/s: 21.30, IO/s: 5454, Lat(ms): [ min: 1.2, avg: 1.8, 99%: 2.9, max: 7.3 ], Slowdowns: 0 +2019/08/19 08:18:55 Loop: 0, Int: 3, Dur(s): 1.0, Mode: PUT, Ops: 5318, MB/s: 20.77, IO/s: 5318, Lat(ms): [ min: 1.3, avg: 1.9, 99%: 3.7, max: 7.1 ], Slowdowns: 0 +2019/08/19 08:18:56 Loop: 0, Int: 4, Dur(s): 1.0, Mode: PUT, Ops: 5364, MB/s: 20.95, IO/s: 5364, Lat(ms): [ min: 1.3, avg: 1.9, 99%: 2.9, max: 7.8 ], Slowdowns: 0 +2019/08/19 08:18:57 Loop: 0, Int: 5, Dur(s): 1.0, Mode: PUT, Ops: 5219, MB/s: 20.39, IO/s: 5219, Lat(ms): [ min: 1.3, avg: 1.9, 99%: 3.2, max: 16.8 ], Slowdowns: 0 +2019/08/19 08:18:58 Loop: 0, Int: 6, Dur(s): 1.0, Mode: PUT, Ops: 5200, MB/s: 20.31, IO/s: 5200, Lat(ms): [ min: 1.2, avg: 1.9, 99%: 3.1, max: 9.6 ], Slowdowns: 0 +2019/08/19 08:18:59 Loop: 0, Int: 7, Dur(s): 1.0, Mode: PUT, Ops: 5250, MB/s: 20.51, IO/s: 5250, Lat(ms): [ min: 1.3, avg: 1.9, 99%: 3.0, max: 7.3 ], Slowdowns: 0 +2019/08/19 08:19:00 Loop: 0, Int: 8, Dur(s): 1.0, Mode: PUT, Ops: 4701, MB/s: 18.36, IO/s: 
4701, Lat(ms): [ min: 1.3, avg: 2.1, 99%: 3.5, max: 86.6 ], Slowdowns: 0 +2019/08/19 08:19:01 Loop: 0, Int: 9, Dur(s): 1.0, Mode: PUT, Ops: 5269, MB/s: 20.58, IO/s: 5269, Lat(ms): [ min: 1.3, avg: 1.9, 99%: 3.2, max: 11.4 ], Slowdowns: 0 +2019/08/19 08:19:01 Loop: 0, Int: TOTAL, Dur(s): 10.0, Mode: PUT, Ops: 52277, MB/s: 20.42, IO/s: 5227, Lat(ms): [ min: 1.2, avg: 1.9, 99%: 3.2, max: 86.6 ], Slowdowns: 0 +2019/08/19 08:19:01 Running Loop 0 OBJECT GET TEST +2019/08/19 08:19:03 Loop: 0, Int: 0, Dur(s): 1.0, Mode: GET, Ops: 1275, MB/s: 4.98, IO/s: 1275, Lat(ms): [ min: 0.6, avg: 0.9, 99%: 1.4, max: 1.5 ], Slowdowns: 0 +2019/08/19 08:19:04 Loop: 0, Int: 1, Dur(s): 1.0, Mode: GET, Ops: 1286, MB/s: 5.02, IO/s: 1286, Lat(ms): [ min: 0.6, avg: 0.9, 99%: 1.3, max: 1.8 ], Slowdowns: 0 +2019/08/19 08:19:05 Loop: 0, Int: 2, Dur(s): 1.0, Mode: GET, Ops: 1112, MB/s: 4.34, IO/s: 1112, Lat(ms): [ min: 0.6, avg: 0.8, 99%: 1.1, max: 1.6 ], Slowdowns: 0 +2019/08/19 08:19:06 Loop: 0, Int: 3, Dur(s): 1.0, Mode: GET, Ops: 1128, MB/s: 4.41, IO/s: 1128, Lat(ms): [ min: 0.6, avg: 0.8, 99%: 1.2, max: 5.7 ], Slowdowns: 0 +2019/08/19 08:19:07 Loop: 0, Int: 4, Dur(s): 1.0, Mode: GET, Ops: 1112, MB/s: 4.34, IO/s: 1112, Lat(ms): [ min: 0.6, avg: 0.8, 99%: 1.2, max: 1.7 ], Slowdowns: 0 +2019/08/19 08:19:08 Loop: 0, Int: 5, Dur(s): 1.0, Mode: GET, Ops: 1174, MB/s: 4.59, IO/s: 1174, Lat(ms): [ min: 0.6, avg: 0.9, 99%: 1.3, max: 1.6 ], Slowdowns: 0 +2019/08/19 08:19:09 Loop: 0, Int: 6, Dur(s): 1.0, Mode: GET, Ops: 1106, MB/s: 4.32, IO/s: 1106, Lat(ms): [ min: 0.6, avg: 0.8, 99%: 1.1, max: 1.7 ], Slowdowns: 0 +2019/08/19 08:19:10 Loop: 0, Int: 7, Dur(s): 1.0, Mode: GET, Ops: 1132, MB/s: 4.42, IO/s: 1132, Lat(ms): [ min: 0.6, avg: 0.8, 99%: 1.2, max: 11.6 ], Slowdowns: 0 +2019/08/19 08:19:11 Loop: 0, Int: 8, Dur(s): 1.0, Mode: GET, Ops: 1074, MB/s: 4.20, IO/s: 1074, Lat(ms): [ min: 0.6, avg: 0.8, 99%: 1.2, max: 1.4 ], Slowdowns: 0 +2019/08/19 08:19:12 Loop: 0, Int: 9, Dur(s): 1.0, Mode: GET, Ops: 
1080, MB/s: 4.22, IO/s: 1080, Lat(ms): [ min: 0.6, avg: 0.8, 99%: 1.2, max: 1.6 ], Slowdowns: 0 +2019/08/19 08:19:12 Loop: 0, Int: TOTAL, Dur(s): 10.0, Mode: GET, Ops: 11481, MB/s: 4.47, IO/s: 1144, Lat(ms): [ min: 0.6, avg: 0.8, 99%: 1.2, max: 11.6 ], Slowdowns: 0 +2019/08/19 08:19:12 Running Loop 0 OBJECT DELETE TEST +2019/08/19 08:19:13 Loop: 0, Int: 0, Dur(s): 1.0, Mode: DEL, Ops: 5591, MB/s: 21.84, IO/s: 5591, Lat(ms): [ min: 1.2, avg: 1.8, 99%: 3.3, max: 11.0 ], Slowdowns: 0 +2019/08/19 08:19:14 Loop: 0, Int: 1, Dur(s): 1.0, Mode: DEL, Ops: 5607, MB/s: 21.90, IO/s: 5607, Lat(ms): [ min: 1.2, avg: 1.8, 99%: 3.1, max: 13.9 ], Slowdowns: 0 +2019/08/19 08:19:15 Loop: 0, Int: 2, Dur(s): 1.0, Mode: DEL, Ops: 5680, MB/s: 22.19, IO/s: 5680, Lat(ms): [ min: 1.2, avg: 1.8, 99%: 2.7, max: 7.4 ], Slowdowns: 0 +2019/08/19 08:19:16 Loop: 0, Int: 3, Dur(s): 1.0, Mode: DEL, Ops: 5691, MB/s: 22.23, IO/s: 5691, Lat(ms): [ min: 1.2, avg: 1.8, 99%: 2.9, max: 6.5 ], Slowdowns: 0 +2019/08/19 08:19:17 Loop: 0, Int: 4, Dur(s): 1.0, Mode: DEL, Ops: 5565, MB/s: 21.74, IO/s: 5565, Lat(ms): [ min: 1.2, avg: 1.8, 99%: 3.6, max: 11.0 ], Slowdowns: 0 +2019/08/19 08:19:18 Loop: 0, Int: 5, Dur(s): 1.0, Mode: DEL, Ops: 5739, MB/s: 22.42, IO/s: 5739, Lat(ms): [ min: 1.2, avg: 1.7, 99%: 2.7, max: 3.5 ], Slowdowns: 0 +2019/08/19 08:19:19 Loop: 0, Int: 6, Dur(s): 1.0, Mode: DEL, Ops: 5613, MB/s: 21.93, IO/s: 5613, Lat(ms): [ min: 1.2, avg: 1.8, 99%: 2.9, max: 7.1 ], Slowdowns: 0 +2019/08/19 08:19:20 Loop: 0, Int: 7, Dur(s): 1.0, Mode: DEL, Ops: 5591, MB/s: 21.84, IO/s: 5591, Lat(ms): [ min: 1.2, avg: 1.8, 99%: 2.9, max: 16.3 ], Slowdowns: 0 +2019/08/19 08:19:21 Loop: 0, Int: 8, Dur(s): 1.0, Mode: DEL, Ops: 5784, MB/s: 22.59, IO/s: 5784, Lat(ms): [ min: 1.2, avg: 1.7, 99%: 2.9, max: 14.3 ], Slowdowns: 0 +2019/08/19 08:19:21 Loop: 0, Int: TOTAL, Dur(s): 9.3, Mode: DEL, Ops: 52277, MB/s: 22.04, IO/s: 5643, Lat(ms): [ min: 1.1, avg: 1.8, 99%: 3.0, max: 16.3 ], Slowdowns: 0 +2019/08/19 08:19:21 
Running Loop 0 BUCKET DELETE TEST +2019/08/19 08:19:21 Loop: 0, Int: TOTAL, Dur(s): 0.1, Mode: BDEL, Ops: 10, MB/s: 0.00, IO/s: 126, Lat(ms): [ min: 71.3, avg: 74.7, 99%: 79.3, max: 79.3 ], Slowdowns: 0 ``` One notable point is that like the s3-benchmark program it is based on, hsbench has relatively low CPU overhead compared to some other S3 benchmarks. During the 4K PUT phase of the above test: diff --git a/hsbench.go b/hsbench.go index b15c9a2..69c6549 100644 --- a/hsbench.go +++ b/hsbench.go @@ -11,6 +11,7 @@ import ( "crypto/sha1" "crypto/tls" "encoding/base64" + "encoding/csv" "flag" "fmt" "io" @@ -35,7 +36,7 @@ import ( ) // Global variables -var access_key, secret_key, url_host, bucket_prefix, object_prefix, region, modes, sizeArg string +var access_key, secret_key, url_host, bucket_prefix, object_prefix, region, modes, output, sizeArg string var buckets []string var duration_secs, threads, loops int var object_data []byte @@ -44,6 +45,7 @@ var running_threads, bucket_count, object_count, object_size, op_counter int64 var object_count_flag bool var endtime time.Time var interval float64 +var csvWriter *csv.Writer // Our HTTP transport used for the roundtripper below var HTTPTransport http.RoundTripper = &http.Transport{ @@ -134,30 +136,160 @@ func setSignature(req *http.Request) { } type IntervalStats struct { + loop int + name string + mode string bytes int64 slowdowns int64 + intervalNano int64 latNano []int64 } +func (is *IntervalStats) makeOutputStats() OutputStats { + // Compute and log the stats + ops := len(is.latNano) + totalLat := int64(0); + minLat := float64(0); + maxLat := float64(0); + NinetyNineLat := float64(0); + avgLat := float64(0); + if ops > 0 { + minLat = float64(is.latNano[0]) / 1000000 + maxLat = float64(is.latNano[ops - 1]) / 1000000 + for i := range is.latNano { + totalLat += is.latNano[i] + } + avgLat = float64(totalLat) / float64(ops) / 1000000 + NintyNineLatNano := is.latNano[int64(math.Round(0.99*float64(ops))) - 1] + 
NinetyNineLat = float64(NintyNineLatNano) / 1000000 + } + seconds := float64(is.intervalNano) / 1000000000 + mbps := float64(is.bytes) / seconds / bytefmt.MEGABYTE + iops := float64(ops) / seconds + + return OutputStats{ + is.loop, + is.name, + seconds, + is.mode, + ops, + mbps, + iops, + minLat, + avgLat, + NinetyNineLat, + maxLat, + is.slowdowns} +} + +type OutputStats struct { + loop int + intervalName string + seconds float64 + mode string + ops int + mbps float64 + iops float64 + minLat float64 + avgLat float64 + NinetyNineLat float64 + maxLat float64 + slowdowns int64 +} + +func (o *OutputStats) log() { + log.Printf( + "Loop: %d, Int: %s, Dur(s): %.1f, Mode: %s, Ops: %d, MB/s: %.2f, IO/s: %.0f, Lat(ms): [ min: %.1f, avg: %.1f, 99%%: %.1f, max: %.1f ], Slowdowns: %d", + o.loop, + o.intervalName, + o.seconds, + o.mode, + o.ops, + o.mbps, + o.iops, + o.minLat, + o.avgLat, + o.NinetyNineLat, + o.maxLat, + o.slowdowns) +} + +func (o *OutputStats) csv_header(w *csv.Writer) { + if w == nil { + log.Fatal("OutputStats Passed nil csv writer") + } + + s := []string{ + "Loop", + "Interval", + "Duration(s)", + "Mode", "Ops", + "MB/s", + "IO/s", + "Min Latency (ms)", + "Avg Latency(ms)", + "99% Latency(ms)", + "Max Latency(ms)", + "Slowdowns"} + + if err := w.Write(s); err != nil { + log.Fatal("Error writing to csv: ", err) + } +} + +func (o *OutputStats) csv(w *csv.Writer) { + if w == nil { + log.Fatal("OutputStats Passed nil csv writer") + } + + s := []string { + strconv.Itoa(o.loop), + o.intervalName, + strconv.FormatFloat(o.seconds, 'f', 2, 64), + o.mode, + strconv.Itoa(o.ops), + strconv.FormatFloat(o.mbps, 'f', 2, 64), + strconv.FormatFloat(o.iops, 'f', 2, 64), + strconv.FormatFloat(o.minLat, 'f', 2, 64), + strconv.FormatFloat(o.avgLat, 'f', 2, 64), + strconv.FormatFloat(o.NinetyNineLat, 'f', 2, 64), + strconv.FormatFloat(o.maxLat, 'f', 2, 64), + strconv.FormatInt(o.slowdowns, 10)} + + if err := w.Write(s); err != nil { + log.Fatal("Error writing to csv: ", err) + } +} 
+ type ThreadStats struct { start int64 curInterval int64 intervals []IntervalStats } -func makeThreadStats(s int64, intervalNano int64) ThreadStats { - ts := ThreadStats{start: s, curInterval: -1} - ts.updateIntervals(intervalNano) +func makeThreadStats(s int64, loop int, mode string, intervalNano int64) ThreadStats { + ts := ThreadStats{s, 0, []IntervalStats{}} + ts.intervals = append(ts.intervals, IntervalStats{loop, "0", mode, 0, 0, intervalNano, []int64{}}) return ts } -func (ts *ThreadStats) updateIntervals(intervalNano int64) int64 { +func (ts *ThreadStats) updateIntervals(loop int, mode string, intervalNano int64) int64 { + // Interval statistics disabled, so just return the current interval if intervalNano < 0 { return ts.curInterval } - for ts.start + intervalNano*ts.curInterval < time.Now().UnixNano() { - ts.intervals = append(ts.intervals, IntervalStats{0, 0, []int64{}}) - ts.curInterval++ + for ts.start + intervalNano*(ts.curInterval+1) < time.Now().UnixNano() { + ts.curInterval++ + ts.intervals = append( + ts.intervals, + IntervalStats{ + loop, + strconv.FormatInt(ts.curInterval, 10), + mode, + 0, + 0, + intervalNano, + []int64{}}) } return ts.curInterval } @@ -181,7 +313,7 @@ type Stats struct { intervalNano int64 // Per-thread statistics threadStats []ThreadStats - // a map of counters of how many threads have finished given interval of stats + // a map of per-interval thread completion counters intervalCompletions sync.Map // a counter of how many threads have finished updating stats entirely completions int32 @@ -191,15 +323,34 @@ func makeStats(loop int, mode string, threads int, intervalNano int64) Stats { start := time.Now().UnixNano() s := Stats{threads, loop, mode, start, 0, intervalNano, []ThreadStats{}, sync.Map{}, 0} for i := 0; i < threads; i++ { - s.threadStats = append(s.threadStats, makeThreadStats(start, s.intervalNano)) + s.threadStats = append(s.threadStats, makeThreadStats(start, s.loop, s.mode, s.intervalNano)) + 
s.updateIntervals(i) } return s } -func (stats *Stats) _getIntervalStats(i int64) IntervalStats { +func (stats *Stats) makeOutputStats(i int64) (OutputStats, bool) { + // Check bounds first + if stats.intervalNano < 0 || i < 0 { + return OutputStats{}, false + } + // Not safe to log if not all writers have completed. + value, ok := stats.intervalCompletions.Load(i) + if !ok { + return OutputStats{}, false + } + cp, ok := value.(*int32) + if !ok { + return OutputStats{}, false + } + count := atomic.LoadInt32(cp) + if count < int32(stats.threads) { + return OutputStats{}, false + } + bytes := int64(0) ops := int64(0) - slowdowns := int64(0); + slowdowns := int64(0) for t := 0; t < stats.threads; t++ { bytes += stats.threadStats[t].intervals[i].bytes @@ -213,10 +364,18 @@ func (stats *Stats) _getIntervalStats(i int64) IntervalStats { c += copy(tmpLat[c:], stats.threadStats[t].intervals[i].latNano) } sort.Slice(tmpLat, func(i, j int) bool { return tmpLat[i] < tmpLat[j] }) - return IntervalStats{bytes, slowdowns, tmpLat} + is := IntervalStats{stats.loop, strconv.FormatInt(i, 10), stats.mode, bytes, slowdowns, stats.intervalNano, tmpLat} + return is.makeOutputStats(), true } -func (stats *Stats) _getTotalStats() IntervalStats { +func (stats *Stats) makeTotalStats() (OutputStats, bool) { + // Not safe to log if not all writers have completed. + completions := atomic.LoadInt32(&stats.completions) + if (completions < int32(threads)) { + log.Printf("log, completions: %d", completions) + return OutputStats{}, false + } + bytes := int64(0) ops := int64(0) slowdowns := int64(0); @@ -237,84 +396,14 @@ func (stats *Stats) _getTotalStats() IntervalStats { } } sort.Slice(tmpLat, func(i, j int) bool { return tmpLat[i] < tmpLat[j] }) - return IntervalStats{bytes, slowdowns, tmpLat} -} - -func (stats *Stats) logI(i int64) bool { - // Check bounds first - if stats.intervalNano < 0 || i < 0 { - return false; - } - // Not safe to log if not all writers have completed. 
- value, ok := stats.intervalCompletions.Load(i) - if !ok { - return false; - } - cp, ok := value.(*int32) - if !ok { - return false; - } - count := atomic.LoadInt32(cp) - if count < int32(stats.threads) { - return false; - } - - return stats._log(strconv.FormatInt(i, 10), stats.intervalNano, stats._getIntervalStats(i)) -} - -func (stats *Stats) log() bool { - // Not safe to log if not all writers have completed. - completions := atomic.LoadInt32(&stats.completions) - if (completions < int32(stats.threads)) { - log.Printf("log, completions: %d", completions) - return false; - } - return stats._log("ALL", stats.endNano - stats.startNano, stats._getTotalStats()) -} - -func (stats *Stats) _log(intervalName string, intervalNano int64, intervalStats IntervalStats) bool { - // Compute and log the stats - ops := len(intervalStats.latNano) - totalLat := int64(0); - minLat := float64(0); - maxLat := float64(0); - NinetyNineLat := float64(0); - avgLat := float64(0); - if ops > 0 { - minLat = float64(intervalStats.latNano[0]) / 1000000 - maxLat = float64(intervalStats.latNano[ops - 1]) / 1000000 - for i := range intervalStats.latNano { - totalLat += intervalStats.latNano[i] - } - avgLat = float64(totalLat) / float64(ops) / 1000000 - NintyNineLatNano := intervalStats.latNano[int64(math.Round(0.99*float64(ops))) - 1] - NinetyNineLat = float64(NintyNineLatNano) / 1000000 - } - seconds := float64(intervalNano) / 1000000000 - mbps := float64(intervalStats.bytes) / seconds / bytefmt.MEGABYTE - iops := float64(ops) / seconds - - log.Printf( - "Loop: %d, Int: %s, Dur(s): %.1f, Mode: %s, Ops: %d, MB/s: %.2f, IO/s: %.0f, Lat(ms): [ min: %.1f, avg: %.1f, 99%%: %.1f, max: %.1f ], Slowdowns: %d", - stats.loop, - intervalName, - seconds, - stats.mode, - ops, - mbps, - iops, - minLat, - avgLat, - NinetyNineLat, - maxLat, - intervalStats.slowdowns) - return true + is := IntervalStats{stats.loop, "TOTAL", stats.mode, bytes, slowdowns, stats.endNano - stats.startNano, tmpLat} + return 
is.makeOutputStats(), true } // Only safe to call from the calling thread func (stats *Stats) updateIntervals(thread_num int) int64 { curInterval := stats.threadStats[thread_num].curInterval - newInterval := stats.threadStats[thread_num].updateIntervals(stats.intervalNano) + newInterval := stats.threadStats[thread_num].updateIntervals(stats.loop, stats.mode, stats.intervalNano) // Finish has already been called if curInterval < 0 { @@ -332,7 +421,9 @@ func (stats *Stats) updateIntervals(thread_num int) int64 { count := atomic.AddInt32(cp, 1) if count == int32(stats.threads) { - stats.logI(i) + if is, ok := stats.makeOutputStats(i); ok { + is.log() + } } } return newInterval @@ -356,10 +447,10 @@ func (stats *Stats) addSlowDown(thread_num int) { } func (stats *Stats) finish(thread_num int) { - stats.threadStats[thread_num].updateIntervals(stats.intervalNano) + stats.updateIntervals(thread_num) stats.threadStats[thread_num].finish() count := atomic.AddInt32(&stats.completions, 1) - if count == int32(stats.threads) { + if count == int32(stats.threads) { stats.endNano = time.Now().UnixNano() } } @@ -406,8 +497,8 @@ func runUpload(thread_num int, fendtime time.Time, stats *Stats) { break } } + stats.finish(thread_num) atomic.AddInt64(&running_threads, -1) - stats.finish(thread_num) } func runDownload(thread_num int, fendtime time.Time, stats *Stats) { @@ -454,8 +545,8 @@ func runDownload(thread_num int, fendtime time.Time, stats *Stats) { } } + stats.finish(thread_num) atomic.AddInt64(&running_threads, -1) - stats.finish(thread_num) } func runDelete(thread_num int, stats *Stats) { @@ -499,8 +590,8 @@ func runDelete(thread_num int, stats *Stats) { break } } - atomic.AddInt64(&running_threads, -1) stats.finish(thread_num) + atomic.AddInt64(&running_threads, -1) } func runBucketDelete(thread_num int, stats *Stats) { @@ -526,8 +617,8 @@ func runBucketDelete(thread_num int, stats *Stats) { } stats.addOp(thread_num, 0, end-start) } - atomic.AddInt64(&running_threads, -1) 
stats.finish(thread_num) + atomic.AddInt64(&running_threads, -1) } var cfg *aws.Config @@ -555,8 +646,8 @@ func runBucketsInit(thread_num int, stats *Stats) { } stats.addOp(thread_num, 0, end-start) } + stats.finish(thread_num) atomic.AddInt64(&running_threads, -1) - stats.finish(thread_num) } func runBucketsClear(thread_num int, stats *Stats) { @@ -592,11 +683,10 @@ func runBucketsClear(thread_num int, stats *Stats) { n = len(out.Contents) } } - atomic.AddInt64(&running_threads, -1) stats.finish(thread_num) + atomic.AddInt64(&running_threads, -1) } - func runWrapper(loop int, r rune) { op_counter = -1 running_threads = int64(threads) @@ -655,14 +745,32 @@ func runWrapper(loop int, r rune) { time.Sleep(time.Millisecond) } - // If the user didn't set the object_count, we can set it here // to limit subsequent get/del tests to valid objects only. if r == 'p' && object_count < 0 { object_count = op_counter + 1 object_count_flag = true } - stats.log() + + // Print Interval Output + for i := int64(0); i >= 0;i++ { + if o, ok := stats.makeOutputStats(i); ok { + if csvWriter != nil { + o.csv(csvWriter) + } + } else { + break + } + } + + // Print Totals Output + if o, ok := stats.makeTotalStats(); ok { + o.log() + if csvWriter != nil { + o.csv(csvWriter) + csvWriter.Flush() + } + } } func init() { @@ -675,6 +783,7 @@ func init() { myflag.StringVar(&bucket_prefix, "bp", "hotsauce_bench", "Prefix for buckets") myflag.StringVar(®ion, "r", "us-east-1", "Region for testing") myflag.StringVar(&modes, "m", "cxipgdx", "Run modes in order. 
See NOTES for more info") + myflag.StringVar(&output, "o", "", "Write CSV output to this file") myflag.Int64Var(&object_count, "n", -1, "Maximum number of objects <-1 for unlimited>") myflag.Int64Var(&bucket_count, "b", 1, "Number of buckets to distribute IOs across") myflag.IntVar(&duration_secs, "d", 60, "Maximum test duration in seconds <-1 for unlimited>") @@ -793,6 +902,18 @@ func main() { buckets = append(buckets, fmt.Sprintf("%s%012d", bucket_prefix, i)) } + // Init CSV file + if output != "" { + file, err := os.OpenFile(output, os.O_CREATE|os.O_WRONLY, 0777) + defer file.Close() + if err != nil { + log.Fatal("Could not open CSV file for writing.") + } else { + csvWriter = csv.NewWriter(file) + o := OutputStats{} + o.csv_header(csvWriter) + } + } // Loop running the tests for loop := 0; loop < loops; loop++ { for _, r := range modes {