From fdf3e5cc3707068639ad0126a668a990bf4b56d4 Mon Sep 17 00:00:00 2001
From: Коренберг Марк
Date: Mon, 14 May 2018 12:44:51 +0500
Subject: [PATCH] Style cleanup: whitespace, import ordering, and OrderedDict
 for stable result ordering

---
 main.py | 51 ++++++++++++++++++++++++---------------------------
 1 file changed, 24 insertions(+), 27 deletions(-)

diff --git a/main.py b/main.py
index a1bace3..bb64437 100755
--- a/main.py
+++ b/main.py
@@ -1,23 +1,22 @@
 #!/usr/bin/python3
-import sys
 import json
-import argparse
 import logging
 import os
-import rados
-from itertools import cycle, count
-from pprint import pprint
-
+import sys
+from collections import OrderedDict
+from itertools import cycle
 import ceph_argparse
+import rados
 
-if sys.version_info >= (3,0):
+if sys.version_info >= (3, 0):
     from time import monotonic
 else:
     from time import time as monotonic
 
 log = logging.getLogger(__name__)
 
+
 def do_bench(secs, name, ioctx, data):
     b = a = monotonic()
     stop = a + secs
@@ -33,15 +32,16 @@ def do_bench(secs, name, ioctx, data):
         ioctx.remove_object(name)
     except Exception as e:
         log.error('Failed to remove object %s: %r', name, e)
-    return b-a, ops
+    return b - a, ops
+
 
 def _cmd(cluster, cmd, **kwargs):
     target = ceph_argparse.find_cmd_target(cmd.split())
     argdict = {
-            'prefix': cmd,
-            'target': target,
-            'format': 'json',
+        'prefix': cmd,
+        'target': target,
+        'format': 'json',
     }
     argdict.update(kwargs)
     log.debug('Calling ceph: %r', argdict)
@@ -56,13 +56,12 @@ def _cmd(cluster, cmd, **kwargs):
 
     return json.loads(outbuf.decode('utf-8'))
 
-
 def main():
     logging.basicConfig(level=logging.INFO)
     conf = {'keyring': 'keyring.conf'}
     pool = 'single'
     MODE = 'HOST'  # HOST or OSD
-    secs = 10 # secs to benchmark
+    secs = 10  # secs to benchmark
     bytesperobj = 4 * 1024 * 1024
     bigdata = cycle([os.urandom(bytesperobj), os.urandom(bytesperobj)])
 
@@ -71,38 +70,35 @@ def main():
     log.info('Attaching to CEPH cluster. pool=%s', pool)
     with rados.Rados(conffile='/etc/ceph/ceph.conf', conf=conf) as cluster:
         log.info('Getting map osd -> host.')
-        #info = json.loads(subprocess.check_output(['ceph', 'osd', 'tree', '--format=json']).decode('utf-8'))
+        # info = json.loads(subprocess.check_output(['ceph', 'osd', 'tree', '--format=json']).decode('utf-8'))
         info = _cmd(cluster, 'osd tree')
         osd2host = {}
         for i in info['nodes']:
-            if i ['type'] != 'host':
+            if i['type'] != 'host':
                 continue
             for j in i['children']:
                 osd2host[j] = i['name']
         pool_id = cluster.pool_lookup(pool)
 
-        log.info('Getting pg => acting set.')
-        #info = json.loads(subprocess.check_output(['ceph', '--format=json', 'pg', 'dump', 'pgs_brief']).decode('utf-8'))
+        # info = json.loads(subprocess.check_output(['ceph', '--format=json', 'pg', 'dump', 'pgs_brief']).decode('utf-8'))
         info = _cmd(cluster, 'pg dump', dumpcontents=['pgs_brief'])
-
-        pgid2acting = {i['pgid']:tuple(i['acting']) for i in info if i['pgid'].startswith(str(pool_id))}
+        pgid2acting = {i['pgid']: tuple(i['acting']) for i in info if i['pgid'].startswith(str(pool_id))}
 
         if MODE == 'HOST':
             bench_items = set(tuple(osd2host[i] for i in osds) for osds in pgid2acting.values())
         else:
             bench_items = set(pgid2acting.values())
 
-
         log.info('Figuring out object names for %d %s combinations.', len(bench_items), MODE)
         obj2info = dict()
         cnt = 0
-        totlen=len(bench_items)
+        totlen = len(bench_items)
         while bench_items:
             cnt = cnt + 1
             name = 'bench_%d' % cnt
-            #info = json.loads(subprocess.check_output(['ceph', '-f', 'json', 'osd', 'map', pool, name]).decode('utf-8'))
+            # info = json.loads(subprocess.check_output(['ceph', '-f', 'json', 'osd', 'map', pool, name]).decode('utf-8'))
             info = _cmd(cluster, 'osd map', object=name, pool=pool)
 
             acting = tuple(info['acting'])
@@ -117,11 +113,11 @@ def main():
                 continue
             bench_items.remove(bench_item)
 
-            log.info('Found %d/%d', totlen-len(bench_items), totlen)
+            log.info('Found %d/%d', totlen - len(bench_items), totlen)
 
             obj2info[name] = (hosts, acting)
 
-    obj2info=dict(sorted(obj2info.items(), key=lambda i: i[1]))
+    obj2info = OrderedDict(sorted(obj2info.items(), key=lambda i: i[1]))
 
     log.debug('Opening IO context for pool %s.', pool)
     with cluster.open_ioctx(pool) as ioctx:
@@ -130,15 +126,16 @@ def main():
             log.debug('Benchmarking IOPS on OSD %r (%r)', list(acting), ','.join(hosts))
             delay, ops = do_bench(secs, name, ioctx, cycle([b'q', b'w']))
             iops = ops / delay
-            lat = delay / ops # in sec
-            log.debug('Benchmarking Linear write on OSD %r (%r) blocksize=%d MiB', list(acting), ','.join(hosts), bytesperobj//(1024*1024))
+            lat = delay / ops  # in sec
+            log.debug('Benchmarking Linear write on OSD %r (%r) blocksize=%d MiB', list(acting), ','.join(hosts),
+                      bytesperobj // (1024 * 1024))
             delay, ops = do_bench(secs, name, ioctx, bigdata)
             bsec = ops * bytesperobj / delay
             log.info(
                 'OSD %r (%r): %2.2f IOPS, lat=%.4f ms. %2.2f MB/sec (%2.2f Mbit/s).',
                 list(acting),
-                 ','.join(hosts),
+                ','.join(hosts),
                 iops,
                 lat * 1000,
                 bsec / 1000000,
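
Note on the one behavioral change above: replacing dict(sorted(...)) with
OrderedDict(sorted(...)) matters on the interpreters this script supports.
Before Python 3.7 a plain dict does not guarantee insertion order, so
dict(sorted(...)) could still iterate in an arbitrary order. A minimal,
self-contained sketch (hypothetical bench names and hosts, not taken from
the patch):

    from collections import OrderedDict

    # Hypothetical results: object name -> (hosts, acting OSD set).
    obj2info = {
        'bench_3': (('host2',), (4, 5)),
        'bench_1': (('host1',), (0, 2)),
        'bench_2': (('host1', 'host2'), (1, 3)),
    }

    # sorted() orders the items by their (hosts, acting) value; OrderedDict
    # preserves that insertion order on every Python version, so the
    # benchmark loop always reports results in the same, stable order.
    obj2info = OrderedDict(sorted(obj2info.items(), key=lambda i: i[1]))

    for name, (hosts, acting) in obj2info.items():
        print(name, hosts, acting)
    # bench_1 ('host1',) (0, 2)
    # bench_2 ('host1', 'host2') (1, 3)
    # bench_3 ('host2',) (4, 5)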
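
For reference, the reporting arithmetic in the loop above (unchanged by this
patch) turns a do_bench() result into the logged figures. A sketch with
made-up numbers; the Mbit/s value is assumed to be bsec * 8 / 1000000, which
the format string implies but the truncated hunk does not show:

    # Hypothetical do_bench() outcome: 250 writes of 4 MiB in 10 seconds.
    delay, ops = 10.0, 250
    bytesperobj = 4 * 1024 * 1024

    iops = ops / delay                  # 25.00 operations per second
    lat = delay / ops                   # 0.04 s mean latency per operation
    bsec = ops * bytesperobj / delay    # linear write rate in bytes/sec

    print('%2.2f IOPS, lat=%.4f ms. %2.2f MB/sec (%2.2f Mbit/s).'
          % (iops, lat * 1000, bsec / 1000000, bsec * 8 / 1000000))
    # 25.00 IOPS, lat=40.0000 ms. 104.86 MB/sec (838.86 Mbit/s).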