Compare commits

...

7 Commits

Author SHA1 Message Date
Vitaliy Filippov 3094358ec2 Fix autovivification leading to extra empty keys in pool-create
Test / test_snapshot_chain_ec (push) Successful in 2m48s Details
Test / test_rebalance_verify_imm (push) Successful in 3m4s Details
Test / test_root_node (push) Successful in 10s Details
Test / test_rebalance_verify (push) Successful in 3m44s Details
Test / test_switch_primary (push) Successful in 36s Details
Test / test_write (push) Successful in 39s Details
Test / test_write_no_same (push) Successful in 19s Details
Test / test_write_xor (push) Successful in 1m4s Details
Test / test_rebalance_verify_ec_imm (push) Successful in 3m36s Details
Test / test_rebalance_verify_ec (push) Successful in 4m21s Details
Test / test_heal_pg_size_2 (push) Successful in 3m33s Details
Test / test_heal_csum_32k_dmj (push) Successful in 5m41s Details
Test / test_heal_ec (push) Successful in 6m5s Details
Test / test_heal_csum_32k_dj (push) Successful in 5m29s Details
Test / test_heal_csum_32k (push) Successful in 6m11s Details
Test / test_osd_tags (push) Successful in 22s Details
Test / test_enospc (push) Successful in 2m30s Details
Test / test_heal_csum_4k (push) Successful in 6m9s Details
Test / test_heal_csum_4k_dj (push) Successful in 6m11s Details
Test / test_heal_csum_4k_dmj (push) Successful in 6m14s Details
Test / test_scrub (push) Successful in 42s Details
Test / test_enospc_imm (push) Successful in 47s Details
Test / test_enospc_xor (push) Successful in 1m4s Details
Test / test_enospc_imm_xor (push) Successful in 1m1s Details
Test / test_scrub_zero_osd_2 (push) Successful in 27s Details
Test / test_scrub_xor (push) Successful in 27s Details
Test / test_nfs (push) Successful in 20s Details
Test / test_scrub_pg_size_6_pg_minsize_4_osd_count_6_ec (push) Successful in 34s Details
Test / test_scrub_pg_size_3 (push) Successful in 49s Details
Test / test_scrub_ec (push) Successful in 31s Details
2024-04-20 02:04:09 +03:00
Vitaliy Filippov 87f666d2a2 Filter out OSDs reweighted to 0 2024-04-20 02:03:53 +03:00
Vitaliy Filippov bd7fe4ef8f Filter out non-existing OSDs added in node_placement 2024-04-20 02:03:36 +03:00
Vitaliy Filippov 1b3f9a1416 Do not set non-existing OSD weight to 0, we'll remove them instead 2024-04-20 02:03:11 +03:00
Vitaliy Filippov a7b7354f38 Do not recheck primary distribution when pool has no PGs 2024-04-20 02:02:47 +03:00
Vitaliy Filippov 765befa22f Remove empty nodes from tree because PG DSL expects that all leaf nodes are OSDs 2024-04-20 02:02:28 +03:00
Vitaliy Filippov 87b3ab94fe Do not disable require-atomic-updates and no-unused-vars 2024-04-20 02:02:13 +03:00
7 changed files with 68 additions and 41 deletions

View File

@@ -30,18 +30,12 @@ module.exports = {
"error",
"always"
],
"require-atomic-updates": [
"off"
],
"no-useless-escape": [
"off"
],
"no-control-regex": [
"off"
],
"no-unused-vars": [
"off"
],
"no-empty": [
"off"
],

View File

@@ -97,7 +97,6 @@ function scale_pg_history(prev_pg_history, prev_pgs, new_pgs)
function scale_pg_count(prev_pgs, new_pg_count)
{
const old_pg_count = prev_pgs.length;
// Just for the lp_solve optimizer - pick a "previous" PG for each "new" one
if (prev_pgs.length < new_pg_count)
{

View File

@@ -77,7 +77,7 @@ async function optimize_initial({ osd_weights, combinator, pg_count, pg_size = 3
{
if (osd !== NO_OSD)
{
let osd_pg_count = (osd_weights[osd]||0)/total_weight*pg_effsize*pg_count;
let osd_pg_count = osd_weights[osd]/total_weight*pg_effsize*pg_count;
lp += pg_per_osd[osd].join(' + ')+' <= '+osd_pg_count+';\n';
}
}
@@ -215,7 +215,7 @@ function calc_intersect_weights(old_pg_size, pg_size, pg_count, prev_weights, al
{
const intersect_count = ordered
? pg.reduce((a, osd, i) => a + (prev_hash[osd] == 1+i ? 1 : 0), 0)
: pg.reduce((a, osd, i) => a + (prev_hash[osd] ? 1 : 0), 0);
: pg.reduce((a, osd) => a + (prev_hash[osd] ? 1 : 0), 0);
if (max_int < intersect_count)
{
max_int = intersect_count;
@@ -299,7 +299,7 @@ async function optimize_change({ prev_pgs: prev_int_pgs, osd_weights, combinator
)).join(' + ');
const rm_osd_pg_count = (prev_pg_per_osd[osd]||[])
.reduce((a, [ old_pg_name, space ]) => (a + (all_pgs_hash[old_pg_name] ? space : 0)), 0);
const osd_pg_count = (osd_weights[osd]||0)*pg_effsize/total_weight*pg_count - rm_osd_pg_count;
const osd_pg_count = osd_weights[osd]*pg_effsize/total_weight*pg_count - rm_osd_pg_count;
lp += osd_sum + ' <= ' + osd_pg_count + ';\n';
}
}

View File

@@ -585,7 +585,7 @@ class Mon
now = Date.now();
}
tried[base] = now;
const ok = await new Promise((ok, no) =>
const ok = await new Promise(ok =>
{
const timer_id = setTimeout(() =>
{
@@ -875,33 +875,19 @@ class Mon
levels.osd = levels.osd || 101;
const tree = {};
let up_osds = {};
for (const node_id in this.state.config.node_placement||{})
{
const node_cfg = this.state.config.node_placement[node_id];
if (/^\d+$/.exec(node_id))
{
node_cfg.level = 'osd';
}
if (!node_id || !node_cfg.level || !levels[node_cfg.level])
{
// All nodes must have non-empty IDs and valid levels
continue;
}
tree[node_id] = { id: node_id, level: node_cfg.level, parent: node_cfg.parent, children: [] };
}
// This requires monitor system time to be in sync with OSD system times (at least to some extent)
const down_time = Date.now()/1000 - this.config.osd_out_time;
for (const osd_num of this.all_osds().sort((a, b) => a - b))
{
const stat = this.state.osd.stats[osd_num];
const osd_cfg = this.state.config.osd[osd_num];
if (stat && stat.size && (this.state.osd.state[osd_num] || Number(stat.time) >= down_time ||
let reweight = osd_cfg == null ? 1 : Number(osd_cfg.reweight);
if (reweight < 0 || isNaN(reweight))
reweight = 1;
if (stat && stat.size && reweight && (this.state.osd.state[osd_num] || Number(stat.time) >= down_time ||
osd_cfg && osd_cfg.noout))
{
// Numeric IDs are reserved for OSDs
let reweight = osd_cfg == null ? 1 : Number(osd_cfg.reweight);
if (reweight < 0 || isNaN(reweight))
reweight = 1;
if (this.state.osd.state[osd_num] && reweight > 0)
{
// React to down OSDs immediately
@@ -929,6 +915,29 @@ class Mon
}
}
}
for (const node_id in this.state.config.node_placement||{})
{
const node_cfg = this.state.config.node_placement[node_id];
if (/^\d+$/.exec(node_id))
{
node_cfg.level = 'osd';
}
if (!node_id || !node_cfg.level || !levels[node_cfg.level] ||
node_cfg.level === 'osd' && !tree[node_id])
{
// All nodes must have non-empty IDs and valid levels
// OSDs have to actually exist
continue;
}
tree[node_id] = tree[node_id] || {};
tree[node_id].id = node_id;
tree[node_id].level = node_cfg.level;
tree[node_id].parent = node_cfg.parent;
if (node_cfg.level !== 'osd')
{
tree[node_id].children = [];
}
}
return { up_osds, levels, osd_tree: tree };
}
@@ -958,6 +967,25 @@ class Mon
const parent = parent_level && parent_level < node_level ? node_cfg.parent : '';
tree[parent].children.push(tree[node_id]);
}
// Delete empty nodes
let deleted = 0;
do
{
deleted = 0;
for (const node_id in tree)
{
if (tree[node_id].level !== 'osd' && (!tree[node_id].children || !tree[node_id].children.length))
{
const parent = tree[node_id].parent;
if (parent)
{
tree[parent].children = tree[parent].children.filter(c => c != tree[node_id]);
}
deleted++;
delete tree[node_id];
}
}
} while (deleted > 0);
return tree;
}
@@ -994,7 +1022,7 @@ class Mon
const key = b64(this.etcd_prefix+'/osd/state/'+osd_num);
checks.push({ key, target: 'MOD', result: 'LESS', mod_revision: ''+this.etcd_watch_revision });
}
const res = await this.etcd_call('/kv/txn', {
await this.etcd_call('/kv/txn', {
compare: [
{ key: b64(this.etcd_prefix+'/mon/master'), target: 'LEASE', lease: ''+this.etcd_lease_id },
{ key: b64(this.etcd_prefix+'/config/pgs'), target: 'MOD', mod_revision: ''+this.etcd_watch_revision, result: 'LESS' },
@@ -1522,11 +1550,14 @@ class Mon
{
continue;
}
const replicated = pool_cfg.scheme === 'replicated';
const aff_osds = this.get_affinity_osds(pool_cfg, up_osds, osd_tree);
this.reset_rng();
for (let pg_num = 1; pg_num <= pool_cfg.pg_count; pg_num++)
{
if (!this.state.config.pgs.items[pool_id])
{
continue;
}
const pg_cfg = this.state.config.pgs.items[pool_id][pg_num];
if (pg_cfg)
{
@@ -1696,7 +1727,6 @@ class Mon
derive_osd_stats(st, prev, prev_diff)
{
const zero_stats = { op: { bps: 0n, iops: 0n, lat: 0n }, subop: { iops: 0n, lat: 0n }, recovery: { bps: 0n, iops: 0n } };
const diff = { op_stats: {}, subop_stats: {}, recovery_stats: {}, inode_stats: {} };
if (!st || !st.time || !prev || !prev.time || prev.time >= st.time)
{
@@ -1736,7 +1766,7 @@ class Mon
}
for (const pool_id in st.inode_stats||{})
{
const pool_diff = diff.inode_stats[pool_id] = {};
diff.inode_stats[pool_id] = {};
for (const inode_num in st.inode_stats[pool_id])
{
const inode_diff = diff.inode_stats[pool_id][inode_num] = {};
@@ -2154,7 +2184,7 @@ class Mon
_die(err, code)
{
// In fact we can just try to rejoin
console.error(new Error(err || 'Cluster connection failed'));
console.error(err instanceof Error ? err : new Error(err || 'Cluster connection failed'));
process.exit(code || 2);
}
@@ -2178,7 +2208,7 @@ class Mon
function POST(url, body, timeout)
{
return new Promise((ok, no) =>
return new Promise(ok =>
{
const body_text = Buffer.from(JSON.stringify(body));
let timer_id = timeout > 0 ? setTimeout(() =>

View File

@@ -91,7 +91,7 @@ async function run()
function system(cmd)
{
return new Promise((ok, no) => child_process.exec(cmd, { maxBuffer: 64*1024*1024 }, (err, stdout, stderr) => (err ? no(err.message) : ok(stdout))));
return new Promise((ok, no) => child_process.exec(cmd, { maxBuffer: 64*1024*1024 }, (err, stdout/*, stderr*/) => (err ? no(err.message) : ok(stdout))));
}
run().catch(err => { console.error(err); process.exit(1); });

View File

@@ -198,7 +198,6 @@ function all_combinations(osd_tree, pg_size, ordered, count)
function check_combinations(osd_tree, pgs)
{
const hosts = Object.keys(osd_tree).sort();
const host_per_osd = {};
for (const host in osd_tree)
{
@@ -235,6 +234,7 @@ function compat(params)
module.exports = {
flatten_tree,
all_combinations,
SimpleCombinator,
compat,
NO_OSD,

View File

@@ -14,7 +14,7 @@
struct pool_creator_t
{
cli_tool_t *parent;
json11::Json::object cfg;
json11::Json cfg;
bool force = false;
bool wait = false;
@@ -55,8 +55,12 @@ struct pool_creator_t
goto resume_8;
// Validate pool parameters
result.text = validate_pool_config(cfg, json11::Json(), parent->cli->st_cli.global_block_size,
parent->cli->st_cli.global_bitmap_granularity, force);
{
auto new_cfg = cfg.object_items();
result.text = validate_pool_config(new_cfg, json11::Json(), parent->cli->st_cli.global_block_size,
parent->cli->st_cli.global_bitmap_granularity, force);
cfg = new_cfg;
}
if (result.text != "")
{
result.err = EINVAL;
@@ -605,7 +609,7 @@ std::function<bool(cli_result_t &)> cli_tool_t::start_pool_create(json11::Json c
{
auto pool_creator = new pool_creator_t();
pool_creator->parent = this;
pool_creator->cfg = cfg.object_items();
pool_creator->cfg = cfg;
pool_creator->force = cfg["force"].bool_value();
pool_creator->wait = cfg["wait"].bool_value();
return [pool_creator](cli_result_t & result)