vitastor-cli: add commands to control pools: pool-create, pool-ls, pool-modify, pool-rm

PR #59 - https://github.com/vitalif/vitastor/pull/58/commits

By MIND Software LLC

By submitting this pull request, I accept Vitastor CLA
Branch: master
Author: idelson, 2024-02-01 18:44:14 +03:00 (committed by Vitaliy Filippov)
Parent: 02d1f16bbd
Commit: dc92851322
19 changed files with 2700 additions and 263 deletions


@ -145,7 +145,6 @@ add_library(vitastor_client SHARED
cli_status.cpp
cli_describe.cpp
cli_fix.cpp
cli_df.cpp
cli_ls.cpp
cli_create.cpp
cli_modify.cpp
@ -154,6 +153,11 @@ add_library(vitastor_client SHARED
cli_rm_data.cpp
cli_rm.cpp
cli_rm_osd.cpp
cli_pool_cfg.cpp
cli_pool_create.cpp
cli_pool_ls.cpp
cli_pool_modify.cpp
cli_pool_rm.cpp
)
set_target_properties(vitastor_client PROPERTIES PUBLIC_HEADER "vitastor_c.h")
target_link_libraries(vitastor_client


@ -113,6 +113,105 @@ static const char* help_text =
" With --dry-run only checks if deletion is possible without data loss and\n"
" redundancy degradation.\n"
"\n"
"vitastor-cli create-pool <name> --scheme <scheme> -s <pg_size> --pg_minsize <pg_minsize> -n <pg_count> --parity_chunks <number> [OPTIONS]\n"
" Create a pool.\n"
" --scheme <scheme>\n"
" Redundancy scheme used for data in this pool. One of: \"replicated\", \"xor\", \"ec\" or \"jerasure\".\n"
" It's \"replicated\" by default.\n"
" --ec <N>+<K>\n"
" Shortcut for 'ec' scheme. scheme = ec, pg_size = N+K, parity_chunks = K.\n"
" -s|--pg_size <size>\n"
" Total number of disks for PGs of this pool - i.e., number of replicas for replicated pools and number of data plus parity disks for EC/XOR pools.\n"
" --pg_minsize <size>\n"
" Number of available live OSDs for PGs of this pool to remain active.\n"
" -n|--pg_count <count>\n"
" Number of PGs for this pool.\n"
" --parity_chunks <number>\n"
" Number of parity chunks for EC/XOR pools\n"
" -f|--force\n"
" Proceed without checking pool/OSD params (pg_size, block_size, bitmap_granularity, and immediate_commit).\n"
" --failure_domain <failure_domain>\n"
" Failure domain specification. Must be \"host\" or \"osd\" or refer to one of the placement tree levels, defined in placement_levels.\n"
" --max_osd_combinations <number>\n"
" This parameter specifies the maximum number of combinations to generate when optimising PG placement.\n"
" --block_size <size>\n"
" Block size for this pool.\n"
" --bitmap_granularity <granularity>\n"
" \"Sector\" size of virtual disks in this pool.\n"
" --immediate_commit <all|small|none>\n"
" Immediate commit setting for this pool. One of \"all\", \"small\" and \"none\".\n"
" --pg_stripe_size <size>\n"
" Specifies the stripe size for this pool according to which images are split into different PGs.\n"
" --root_node <node>\n"
" Specifies the root node of the OSD tree to restrict this pool OSDs to.\n"
" --osd_tags <tags>\n"
" Specifies OSD tags to restrict this pool to.\n"
" Example: --osd_tags tag0 or --osd_tags tag0,tag1\n"
" --primary_affinity_tags <tags>\n"
" Specifies OSD tags to prefer putting primary OSDs in this pool to.\n"
" Example: --primary_affinity_tags tag0 or --primary_affinity_tags tag0,tag1\n"
" --scrub_interval <time_interval>\n"
" Automatic scrubbing interval for this pool. Format: number + unit s/m/h/d/M/y.\n"
" Examples:\n"
" vitastor-cli create-pool test_x4 -s 4 -n 32\n"
" vitastor-cli create-pool test_ec42 --ec 4+2 -n 32\n"
"\n"
"vitastor-cli modify-pool <id|name> [--name <new_name>] [-s <pg_size>] [--pg_minsize <pg_minsize>] [-n <pg_count>] [OPTIONS]\n"
" Modify an existing pool.\n"
" --name <new_name>\n"
" Change name of this pool.\n"
" -s|--pg_size <size>\n"
" Total number of disks for PGs of this pool - i.e., number of replicas for replicated pools and number of data plus parity disks for EC/XOR pools.\n"
" --pg_minsize <size>\n"
" Number of available live OSDs for PGs of this pool to remain active.\n"
" -n|--pg_count <count>\n"
" Number of PGs for this pool.\n"
" -f|--force\n"
" Proceed without checking pool/OSD params (block_size, bitmap_granularity and immediate_commit).\n"
" --failure_domain <failure_domain>\n"
" Failure domain specification. Must be \"host\" or \"osd\" or refer to one of the placement tree levels, defined in placement_levels.\n"
" --max_osd_combinations <number>\n"
" This parameter specifies the maximum number of combinations to generate when optimising PG placement.\n"
" --block_size <size>\n"
" Block size for this pool.\n"
" --immediate_commit <all|small|none>\n"
" Immediate commit setting for this pool. One of \"all\", \"small\" and \"none\".\n"
" --pg_stripe_size <size>\n"
" Specifies the stripe size for this pool according to which images are split into different PGs.\n"
" --root_node <node>\n"
" Specifies the root node of the OSD tree to restrict this pool OSDs to.\n"
" --osd_tags <tags>\n"
" Specifies OSD tags to restrict this pool to.\n"
" Example: --osd_tags tag0 or --osd_tags tag0,tag1\n"
" --primary_affinity_tags <tags>\n"
" Specifies OSD tags to prefer putting primary OSDs in this pool to.\n"
" Example: --primary_affinity_tags tag0 or --primary_affinity_tags tag0,tag1\n"
" --scrub_interval <time_interval>\n"
" Automatic scrubbing interval for this pool. Format: number + unit s/m/h/d/M/y.\n"
" Examples:\n"
" vitastor-cli modify-pool pool_A -name pool_B\n"
" vitastor-cli modify-pool 2 -s 4 -n 128 --block_size 262144\n"
"\n"
"vitastor-cli rm-pool [--force] <id|name>\n"
" Remove existing pool from cluster.\n"
" Refuses to remove pools with related Image and/or Snapshot data without --force.\n"
" Examples:\n"
" vitastor-cli rm-pool test_pool\n"
" vitastor-cli rm-pool --force 2\n"
"\n"
"vitastor-cli ls-pool [-l] [-p POOL] [--sort FIELD] [-r] [-n N] [--stats] [<glob> ...]\n"
" List pool (only matching <glob> patterns if passed).\n"
" -p|--pool POOL Show in detail pool ID or name\n"
" -l|--long Show all available field\n"
" --sort FIELD Sort by specified field (id, name, pg_count, scheme_name, used_byte, total, max_available, used_pct, space_efficiency, status, restore, root_node, failure_domain, osd_tags, primary_affinity_tags)\n"
" -r|--reverse Sort in descending order\n"
" -n|--count N Only list first N items\n"
" --stats Performance statistics\n"
" Examples:\n"
" vitastor-cli ls-pool -l\n"
" vitastor-cli ls-pool -l --sort pool_name\n"
" vitastor-cli ls-pool -p 2\n"
"\n"
"Use vitastor-cli --help <command> for command details or vitastor-cli --help --all for all details.\n"
"\n"
"GLOBAL OPTIONS:\n"
@ -133,6 +232,8 @@ static json11::Json::object parse_args(int narg, const char *args[])
cfg["progress"] = "1";
for (int i = 1; i < narg; i++)
{
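// An option consumes the next argument as its value only if there is a next argument and it does not start with '-'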
bool argHasValue = (i < narg-1 && args[i+1][0] != '-');
if (args[i][0] == '-' && args[i][1] == 'h' && args[i][2] == 0)
{
cfg["help"] = "1";
@ -143,15 +244,15 @@ static json11::Json::object parse_args(int narg, const char *args[])
}
else if (args[i][0] == '-' && args[i][1] == 'n' && args[i][2] == 0)
{
cfg["count"] = args[++i];
cfg["count"] = argHasValue ? args[++i] : "";
}
else if (args[i][0] == '-' && args[i][1] == 'p' && args[i][2] == 0)
{
cfg["pool"] = args[++i];
cfg["pool"] = argHasValue ? args[++i] : "";
}
else if (args[i][0] == '-' && args[i][1] == 's' && args[i][2] == 0)
{
cfg["size"] = args[++i];
cfg["size"] = argHasValue ? args[++i] : "";
}
else if (args[i][0] == '-' && args[i][1] == 'r' && args[i][2] == 0)
{
@ -164,7 +265,7 @@ static json11::Json::object parse_args(int narg, const char *args[])
else if (args[i][0] == '-' && args[i][1] == '-')
{
const char *opt = args[i]+2;
cfg[opt] = i == narg-1 || !strcmp(opt, "json") ||
if (!strcmp(opt, "json") ||
!strcmp(opt, "wait-list") || !strcmp(opt, "wait_list") ||
!strcmp(opt, "long") || !strcmp(opt, "del") ||
!strcmp(opt, "no-color") || !strcmp(opt, "no_color") ||
@ -172,9 +273,15 @@ static json11::Json::object parse_args(int narg, const char *args[])
!strcmp(opt, "force") || !strcmp(opt, "reverse") ||
!strcmp(opt, "allow-data-loss") || !strcmp(opt, "allow_data_loss") ||
!strcmp(opt, "dry-run") || !strcmp(opt, "dry_run") ||
!strcmp(opt, "help") || !strcmp(opt, "all") ||
(!strcmp(opt, "writers-stopped") || !strcmp(opt, "writers_stopped")) && strcmp("1", args[i+1]) != 0
? "1" : args[++i];
!strcmp(opt, "help") || !strcmp(opt, "all") || !strcmp(opt, "stats") ||
!strcmp(opt, "writers-stopped") || !strcmp(opt, "writers_stopped"))
{
cfg[opt] = "1";
}
else
{
cfg[opt] = argHasValue ? args[++i] : "";
}
}
else
{
@ -217,7 +324,8 @@ static int run(cli_tool_t *p, json11::Json::object cfg)
else if (cmd[0] == "df")
{
// Show pool space stats
action_cb = p->start_df(cfg);
cfg["dfformat"] = "1";
action_cb = p->start_pool_ls(cfg);
}
else if (cmd[0] == "ls")
{
@ -324,6 +432,43 @@ static int run(cli_tool_t *p, json11::Json::object cfg)
// Allocate a new OSD number
action_cb = p->start_alloc_osd(cfg);
}
else if (cmd[0] == "create-pool")
{
// Create a new pool
if (cmd.size() > 1 && cfg["name"].is_null())
{
cfg["name"] = cmd[1];
}
action_cb = p->start_pool_create(cfg);
}
else if (cmd[0] == "modify-pool")
{
// Modify existing pool
if (cmd.size() > 1)
{
cfg["pool"] = cmd[1];
}
action_cb = p->start_pool_modify(cfg);
}
else if (cmd[0] == "rm-pool")
{
// Remove existing pool
if (cmd.size() > 1)
{
cfg["pool"] = cmd[1];
}
action_cb = p->start_pool_rm(cfg);
}
else if (cmd[0] == "ls-pool")
{
// Show pool list
if (cmd.size() > 1)
{
cmd.erase(cmd.begin(), cmd.begin()+1);
cfg["names"] = cmd;
}
action_cb = p->start_pool_ls(cfg);
}
else
{
result = { .err = EINVAL, .text = "unknown command: "+cmd[0].string_value() };


@ -46,6 +46,7 @@ public:
json11::Json etcd_result;
void parse_config(json11::Json::object & cfg);
json11::Json parse_tags(std::string tags);
void change_parent(inode_t cur, inode_t new_parent, cli_result_t *result);
inode_config_t* get_inode_cfg(const std::string & name);
@ -58,7 +59,6 @@ public:
std::function<bool(cli_result_t &)> start_status(json11::Json);
std::function<bool(cli_result_t &)> start_describe(json11::Json);
std::function<bool(cli_result_t &)> start_fix(json11::Json);
std::function<bool(cli_result_t &)> start_df(json11::Json);
std::function<bool(cli_result_t &)> start_ls(json11::Json);
std::function<bool(cli_result_t &)> start_create(json11::Json);
std::function<bool(cli_result_t &)> start_modify(json11::Json);
@ -68,6 +68,10 @@ public:
std::function<bool(cli_result_t &)> start_rm(json11::Json);
std::function<bool(cli_result_t &)> start_rm_osd(json11::Json cfg);
std::function<bool(cli_result_t &)> start_alloc_osd(json11::Json cfg);
std::function<bool(cli_result_t &)> start_pool_create(json11::Json);
std::function<bool(cli_result_t &)> start_pool_modify(json11::Json);
std::function<bool(cli_result_t &)> start_pool_rm(json11::Json);
std::function<bool(cli_result_t &)> start_pool_ls(json11::Json);
// Should be called like loop_and_wait(start_status(), <completion callback>)
void loop_and_wait(std::function<bool(cli_result_t &)> loop_cb, std::function<void(const cli_result_t &)> complete_cb);


@ -126,6 +126,32 @@ void cli_tool_t::parse_config(json11::Json::object & cfg)
list_first = cfg["wait_list"].uint64_value() ? true : false;
}
json11::Json cli_tool_t::parse_tags(std::string tags)
{
json11::Json json_tags;
// Format: "tag0" or "tag0,tag1,tag2"
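// "tag0" is returned as a plain JSON string, "tag0,tag1,tag2" becomes the JSON array ["tag0","tag1","tag2"]; empty elements are skipped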
if (tags.find(',') == std::string::npos)
{
json_tags = tags;
}
else
{
json11::Json::array json_tags_array;
while (tags.size())
{
auto pos = tags.find(',');
auto tag = tags.substr(0, pos);
if (tag != "")
{
json_tags_array.push_back(tag);
}
tags = pos == std::string::npos ? std::string("") : tags.substr(pos+1);
}
json_tags = json_tags_array;
}
return json_tags;
};
struct cli_result_looper_t
{
ring_consumer_t consumer;


@ -183,7 +183,16 @@ resume_3:
// Save into inode_config for library users to be able to take it from there immediately
new_cfg.mod_revision = parent->etcd_result["responses"][0]["response_put"]["header"]["revision"].uint64_value();
parent->cli->st_cli.insert_inode_config(new_cfg);
result = (cli_result_t){ .err = 0, .text = "Image "+image_name+" created" };
result = (cli_result_t){
.err = 0,
.text = "Image "+image_name+" created",
.data = json11::Json::object {
{ "name", image_name },
{ "pool", new_pool_name },
{ "parent", new_parent },
{ "size", size },
}
};
state = 100;
}
@ -251,7 +260,16 @@ resume_4:
// Save into inode_config for library users to be able to take it from there immediately
new_cfg.mod_revision = parent->etcd_result["responses"][0]["response_put"]["header"]["revision"].uint64_value();
parent->cli->st_cli.insert_inode_config(new_cfg);
result = (cli_result_t){ .err = 0, .text = "Snapshot "+image_name+"@"+new_snap+" created" };
result = (cli_result_t){
.err = 0,
.text = "Snapshot "+image_name+"@"+new_snap+" created",
.data = json11::Json::object {
{ "name", image_name+"@"+new_snap },
{ "pool", (uint64_t)new_pool_id },
{ "parent", new_parent },
{ "size", size },
}
};
state = 100;
}


@ -1,243 +0,0 @@
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 (see README.md for details)
#include "cli.h"
#include "cluster_client.h"
#include "str_util.h"
// List pools with space statistics
struct pool_lister_t
{
cli_tool_t *parent;
int state = 0;
json11::Json space_info;
cli_result_t result;
std::map<pool_id_t, json11::Json::object> pool_stats;
bool is_done()
{
return state == 100;
}
void get_stats()
{
if (state == 1)
goto resume_1;
// Space statistics - pool/stats/<pool>
parent->etcd_txn(json11::Json::object {
{ "success", json11::Json::array {
json11::Json::object {
{ "request_range", json11::Json::object {
{ "key", base64_encode(
parent->cli->st_cli.etcd_prefix+"/pool/stats/"
) },
{ "range_end", base64_encode(
parent->cli->st_cli.etcd_prefix+"/pool/stats0"
) },
} },
},
json11::Json::object {
{ "request_range", json11::Json::object {
{ "key", base64_encode(
parent->cli->st_cli.etcd_prefix+"/osd/stats/"
) },
{ "range_end", base64_encode(
parent->cli->st_cli.etcd_prefix+"/osd/stats0"
) },
} },
},
} },
});
state = 1;
resume_1:
if (parent->waiting > 0)
return;
if (parent->etcd_err.err)
{
result = parent->etcd_err;
state = 100;
return;
}
space_info = parent->etcd_result;
std::map<pool_id_t, uint64_t> osd_free;
for (auto & kv_item: space_info["responses"][0]["response_range"]["kvs"].array_items())
{
auto kv = parent->cli->st_cli.parse_etcd_kv(kv_item);
// pool ID
pool_id_t pool_id;
char null_byte = 0;
int scanned = sscanf(kv.key.substr(parent->cli->st_cli.etcd_prefix.length()).c_str(), "/pool/stats/%u%c", &pool_id, &null_byte);
if (scanned != 1 || !pool_id || pool_id >= POOL_ID_MAX)
{
fprintf(stderr, "Invalid key in etcd: %s\n", kv.key.c_str());
continue;
}
// pool/stats/<N>
pool_stats[pool_id] = kv.value.object_items();
}
for (auto & kv_item: space_info["responses"][1]["response_range"]["kvs"].array_items())
{
auto kv = parent->cli->st_cli.parse_etcd_kv(kv_item);
// osd ID
osd_num_t osd_num;
char null_byte = 0;
int scanned = sscanf(kv.key.substr(parent->cli->st_cli.etcd_prefix.length()).c_str(), "/osd/stats/%ju%c", &osd_num, &null_byte);
if (scanned != 1 || !osd_num || osd_num >= POOL_ID_MAX)
{
fprintf(stderr, "Invalid key in etcd: %s\n", kv.key.c_str());
continue;
}
// osd/stats/<N>::free
osd_free[osd_num] = kv.value["free"].uint64_value();
}
// Calculate max_avail for each pool
for (auto & pp: parent->cli->st_cli.pool_config)
{
auto & pool_cfg = pp.second;
uint64_t pool_avail = UINT64_MAX;
std::map<osd_num_t, uint64_t> pg_per_osd;
for (auto & pgp: pool_cfg.pg_config)
{
for (auto pg_osd: pgp.second.target_set)
{
if (pg_osd != 0)
{
pg_per_osd[pg_osd]++;
}
}
}
for (auto pg_per_pair: pg_per_osd)
{
uint64_t pg_free = osd_free[pg_per_pair.first] * pool_cfg.real_pg_count / pg_per_pair.second;
if (pool_avail > pg_free)
{
pool_avail = pg_free;
}
}
if (pool_avail == UINT64_MAX)
{
pool_avail = 0;
}
if (pool_cfg.scheme != POOL_SCHEME_REPLICATED)
{
pool_avail *= (pool_cfg.pg_size - pool_cfg.parity_chunks);
}
pool_stats[pool_cfg.id] = json11::Json::object {
{ "id", (uint64_t)pool_cfg.id },
{ "name", pool_cfg.name },
{ "pg_count", pool_cfg.pg_count },
{ "real_pg_count", pool_cfg.real_pg_count },
{ "scheme", pool_cfg.scheme == POOL_SCHEME_REPLICATED ? "replicated" : "ec" },
{ "scheme_name", pool_cfg.scheme == POOL_SCHEME_REPLICATED
? std::to_string(pool_cfg.pg_size)+"/"+std::to_string(pool_cfg.pg_minsize)
: "EC "+std::to_string(pool_cfg.pg_size-pool_cfg.parity_chunks)+"+"+std::to_string(pool_cfg.parity_chunks) },
{ "used_raw", (uint64_t)(pool_stats[pool_cfg.id]["used_raw_tb"].number_value() * ((uint64_t)1<<40)) },
{ "total_raw", (uint64_t)(pool_stats[pool_cfg.id]["total_raw_tb"].number_value() * ((uint64_t)1<<40)) },
{ "max_available", pool_avail },
{ "raw_to_usable", pool_stats[pool_cfg.id]["raw_to_usable"].number_value() },
{ "space_efficiency", pool_stats[pool_cfg.id]["space_efficiency"].number_value() },
{ "pg_real_size", pool_stats[pool_cfg.id]["pg_real_size"].uint64_value() },
{ "failure_domain", pool_cfg.failure_domain },
};
}
}
json11::Json::array to_list()
{
json11::Json::array list;
for (auto & kv: pool_stats)
{
list.push_back(kv.second);
}
return list;
}
void loop()
{
get_stats();
if (parent->waiting > 0)
return;
if (state == 100)
return;
if (parent->json_output)
{
// JSON output
result.data = to_list();
state = 100;
return;
}
// Table output: name, scheme_name, pg_count, total, used, max_avail, used%, efficiency
json11::Json::array cols;
cols.push_back(json11::Json::object{
{ "key", "name" },
{ "title", "NAME" },
});
cols.push_back(json11::Json::object{
{ "key", "scheme_name" },
{ "title", "SCHEME" },
});
cols.push_back(json11::Json::object{
{ "key", "pg_count_fmt" },
{ "title", "PGS" },
});
cols.push_back(json11::Json::object{
{ "key", "total_fmt" },
{ "title", "TOTAL" },
});
cols.push_back(json11::Json::object{
{ "key", "used_fmt" },
{ "title", "USED" },
});
cols.push_back(json11::Json::object{
{ "key", "max_avail_fmt" },
{ "title", "AVAILABLE" },
});
cols.push_back(json11::Json::object{
{ "key", "used_pct" },
{ "title", "USED%" },
});
cols.push_back(json11::Json::object{
{ "key", "eff_fmt" },
{ "title", "EFFICIENCY" },
});
json11::Json::array list;
for (auto & kv: pool_stats)
{
double raw_to = kv.second["raw_to_usable"].number_value();
if (raw_to < 0.000001 && raw_to > -0.000001)
raw_to = 1;
kv.second["pg_count_fmt"] = kv.second["real_pg_count"] == kv.second["pg_count"]
? kv.second["real_pg_count"].as_string()
: kv.second["real_pg_count"].as_string()+"->"+kv.second["pg_count"].as_string();
kv.second["total_fmt"] = format_size(kv.second["total_raw"].uint64_value() / raw_to);
kv.second["used_fmt"] = format_size(kv.second["used_raw"].uint64_value() / raw_to);
kv.second["max_avail_fmt"] = format_size(kv.second["max_available"].uint64_value());
kv.second["used_pct"] = format_q(kv.second["total_raw"].uint64_value()
? (100 - 100*kv.second["max_available"].uint64_value() *
kv.second["raw_to_usable"].number_value() / kv.second["total_raw"].uint64_value())
: 100)+"%";
kv.second["eff_fmt"] = format_q(kv.second["space_efficiency"].number_value()*100)+"%";
}
result.data = to_list();
result.text = print_table(result.data, cols, parent->color);
state = 100;
}
};
std::function<bool(cli_result_t &)> cli_tool_t::start_df(json11::Json cfg)
{
auto lister = new pool_lister_t();
lister->parent = this;
return [lister](cli_result_t & result)
{
lister->loop();
if (lister->is_done())
{
result = lister->result;
delete lister;
return true;
}
return false;
};
}


@ -342,7 +342,11 @@ struct snap_merger_t
printf("\rOverwriting blocks: %ju/%ju\n", to_process, to_process);
}
// Done
result = (cli_result_t){ .text = "Done, layers from "+from_name+" to "+to_name+" merged into "+target_name };
result = (cli_result_t){ .text = "Done, layers from "+from_name+" to "+to_name+" merged into "+target_name, .data = json11::Json::object {
{ "from", from_name },
{ "to", to_name },
{ "into", target_name },
}};
state = 100;
resume_100:
return;


@ -84,7 +84,10 @@ struct image_changer_t
(!new_size && !force_size || cfg.size == new_size || cfg.size >= new_size && inc_size) &&
(new_name == "" || new_name == image_name))
{
result = (cli_result_t){ .text = "No change" };
result = (cli_result_t){ .err = 0, .text = "No change", .data = json11::Json::object {
{ "error_code", 0 },
{ "error_text", "No change" },
}};
state = 100;
return;
}
@ -220,7 +223,16 @@ resume_2:
parent->cli->st_cli.inode_by_name.erase(image_name);
}
parent->cli->st_cli.insert_inode_config(cfg);
result = (cli_result_t){ .err = 0, .text = "Image "+image_name+" modified" };
result = (cli_result_t){
.err = 0,
.text = "Image "+image_name+" modified",
.data = json11::Json::object {
{ "name", image_name },
{ "inode", INODE_NO_POOL(inode_num) },
{ "pool", (uint64_t)INODE_POOL(inode_num) },
{ "size", new_size },
}
};
state = 100;
}
};

src/cli_pool_cfg.cpp (new file)

@ -0,0 +1,473 @@
/*
=========================================================================
Copyright (c) 2023 MIND Software LLC. All Rights Reserved.
This file is part of the Software-Defined Storage MIND UStor Project.
For more information about this product, please visit https://mindsw.io
or contact us directly at info@mindsw.io
=========================================================================
*/
#include "cli_pool_cfg.h"
bool pool_configurator_t::is_valid_scheme_string(std::string scheme_str)
{
if (scheme_str != "replicated" && scheme_str != "xor" && scheme_str != "ec")
{
error = "Coding scheme should be one of \"xor\", \"replicated\", \"ec\" or \"jerasure\"";
return false;
}
return true;
}
bool pool_configurator_t::is_valid_immediate_commit_string(std::string immediate_commit_str)
{
if (immediate_commit != "" && immediate_commit != "all" && immediate_commit != "small" && immediate_commit != "none")
{
error = "Immediate Commit should be one of \"all\", \"small\", or \"none\"";
return false;
}
return true;
}
std::string pool_configurator_t::get_error_string()
{
return error;
}
bool pool_configurator_t::parse(json11::Json cfg, bool new_pool)
{
if (new_pool) // New pool configuration
{
// Pool name (req)
name = cfg["name"].string_value();
if (name == "")
{
error = "Pool name must be given";
return false;
}
// Exclusive ec shortcut check
if (!cfg["ec"].is_null() &&
(!cfg["scheme"].is_null() || !cfg["size"].is_null() || !cfg["pg_size"].is_null() || !cfg["parity_chunks"].is_null()))
{
error = "You cannot use 'ec' shortcut together with PG size, parity chunks and scheme arguments";
return false;
}
// ec = N+K (opt)
if (cfg["ec"].is_string())
{
scheme = "ec";
// pg_size = N+K
// parity_chunks = K
int ret = sscanf(cfg["ec"].string_value().c_str(), "%lu+%lu", &pg_size, &parity_chunks);
if (ret != 2)
{
error = "Shortcut for 'ec' scheme has an invalid value. Format: --ec <N>+<K>";
return false;
}
if (!pg_size || !parity_chunks)
{
error = "<N>+<K> values for 'ec' scheme cannot be 0";
return false;
}
pg_size += parity_chunks;
}
// scheme (opt) + pg_size (req) + parity_chunks (req)
else
{
scheme = cfg["scheme"].is_string() ?
(cfg["scheme"].string_value() == "jerasure" ? "ec" : cfg["scheme"].string_value()) : "replicated";
if (!is_valid_scheme_string(scheme))
{
return false;
}
if (!cfg["size"].is_null() && !cfg["pg_size"].is_null() ||
!cfg["size"].is_null() && !cfg["size"].uint64_value() ||
!cfg["pg_size"].is_null() && !cfg["pg_size"].uint64_value())
{
error = "PG size has an invalid value";
return false;
}
pg_size = !cfg["size"].is_null() ? cfg["size"].uint64_value() : cfg["pg_size"].uint64_value();
if (!pg_size)
{
error = "PG size must be given with value >= 1";
return false;
}
if (!cfg["parity_chunks"].is_null() && !cfg["parity_chunks"].uint64_value())
{
error = "Parity chunks has an invalid value";
return false;
}
parity_chunks = cfg["parity_chunks"].uint64_value();
if (scheme == "xor" && !parity_chunks)
{
parity_chunks = 1;
}
if (scheme != "replicated" && !parity_chunks)
{
error = "Parity Chunks must be given with value >= 1 for EC/XOR pools";
return false;
}
}
// pg_minsize (opt)
if (cfg["pg_minsize"].uint64_value())
{
pg_minsize = cfg["pg_minsize"].uint64_value();
}
else
{
if (!cfg["pg_minsize"].is_null())
{
error = "PG minsize has an invalid value";
return false;
}
if (scheme == "replicated")
{
// pg_minsize = (N+K > 2) ? 2 : 1
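// e.g. 3 replicas -> pg_minsize 2, 2 replicas -> pg_minsize 1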
pg_minsize = pg_size > 2 ? 2 : 1;
}
else // ec or xor
{
// pg_minsize = (K > 1) ? N + 1 : N
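// e.g. --ec 4+2 (N=4, K=2) -> pg_minsize 5; XOR (K=1) -> pg_minsize N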
pg_minsize = pg_size - parity_chunks + (parity_chunks > 1 ? 1 : 0);
}
}
if (!pg_minsize)
{
error = "PG minsize must be given with value >= 1";
return false;
}
// pg_count (req)
if (!cfg["count"].is_null() && !cfg["pg_count"].is_null() ||
!cfg["count"].is_null() && !cfg["count"].uint64_value() ||
!cfg["pg_count"].is_null() && !cfg["pg_count"].uint64_value())
{
error = "PG count has an invalid value";
return false;
}
pg_count = !cfg["count"].is_null() ? cfg["count"].uint64_value() : cfg["pg_count"].uint64_value();
if (!pg_count)
{
error = "PG count must be given with value >= 1";
return false;
}
// Optional params
failure_domain = cfg["failure_domain"].string_value();
if (!cfg["max_osd_combinations"].is_null() && !cfg["max_osd_combinations"].uint64_value())
{
error = "Max OSD combinations has an invalid value";
return false;
}
max_osd_combinations = cfg["max_osd_combinations"].uint64_value();
if (!cfg["block_size"].is_null() && !cfg["block_size"].uint64_value())
{
error = "Block size has an invalid value";
return false;
}
block_size = cfg["block_size"].uint64_value();
if (!cfg["bitmap_granularity"].is_null() && !cfg["bitmap_granularity"].uint64_value())
{
error = "Bitmap granularity has an invalid value";
return false;
}
bitmap_granularity = cfg["bitmap_granularity"].uint64_value();
if (!is_valid_immediate_commit_string(cfg["immediate_commit"].string_value()))
{
return false;
}
immediate_commit = cfg["immediate_commit"].string_value();
if (!cfg["pg_stripe_size"].is_null() && !cfg["pg_stripe_size"].uint64_value())
{
error = "PG stripe size has an invalid value";
return false;
}
pg_stripe_size = cfg["pg_stripe_size"].uint64_value();
root_node = cfg["root_node"].string_value();
osd_tags = cfg["osd_tags"].string_value();
primary_affinity_tags = cfg["primary_affinity_tags"].string_value();
scrub_interval = cfg["scrub_interval"].string_value();
}
else // Modified pool configuration
{
bool has_changes = false;
// Unsupported parameters
if (!cfg["scheme"].is_null() || !cfg["parity_chunks"].is_null() || !cfg["ec"].is_null() || !cfg["bitmap_granularity"].is_null())
{
error = "Scheme, parity_chunks and bitmap_granularity parameters cannot be modified";
return false;
}
// Supported parameters
if (!cfg["name"].is_null())
{
name = cfg["name"].string_value();
has_changes = true;
}
if (!cfg["size"].is_null() || !cfg["pg_size"].is_null())
{
if (!cfg["size"].is_null() && !cfg["pg_size"].is_null())
{
error = "Cannot use both size and pg_size parameters at the same time.";
return false;
}
else if (!cfg["size"].is_null() && !cfg["size"].uint64_value() ||
!cfg["pg_size"].is_null() && !cfg["pg_size"].uint64_value())
{
error = "PG size has an invalid value";
return false;
}
pg_size = !cfg["size"].is_null() ? cfg["size"].uint64_value() : cfg["pg_size"].uint64_value();
has_changes = true;
}
if (!cfg["pg_minsize"].is_null())
{
if (!cfg["pg_minsize"].uint64_value())
{
error = "PG minsize has an invalid value";
return false;
}
pg_minsize = cfg["pg_minsize"].uint64_value();
has_changes = true;
}
if (!cfg["count"].is_null() || !cfg["pg_count"].is_null())
{
if (!cfg["count"].is_null() && !cfg["pg_count"].is_null())
{
error = "Cannot use both count and pg_count parameters at the same time.";
return false;
}
else if (!cfg["count"].is_null() && !cfg["count"].uint64_value() ||
!cfg["pg_count"].is_null() && !cfg["pg_count"].uint64_value())
{
error = "PG count has an invalid value";
return false;
}
pg_count = !cfg["count"].is_null() ? cfg["count"].uint64_value() : cfg["pg_count"].uint64_value();
has_changes = true;
}
if (!cfg["failure_domain"].is_null())
{
failure_domain = cfg["failure_domain"].string_value();
has_changes = true;
}
if (!cfg["max_osd_combinations"].is_null())
{
if (!cfg["max_osd_combinations"].uint64_value())
{
error = "Max OSD combinations has an invalid value";
return false;
}
max_osd_combinations = cfg["max_osd_combinations"].uint64_value();
has_changes = true;
}
if (!cfg["block_size"].is_null())
{
if (!cfg["block_size"].uint64_value())
{
error = "Block size has an invalid value";
return false;
}
block_size = cfg["block_size"].uint64_value();
has_changes = true;
}
if (!cfg["immediate_commit"].is_null())
{
if (!is_valid_immediate_commit_string(cfg["immediate_commit"].string_value()))
{
return false;
}
immediate_commit = cfg["immediate_commit"].string_value();
has_changes = true;
}
if (!cfg["pg_stripe_size"].is_null())
{
if (!cfg["pg_stripe_size"].uint64_value())
{
error = "PG stripe size has an invalid value";
return false;
}
pg_stripe_size = cfg["pg_stripe_size"].uint64_value();
has_changes = true;
}
if (!cfg["root_node"].is_null())
{
root_node = cfg["root_node"].string_value();
has_changes = true;
}
if (!cfg["osd_tags"].is_null())
{
osd_tags = cfg["osd_tags"].string_value();
has_changes = true;
}
if (!cfg["primary_affinity_tags"].is_null())
{
primary_affinity_tags = cfg["primary_affinity_tags"].string_value();
has_changes = true;
}
if (!cfg["scrub_interval"].is_null())
{
scrub_interval = cfg["scrub_interval"].string_value();
has_changes = true;
}
if (!has_changes)
{
error = "No changes were provided to modify pool";
return false;
}
}
return true;
}
bool pool_configurator_t::validate(etcd_state_client_t &st_cli, pool_config_t *pool_config, bool strict)
{
// Validate pool parameters
// Scheme
uint64_t p_scheme = (scheme != "" ?
(scheme == "xor" ? POOL_SCHEME_XOR : (scheme == "ec" ? POOL_SCHEME_EC : POOL_SCHEME_REPLICATED)) :
(pool_config ? pool_config->scheme : 0));
// PG size
uint64_t p_pg_size = (pg_size ? pg_size : (pool_config ? pool_config->pg_size : 0));
if (p_pg_size)
{
// Min PG size
if ((p_scheme == POOL_SCHEME_XOR || p_scheme == POOL_SCHEME_EC) && p_pg_size < 3)
{
error = "PG size cannot be less than 3 for XOR/EC pool";
return false;
}
// Max PG size
else if (p_pg_size > 256)
{
error = "PG size cannot be greater than 256";
return false;
}
}
// Parity Chunks
uint64_t p_parity_chunks = (parity_chunks ? parity_chunks : (pool_config ? pool_config->parity_chunks : 0));
if (p_parity_chunks)
{
if (p_scheme == POOL_SCHEME_XOR && p_parity_chunks > 1)
{
error = "Parity Chunks must be 1 for XOR pool";
return false;
}
if (p_scheme == POOL_SCHEME_EC && (p_parity_chunks < 1 || p_parity_chunks > p_pg_size-2))
{
error = "Parity Chunks must be between 1 and pg_size-2 for EC pool";
return false;
}
}
// PG minsize
uint64_t p_pg_minsize = (pg_minsize ? pg_minsize : (pool_config ? pool_config->pg_minsize : 0));
if (p_pg_minsize)
{
// Max PG minsize relative to PG size
if (p_pg_minsize > p_pg_size)
{
error = "PG minsize cannot be greater than "+std::to_string(p_pg_size)+" (PG size)";
return false;
}
// PG minsize relative to PG size and Parity Chunks
else if ((p_scheme == POOL_SCHEME_XOR || p_scheme == POOL_SCHEME_EC) && p_pg_minsize < (p_pg_size - p_parity_chunks))
{
error =
"PG minsize cannot be less than "+std::to_string(p_pg_size - p_parity_chunks)+" "
"(PG size - Parity Chunks) for XOR/EC pool";
return false;
}
}
// Max OSD Combinations (optional)
if (max_osd_combinations > 0 && max_osd_combinations < 100)
{
error = "Max OSD Combinations must be at least 100";
return false;
}
// Scrub interval (optional)
if (scrub_interval != "")
{
bool ok;
parse_time(scrub_interval, &ok);
if (!ok)
{
error = "Failed to parse scrub interval. Format: number + unit s/m/h/d/M/y";
return false;
}
}
// Additional checks (only if strict)
if (strict)
{
uint64_t p_block_size = block_size ? block_size :
(pool_config ? pool_config->data_block_size : st_cli.global_block_size);
uint64_t p_bitmap_granularity = bitmap_granularity ? bitmap_granularity :
(pool_config ? pool_config->bitmap_granularity : st_cli.global_bitmap_granularity);
// Block size value and range
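// e.g. 131072 (128 KiB, the usual default) passes; 100000 fails because it is not a power of two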
if ((p_block_size & (p_block_size-1)) || p_block_size < MIN_DATA_BLOCK_SIZE || p_block_size > MAX_DATA_BLOCK_SIZE)
{
error =
"Data block size must be a power of two between "+std::to_string(MIN_DATA_BLOCK_SIZE)+" "
"and "+std::to_string(MAX_DATA_BLOCK_SIZE);
return false;
}
// Block size relative to bitmap granularity
if (p_block_size % p_bitmap_granularity)
{
error = "Data block size must be devisible by "+std::to_string(p_bitmap_granularity)+" (Bitmap Granularity)";
return false;
}
}
return true;
}

src/cli_pool_cfg.h (new file)

@ -0,0 +1,48 @@
/*
=========================================================================
Copyright (c) 2023 MIND Software LLC. All Rights Reserved.
This file is part of the Software-Defined Storage MIND UStor Project.
For more information about this product, please visit https://mindsw.io
or contact us directly at info@mindsw.io
=========================================================================
*/
#pragma once
#include "json11/json11.hpp"
#include "etcd_state_client.h"
#include "str_util.h"
struct pool_configurator_t
{
protected:
std::string error;
bool is_valid_scheme_string(std::string scheme_str);
bool is_valid_immediate_commit_string(std::string immediate_commit_str);
public:
std::string name;
std::string scheme;
uint64_t pg_size, pg_minsize, pg_count;
uint64_t parity_chunks;
std::string immediate_commit;
std::string failure_domain;
std::string root_node;
uint64_t max_osd_combinations;
uint64_t block_size, bitmap_granularity;
uint64_t pg_stripe_size;
std::string osd_tags;
std::string primary_affinity_tags;
std::string scrub_interval;
std::string get_error_string();
bool parse(json11::Json cfg, bool new_pool);
bool validate(etcd_state_client_t &st_cli, pool_config_t *pool_config, bool strict);
};

src/cli_pool_create.cpp (new file)

@ -0,0 +1,708 @@
/*
=========================================================================
Copyright (c) 2023 MIND Software LLC. All Rights Reserved.
This file is part of the Software-Defined Storage MIND UStor Project.
For more information about this product, please visit https://mindsw.io
or contact us directly at info@mindsw.io
=========================================================================
*/
#include <ctype.h>
#include "cli.h"
#include "cli_pool_cfg.h"
#include "cluster_client.h"
#include "epoll_manager.h"
#include "pg_states.h"
#include "str_util.h"
struct pool_creator_t
{
cli_tool_t *parent;
bool force;
pool_configurator_t *cfg;
int state = 0;
cli_result_t result;
struct {
uint32_t retries = 5;
uint32_t interval = 0;
bool passed = false;
} create_check;
uint64_t new_id = 1;
uint64_t new_pools_mod_rev;
json11::Json state_node_tree;
json11::Json new_pools;
json11::Json osd_tags_json;
json11::Json primary_affinity_tags_json;
bool is_done() { return state == 100; }
void loop()
{
if (state == 1)
goto resume_1;
else if (state == 2)
goto resume_2;
else if (state == 3)
goto resume_3;
else if (state == 4)
goto resume_4;
else if (state == 5)
goto resume_5;
else if (state == 6)
goto resume_6;
else if (state == 7)
goto resume_7;
else if (state == 8)
goto resume_8;
// Validate pool parameters
if (!cfg->validate(parent->cli->st_cli, NULL, !force))
{
result = (cli_result_t){ .err = EINVAL, .text = cfg->get_error_string() + "\n" };
state = 100;
return;
}
// OSD tags
if (cfg->osd_tags != "")
{
osd_tags_json = parent->parse_tags(cfg->osd_tags);
}
// Primary affinity tags
if (cfg->primary_affinity_tags != "")
{
primary_affinity_tags_json = parent->parse_tags(cfg->primary_affinity_tags);
}
state = 1;
resume_1:
// If not forced, check that we have enough osds for pg_size
if (!force)
{
// Get node_placement configuration from etcd
parent->etcd_txn(json11::Json::object {
{ "success", json11::Json::array {
json11::Json::object {
{ "request_range", json11::Json::object {
{ "key", base64_encode(parent->cli->st_cli.etcd_prefix+"/config/node_placement") },
} }
},
} },
});
state = 2;
resume_2:
if (parent->waiting > 0)
return;
if (parent->etcd_err.err)
{
result = parent->etcd_err;
state = 100;
return;
}
// Get state_node_tree based on node_placement and osd peer states
{
auto kv = parent->cli->st_cli.parse_etcd_kv(parent->etcd_result["responses"][0]["response_range"]["kvs"][0]);
state_node_tree = get_state_node_tree(kv.value.object_items());
}
// Skip tag checks, if pool has none
if (!osd_tags_json.is_null())
{
// Get osd configs (for tags) of osds in state_node_tree
{
json11::Json::array osd_configs;
for (auto osd_num: state_node_tree["osds"].array_items())
{
osd_configs.push_back(json11::Json::object {
{ "request_range", json11::Json::object {
{ "key", base64_encode(parent->cli->st_cli.etcd_prefix+"/config/osd/"+osd_num.as_string()) },
} }
});
}
parent->etcd_txn(json11::Json::object { { "success", osd_configs, }, });
}
state = 3;
resume_3:
if (parent->waiting > 0)
return;
if (parent->etcd_err.err)
{
result = parent->etcd_err;
state = 100;
return;
}
// Filter out osds from state_node_tree based on pool/osd tags
{
std::vector<json11::Json> osd_configs;
for (auto & ocr: parent->etcd_result["responses"].array_items())
{
auto kv = parent->cli->st_cli.parse_etcd_kv(ocr["response_range"]["kvs"][0]);
osd_configs.push_back(kv.value);
}
state_node_tree = filter_state_node_tree_by_tags(state_node_tree, osd_configs);
}
}
// Get stats (for block_size, bitmap_granularity, ...) of osds in state_node_tree
{
json11::Json::array osd_stats;
for (auto osd_num: state_node_tree["osds"].array_items())
{
osd_stats.push_back(json11::Json::object {
{ "request_range", json11::Json::object {
{ "key", base64_encode(parent->cli->st_cli.etcd_prefix+"/osd/stats/"+osd_num.as_string()) },
} }
});
}
parent->etcd_txn(json11::Json::object { { "success", osd_stats, }, });
}
state = 4;
resume_4:
if (parent->waiting > 0)
return;
if (parent->etcd_err.err)
{
result = parent->etcd_err;
state = 100;
return;
}
// Filter osds from state_node_tree based on pool parameters and osd stats
{
std::vector<json11::Json> osd_stats;
for (auto & ocr: parent->etcd_result["responses"].array_items())
{
auto kv = parent->cli->st_cli.parse_etcd_kv(ocr["response_range"]["kvs"][0]);
osd_stats.push_back(kv.value);
}
state_node_tree = filter_state_node_tree_by_stats(state_node_tree, osd_stats);
}
// Check that pg_size <= max_pg_size
{
uint64_t max_pg_size = get_max_pg_size(state_node_tree["nodes"].object_items(),
cfg->failure_domain, cfg->root_node);
if (cfg->pg_size > max_pg_size)
{
result = (cli_result_t){
.err = EINVAL,
.text =
"There are "+std::to_string(max_pg_size)+" failure domains with OSDs matching tags and "
"block_size/bitmap_granularity/immediate_commit parameters, but you want to create a "
"pool with "+std::to_string(cfg->pg_size)+" OSDs from different failure domains in a PG. "
"Change parameters or add --force if you want to create a degraded pool and add OSDs later."
};
state = 100;
return;
}
}
}
// Create pool
state = 5;
resume_5:
// Get pools from etcd
parent->etcd_txn(json11::Json::object {
{ "success", json11::Json::array {
json11::Json::object {
{ "request_range", json11::Json::object {
{ "key", base64_encode(parent->cli->st_cli.etcd_prefix+"/config/pools") },
} }
},
} },
});
state = 6;
resume_6:
if (parent->waiting > 0)
return;
if (parent->etcd_err.err)
{
result = parent->etcd_err;
state = 100;
return;
}
{
// Add new pool
auto kv = parent->cli->st_cli.parse_etcd_kv(parent->etcd_result["responses"][0]["response_range"]["kvs"][0]);
new_pools = create_pool(kv);
if (new_pools.is_string())
{
result = (cli_result_t){ .err = EEXIST, .text = new_pools.string_value() };
state = 100;
return;
}
new_pools_mod_rev = kv.mod_revision;
}
// Update pools in etcd
parent->etcd_txn(json11::Json::object {
{ "compare", json11::Json::array {
json11::Json::object {
{ "target", "MOD" },
{ "key", base64_encode(parent->cli->st_cli.etcd_prefix+"/config/pools") },
{ "result", "LESS" },
{ "mod_revision", new_pools_mod_rev+1 },
}
} },
{ "success", json11::Json::array {
json11::Json::object {
{ "request_put", json11::Json::object {
{ "key", base64_encode(parent->cli->st_cli.etcd_prefix+"/config/pools") },
{ "value", base64_encode(new_pools.dump()) },
} },
},
} },
});
state = 7;
resume_7:
if (parent->waiting > 0)
return;
if (parent->etcd_err.err)
{
result = parent->etcd_err;
state = 100;
return;
}
// Perform final create-check
create_check.interval = parent->cli->config["mon_change_timeout"].uint64_value();
if (!create_check.interval)
create_check.interval = 1000;
state = 8;
resume_8:
if (parent->waiting > 0)
return;
// Unless forced, check that pool was created and is active
if (force)
create_check.passed = true;
else if (create_check.retries)
{
create_check.retries--;
parent->waiting++;
parent->epmgr->tfd->set_timer(create_check.interval, false, [this](int timer_id)
{
if (parent->cli->st_cli.pool_config.find(new_id) != parent->cli->st_cli.pool_config.end())
{
auto & pool_cfg = parent->cli->st_cli.pool_config[new_id];
create_check.passed = pool_cfg.real_pg_count > 0;
for (auto pg_it = pool_cfg.pg_config.begin(); pg_it != pool_cfg.pg_config.end(); pg_it++)
{
if (!(pg_it->second.cur_state & PG_ACTIVE))
{
create_check.passed = false;
break;
}
}
if (create_check.passed)
create_check.retries = 0;
}
parent->waiting--;
parent->ringloop->wakeup();
});
return;
}
if (!create_check.passed)
{
result = (cli_result_t) {
.err = EAGAIN,
.text =
"Pool "+cfg->name+" was created, but failed to become active. This may indicate that cluster "
"state has changed while the pool was being created. Please check the current state and "
"correct the pool's configuration if necessary.\n"
};
}
else
{
result = (cli_result_t){
.err = 0,
.text = "Pool "+cfg->name+" created",
.data = new_pools[std::to_string(new_id)]
};
}
state = 100;
}
// Returns a JSON object of form {"nodes": {...}, "osds": [...]} that
// contains: all nodes (osds, hosts, ...) based on node_placement config
// and current peer state, and a list of active peer osds.
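// e.g. { "osds": ["1","2"], "nodes": { "host1": {"level":"host"}, "1": {"parent":"host1"}, "2": {"parent":"host1"} } }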
json11::Json get_state_node_tree(json11::Json::object node_placement)
{
// Erase non-peer osd nodes from node_placement
for (auto np_it = node_placement.begin(); np_it != node_placement.end();)
{
// Numeric nodes are osds
osd_num_t osd_num = stoull_full(np_it->first);
// If node is osd and it is not in peer states, erase it
if (osd_num > 0 &&
parent->cli->st_cli.peer_states.find(osd_num) == parent->cli->st_cli.peer_states.end())
{
node_placement.erase(np_it++);
}
else
np_it++;
}
// List of peer osds
std::vector<std::string> peer_osds;
// Record peer osds and add missing osds/hosts to np
for (auto & ps: parent->cli->st_cli.peer_states)
{
std::string osd_num = std::to_string(ps.first);
// Record peer osd
peer_osds.push_back(osd_num);
// Add osd, if necessary
if (node_placement.find(osd_num) == node_placement.end())
{
std::string osd_host = ps.second["host"].as_string();
// Add host, if necessary
if (node_placement.find(osd_host) == node_placement.end())
{
node_placement[osd_host] = json11::Json::object {
{ "level", "host" }
};
}
node_placement[osd_num] = json11::Json::object {
{ "parent", osd_host }
};
}
}
return json11::Json::object { { "osds", peer_osds }, { "nodes", node_placement } };
}
// Returns new state_node_tree based on given state_node_tree with osds
// filtered out by tags in given osd_configs and current pool config.
// Requires: state_node_tree["osds"] must match osd_configs 1-1
json11::Json filter_state_node_tree_by_tags(const json11::Json & state_node_tree, std::vector<json11::Json> & osd_configs)
{
auto & osds = state_node_tree["osds"].array_items();
// Accepted state_node_tree nodes
auto accepted_nodes = state_node_tree["nodes"].object_items();
// List of accepted osds
std::vector<std::string> accepted_osds;
for (size_t i = 0; i < osd_configs.size(); i++)
{
auto & oc = osd_configs[i].object_items();
// Get osd number
auto osd_num = osds[i].as_string();
// We need tags in config to check against pool tags
if (oc.find("tags") == oc.end())
{
// Exclude osd from state_node_tree nodes
accepted_nodes.erase(osd_num);
continue;
}
else
{
// If all pool tags are in osd tags, accept osd
if (all_in_tags(osd_configs[i]["tags"], osd_tags_json))
{
accepted_osds.push_back(osd_num);
}
// Otherwise, exclude osd
else
{
// Exclude osd from state_node_tree nodes
accepted_nodes.erase(osd_num);
}
}
}
return json11::Json::object { { "osds", accepted_osds }, { "nodes", accepted_nodes } };
}
// Returns new state_node_tree based on given state_node_tree with osds
// filtered out by stats parameters (block_size, bitmap_granularity) in
// given osd_stats and current pool config.
// Requires: state_node_tree["osds"] must match osd_stats 1-1
json11::Json filter_state_node_tree_by_stats(const json11::Json & state_node_tree, std::vector<json11::Json> & osd_stats)
{
auto & osds = state_node_tree["osds"].array_items();
// Accepted state_node_tree nodes
auto accepted_nodes = state_node_tree["nodes"].object_items();
// List of accepted osds
std::vector<std::string> accepted_osds;
for (size_t i = 0; i < osd_stats.size(); i++)
{
auto & os = osd_stats[i].object_items();
// Get osd number
auto osd_num = osds[i].as_string();
// Check data_block_size
if (os.find("data_block_size") != os.end())
{
uint64_t p_block_size = cfg->block_size ? cfg->block_size : parent->cli->st_cli.global_block_size;
uint64_t o_block_size = osd_stats[i]["data_block_size"].int64_value();
if (p_block_size != o_block_size)
{
accepted_nodes.erase(osd_num);
continue;
}
}
// Check bitmap_granularity
if (os.find("bitmap_granularity") != os.end())
{
uint64_t p_bitmap_granularity = cfg->bitmap_granularity ?
cfg->bitmap_granularity : parent->cli->st_cli.global_bitmap_granularity;
uint64_t o_bitmap_granularity = osd_stats[i]["bitmap_granularity"].int64_value();
if (p_bitmap_granularity != o_bitmap_granularity)
{
accepted_nodes.erase(osd_num);
continue;
}
}
// Check immediate_commit
if (os.find("immediate_commit") != os.end())
{
uint32_t p_immediate_commit = (cfg->immediate_commit != "") ?
parent->cli->st_cli.parse_immediate_commit_string(cfg->immediate_commit) : parent->cli->st_cli.global_immediate_commit;
uint32_t o_immediate_commit = parent->cli->st_cli.parse_immediate_commit_string(osd_stats[i]["immediate_commit"].string_value());
if (o_immediate_commit < p_immediate_commit)
{
accepted_nodes.erase(osd_num);
continue;
}
}
// Accept osd if all checks passed
accepted_osds.push_back(osd_num);
}
return json11::Json::object { { "osds", accepted_osds }, { "nodes", accepted_nodes } };
}
// Returns maximum pg_size possible for given node_tree and failure_domain, starting at parent_node
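// e.g. with failure_domain "host", no root_node, and three hosts each holding two peer OSDs, the result is 3 (one OSD per host-level failure domain)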
uint64_t get_max_pg_size(json11::Json::object node_tree, const std::string & failure_domain = "", const std::string & parent_node = "")
{
uint64_t max_pg_sz = 0;
std::vector<std::string> nodes;
const std::string level = (failure_domain != "") ? failure_domain : "osd";
// Check if parent node is an osd (numeric)
if (parent_node != "" && stoull_full(parent_node))
{
// Add it to node list if osd is in node tree
if (node_tree.find(parent_node) != node_tree.end())
nodes.push_back(parent_node);
}
// If parent node given, ...
else if (parent_node != "")
{
// ... look for children nodes of this parent
for (auto & sn: node_tree)
{
auto & props = sn.second.object_items();
auto parent_prop = props.find("parent");
if (parent_prop != props.end() && (parent_prop->second.as_string() == parent_node))
{
nodes.push_back(sn.first);
// If we're not looking for all osds, we only need a single
// child osd node
if (level != "osd" && stoull_full(sn.first))
break;
}
}
}
// No parent node given, and we're not looking for all osds
else if (level != "osd")
{
// ... look for all level nodes
for (auto & sn: node_tree)
{
auto & props = sn.second.object_items();
auto level_prop = props.find("level");
if (level_prop != props.end() && (level_prop->second.as_string() == level))
{
nodes.push_back(sn.first);
}
}
}
// Otherwise, ...
else
{
// ... we're looking for osd nodes only
for (auto & sn: node_tree)
{
if (stoull_full(sn.first))
{
nodes.push_back(sn.first);
}
}
}
// Process gathered nodes
for (auto & node: nodes)
{
// An osd node contributes exactly 1 to the maximum PG size
if (stoull_full(node))
{
max_pg_sz += 1;
}
// Otherwise, ...
else
{
// ... exclude parent node from tree, and ...
node_tree.erase(parent_node);
// ... descend onto the resulting tree
max_pg_sz += get_max_pg_size(node_tree, level, node);
}
}
return max_pg_sz;
}
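// Builds the updated /config/pools object: picks new_id as max(existing id)+1, refuses duplicate pool names, and appends an entry with the required parameters plus any optional ones that were set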
json11::Json create_pool(const etcd_kv_t & kv)
{
for (auto & p: kv.value.object_items())
{
// ID
uint64_t pool_id;
char null_byte = 0;
sscanf(p.first.c_str(), "%lu%c", &pool_id, &null_byte);
new_id = std::max(pool_id+1, new_id);
// Name
if (p.second["name"].string_value() == cfg->name)
{
return json11::Json("Pool "+std::to_string(pool_id)+" has the same name\n");
}
}
json11::Json::object new_pool = json11::Json::object {
{ "name", cfg->name },
{ "scheme", cfg->scheme },
{ "pg_size", cfg->pg_size },
{ "pg_minsize", cfg->pg_minsize },
{ "pg_count", cfg->pg_count },
{ "parity_chunks", cfg->parity_chunks },
};
if (cfg->failure_domain != "")
new_pool["failure_domain"] = cfg->failure_domain;
if (cfg->max_osd_combinations)
new_pool["max_osd_combinations"] = cfg->max_osd_combinations;
if (cfg->block_size)
new_pool["block_size"] = cfg->block_size;
if (cfg->bitmap_granularity)
new_pool["bitmap_granularity"] = cfg->bitmap_granularity;
if (cfg->immediate_commit != "")
new_pool["immediate_commit"] = cfg->immediate_commit;
if (cfg->pg_stripe_size)
new_pool["pg_stripe_size"] = cfg->pg_stripe_size;
if (cfg->root_node != "")
new_pool["root_node"] = cfg->root_node;
if (cfg->scrub_interval != "")
new_pool["scrub_interval"] = cfg->scrub_interval;
if (cfg->osd_tags != "")
new_pool["osd_tags"] = osd_tags_json;
if (cfg->primary_affinity_tags != "")
new_pool["primary_affinity_tags"] = primary_affinity_tags_json;
auto res = kv.value.object_items();
res[std::to_string(new_id)] = new_pool;
return res;
}
// Checks whether tags2 tags are all in tags1 tags
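// Plain strings are treated as single-element arrays, e.g. all_in_tags(["ssd","rack1"], "ssd") == true, all_in_tags(["ssd"], ["ssd","rack1"]) == false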
bool all_in_tags(json11::Json tags1, json11::Json tags2)
{
if (!tags2.is_array())
{
tags2 = json11::Json::array{ tags2.string_value() };
}
if (!tags1.is_array())
{
tags1 = json11::Json::array{ tags1.string_value() };
}
for (auto & tag2: tags2.array_items())
{
bool found = false;
for (auto & tag1: tags1.array_items())
{
if (tag1 == tag2)
{
found = true;
break;
}
}
if (!found)
{
return false;
}
}
return true;
}
};
std::function<bool(cli_result_t &)> cli_tool_t::start_pool_create(json11::Json cfg)
{
auto pool_creator = new pool_creator_t();
pool_creator->parent = this;
pool_creator->cfg = new pool_configurator_t();
if (!pool_creator->cfg->parse(cfg, true))
{
std::string err = pool_creator->cfg->get_error_string();
return [err](cli_result_t & result)
{
result = (cli_result_t){ .err = EINVAL, .text = err + "\n" };
return true;
};
}
pool_creator->force = !cfg["force"].is_null();
return [pool_creator](cli_result_t & result)
{
pool_creator->loop();
if (pool_creator->is_done())
{
result = pool_creator->result;
delete pool_creator;
return true;
}
return false;
};
}

src/cli_pool_ls.cpp (new file)

@ -0,0 +1,708 @@
/*
=========================================================================
Copyright (c) 2023 MIND Software LLC. All Rights Reserved.
This file is part of the Software-Defined Storage MIND UStor Project.
For more information about this product, please visit https://mindsw.io
or contact us directly at info@mindsw.io
=========================================================================
*/
#include <algorithm>
#include <numeric>
#include <string>
#include "cli.h"
#include "cluster_client.h"
#include "str_util.h"
#include "pg_states.h"
// List pools with space statistics
struct pool_ls_t
{
cli_tool_t *parent;
pool_id_t list_pool_id = 0;
std::string list_pool_name;
std::string sort_field;
std::set<std::string> only_names;
bool show_df_format = false;
bool show_stats = false;
bool reverse = false;
bool show_all = false;
int max_count = 0;
int state = 0;
json11::Json space_info;
cli_result_t result;
std::map<pool_id_t, json11::Json::object> pool_stats;
bool is_done()
{
return state == 100;
}
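// Renders a scalar JSON value as a string, or joins array items with ", " (e.g. ["ssd","fast"] -> "ssd, fast")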
std::string item_as_string(const json11::Json& item)
{
if (item.is_array())
{
if (item.array_items().empty())
return std::string{};
std::string result = item.array_items().at(0).as_string();
std::for_each(
std::next(item.array_items().begin()),
item.array_items().end(),
[&result](const json11::Json& a)
{
result += ", " + a.as_string();
});
return result;
}
else
return item.as_string();
}
void get_stats()
{
if (state == 1)
goto resume_1;
if (list_pool_name != "")
{
for (auto & ic: parent->cli->st_cli.pool_config)
{
if (ic.second.name == list_pool_name)
{
list_pool_id = ic.first;
break;
}
}
if (!list_pool_id)
{
result = (cli_result_t){ .err = ENOENT, .text = "Pool "+list_pool_name+" does not exist" };
state = 100;
return;
}
}
else if (list_pool_id != 0)
{
for (auto & ic: parent->cli->st_cli.pool_config)
{
if (ic.second.id == list_pool_id)
{
list_pool_name = ic.second.name;
break;
}
}
if (list_pool_name == "")
{
result = (cli_result_t){ .err = ENOENT, .text = "Pool "+std::to_string(list_pool_id)+" does not exist" };
state = 100;
return;
}
}
// Space statistics - pool/stats/<pool>
parent->etcd_txn(json11::Json::object {
{ "success", json11::Json::array {
json11::Json::object {
{ "request_range", json11::Json::object {
{ "key", base64_encode(
parent->cli->st_cli.etcd_prefix+"/pool/stats/"
) },
{ "range_end", base64_encode(
parent->cli->st_cli.etcd_prefix+"/pool/stats0"
) },
} },
},
json11::Json::object {
{ "request_range", json11::Json::object {
{ "key", base64_encode(
parent->cli->st_cli.etcd_prefix+"/osd/stats/"
) },
{ "range_end", base64_encode(
parent->cli->st_cli.etcd_prefix+"/osd/stats0"
) },
} },
},
json11::Json::object {
{ "request_range", json11::Json::object {
{ "key", base64_encode(
parent->cli->st_cli.etcd_prefix+"/inode/stats"+
(list_pool_id ? "/"+std::to_string(list_pool_id) : "")+"/"
) },
{ "range_end", base64_encode(
parent->cli->st_cli.etcd_prefix+"/inode/stats"+
(list_pool_id ? "/"+std::to_string(list_pool_id) : "")+"0"
) },
} },
},
json11::Json::object {
{ "request_range", json11::Json::object {
{ "key", base64_encode(
parent->cli->st_cli.etcd_prefix+"/pg/stats"+
(list_pool_id ? "/"+std::to_string(list_pool_id) : "")+"/"
) },
{ "range_end", base64_encode(
parent->cli->st_cli.etcd_prefix+"/pg/stats"+
(list_pool_id ? "/"+std::to_string(list_pool_id) : "")+"0"
) },
} },
},
} },
});
state = 1;
resume_1:
if (parent->waiting > 0)
return;
if (parent->etcd_err.err)
{
result = parent->etcd_err;
state = 100;
return;
}
space_info = parent->etcd_result;
std::map<pool_id_t, uint64_t> osd_free;
for (auto & kv_item: space_info["responses"][0]["response_range"]["kvs"].array_items())
{
auto kv = parent->cli->st_cli.parse_etcd_kv(kv_item);
// pool ID
pool_id_t pool_id;
char null_byte = 0;
int scanned = sscanf(kv.key.substr(parent->cli->st_cli.etcd_prefix.length()).c_str(), "/pool/stats/%u%c", &pool_id, &null_byte);
if (scanned != 1 || !pool_id || pool_id >= POOL_ID_MAX)
{
fprintf(stderr, "Invalid key in etcd: %s\n", kv.key.c_str());
continue;
}
// pool/stats/<N>
pool_stats[pool_id] = kv.value.object_items();
}
for (auto & kv_item: space_info["responses"][1]["response_range"]["kvs"].array_items())
{
auto kv = parent->cli->st_cli.parse_etcd_kv(kv_item);
// osd ID
osd_num_t osd_num;
char null_byte = 0;
int scanned = sscanf(kv.key.substr(parent->cli->st_cli.etcd_prefix.length()).c_str(), "/osd/stats/%lu%c", &osd_num, &null_byte);
if (scanned != 1 || !osd_num || osd_num >= POOL_ID_MAX)
{
fprintf(stderr, "Invalid key in etcd: %s\n", kv.key.c_str());
continue;
}
// osd/stats/<N>::free
osd_free[osd_num] = kv.value["free"].uint64_value();
}
// Performance statistics
double pool_read_iops = 0;
double pool_read_bps = 0;
double pool_read_lat = 0;
double pool_write_iops = 0;
double pool_write_bps = 0;
double pool_write_lat = 0;
double pool_delete_iops = 0;
double pool_delete_bps = 0;
double pool_delete_lat = 0;
uint32_t pool_inode_stats_count = 0;
for (auto & kv_item: space_info["responses"][2]["response_range"]["kvs"].array_items())
{
auto kv = parent->cli->st_cli.parse_etcd_kv(kv_item);
// pool ID & inode number
pool_id_t pool_id;
inode_t only_inode_num;
char null_byte = 0;
int scanned = sscanf(kv.key.substr(parent->cli->st_cli.etcd_prefix.length()).c_str(),
"/inode/stats/%u/%lu%c", &pool_id, &only_inode_num, &null_byte);
if (scanned != 2 || !pool_id || pool_id >= POOL_ID_MAX || INODE_POOL(only_inode_num) != 0)
{
fprintf(stderr, "Invalid key in etcd: %s\n", kv.key.c_str());
continue;
}
pool_read_iops += kv.value["read"]["iops"].number_value();
pool_read_bps += kv.value["read"]["bps"].number_value();
pool_read_lat += kv.value["read"]["lat"].number_value();
pool_write_iops += kv.value["write"]["iops"].number_value();
pool_write_bps += kv.value["write"]["bps"].number_value();
pool_write_lat += kv.value["write"]["lat"].number_value();
pool_delete_iops += kv.value["delete"]["iops"].number_value();
pool_delete_bps += kv.value["delete"]["bps"].number_value();
pool_delete_lat += kv.value["delete"]["lat"].number_value();
pool_inode_stats_count++;
}
// Average latencies over each pool's inodes (IOPS and bandwidth stay as sums)
for (auto & pic: pool_inode_stats_count)
{
pool_read_lat[pic.first] /= pic.second;
pool_write_lat[pic.first] /= pic.second;
pool_delete_lat[pic.first] /= pic.second;
}
// Calculate recovery percent per pool
std::map<pool_id_t, uint64_t> object_count, degraded_count, misplaced_count;
for (auto & kv_item: space_info["responses"][3]["response_range"]["kvs"].array_items())
{
auto kv = parent->cli->st_cli.parse_etcd_kv(kv_item);
// pool ID & pg number
pool_id_t pool_id;
pg_num_t pg_num = 0;
char null_byte = 0;
int scanned = sscanf(kv.key.substr(parent->cli->st_cli.etcd_prefix.length()).c_str(),
"/pg/stats/%u/%u%c", &pool_id, &pg_num, &null_byte);
if (scanned != 2 || !pool_id || pool_id >= POOL_ID_MAX)
{
fprintf(stderr, "Invalid key in etcd: %s\n", kv.key.c_str());
continue;
}
object_count[pool_id] += kv.value["object_count"].uint64_value();
degraded_count[pool_id] += kv.value["degraded_count"].uint64_value();
misplaced_count[pool_id] += kv.value["misplaced_count"].uint64_value();
}
// Calculate max_avail for each pool
for (auto & pp: parent->cli->st_cli.pool_config)
{
auto & pool_cfg = pp.second;
uint64_t pool_avail = UINT64_MAX;
std::map<osd_num_t, uint64_t> pg_per_osd;
json11::Json::array osd_set;
for (auto & pgp: pool_cfg.pg_config)
{
for (auto pg_osd: pgp.second.target_set)
{
if (pg_osd != 0)
{
pg_per_osd[pg_osd]++;
}
}
}
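// Estimate how much the pool can still grow: for each OSD,
// free_space * pg_count / pgs_on_this_osd approximates the pool-wide growth
// before that OSD fills up, and the minimum over all OSDs becomes max_available.
// (Illustration: an OSD with 1 TB free that hosts 10 of the pool's 100 PGs
// bounds the pool at about 100/10 * 1 TB = 10 TB.)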
for (auto pg_per_pair: pg_per_osd)
{
uint64_t pg_free = osd_free[pg_per_pair.first] * pool_cfg.pg_count / pg_per_pair.second;
if (pool_avail > pg_free)
{
pool_avail = pg_free;
}
osd_set.push_back(pg_per_pair.first);
}
if (pool_avail == UINT64_MAX)
{
pool_avail = 0;
}
if (pool_cfg.scheme != POOL_SCHEME_REPLICATED)
{
pool_avail *= (pool_cfg.pg_size - pool_cfg.parity_chunks);
}
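// An EC/XOR OSD stores one chunk per object, of which (pg_size - parity_chunks)
// are data chunks, so the per-chunk bound above is scaled by the data chunk count;
// replicated pools keep whole copies on every OSD, so no scaling is needed.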
bool active = pool_cfg.real_pg_count > 0;
bool incomplete = false;
bool has_incomplete = false;
bool degraded = false;
bool has_degraded = false;
bool has_misplaced = false;
for (auto pg_it = pool_cfg.pg_config.begin(); pg_it != pool_cfg.pg_config.end(); pg_it++)
{
if (!(pg_it->second.cur_state & PG_ACTIVE))
{
active = false;
}
if (pg_it->second.cur_state & PG_INCOMPLETE)
{
incomplete = true;
}
if (pg_it->second.cur_state & PG_HAS_INCOMPLETE)
{
has_incomplete = true;
}
if (pg_it->second.cur_state & PG_DEGRADED)
{
degraded = true;
}
if (pg_it->second.cur_state & PG_HAS_DEGRADED)
{
has_degraded = true;
}
if (pg_it->second.cur_state & PG_HAS_MISPLACED)
{
has_misplaced = true;
}
}
// incomplete > has_incomplete > degraded > has_degraded > has_misplaced
std::string status;
if (active)
{
if (incomplete)
status = "incomplete";
else if (has_incomplete)
status = "has_incomplete";
else if (degraded)
status = "degraded";
else if (has_degraded)
status = "has_degraded";
else if (has_misplaced)
status = "has_misplaced";
else
status = "active";
}
else
{
status = "inactive";
}
pool_stats[pool_cfg.id] = json11::Json::object {
{ "id", (uint64_t)(pool_cfg.id) },
{ "name", pool_cfg.name },
{ "status", status },
{ "recovery", object_count[pool_cfg.id] ? (degraded_count[pool_cfg.id] + misplaced_count[pool_cfg.id]) / (double)object_count[pool_cfg.id] : 0 },
{ "pg_count", pool_cfg.pg_count },
{ "real_pg_count", pool_cfg.real_pg_count },
{ "scheme", pool_cfg.scheme == POOL_SCHEME_REPLICATED ? "replicated" : "ec" },
{ "scheme_name", pool_cfg.scheme == POOL_SCHEME_REPLICATED
? std::to_string(pool_cfg.pg_size)+"/"+std::to_string(pool_cfg.pg_minsize)
: "EC "+std::to_string(pool_cfg.pg_size-pool_cfg.parity_chunks)+"+"+std::to_string(pool_cfg.parity_chunks) },
{ "used_raw", (uint64_t)(pool_stats[pool_cfg.id]["used_raw_tb"].number_value() * ((uint64_t)1<<40)) },
{ "total_raw", (uint64_t)(pool_stats[pool_cfg.id]["total_raw_tb"].number_value() * ((uint64_t)1<<40)) },
{ "max_available", pool_avail },
{ "raw_to_usable", pool_stats[pool_cfg.id]["raw_to_usable"].number_value() },
{ "space_efficiency", pool_stats[pool_cfg.id]["space_efficiency"].number_value() },
{ "pg_real_size", pool_stats[pool_cfg.id]["pg_real_size"].uint64_value() },
{ "failure_domain", pool_cfg.failure_domain },
{ "root_node", pool_cfg.root_node },
{ "osd_tags", pool_cfg.osd_tags },
{ "osd_count", pg_per_osd.size() },
{ "osd_set", osd_set },
{ "primary_affinity_tags", pool_cfg.primary_affinity_tags },
{ "pg_minsize", pool_cfg.pg_minsize },
{ "pg_size", pool_cfg.pg_size },
{ "parity_chunks", pool_cfg.parity_chunks },
{ "max_osd_combinations", pool_cfg.max_osd_combinations },
{ "block_size", (uint64_t)pool_cfg.data_block_size },
{ "bitmap_granularity", (uint64_t)pool_cfg.bitmap_granularity },
{ "pg_stripe_size", pool_cfg.pg_stripe_size },
{ "scrub_interval", pool_cfg.scrub_interval },
{ "read_iops", pool_read_iops[pool_cfg.id] },
{ "read_bps", pool_read_bps[pool_cfg.id] },
{ "read_lat", pool_read_lat[pool_cfg.id] },
{ "write_iops", pool_write_iops[pool_cfg.id] },
{ "write_bps", pool_write_bps[pool_cfg.id] },
{ "write_lat", pool_write_lat[pool_cfg.id] },
{ "delete_iops", pool_delete_iops[pool_cfg.id] },
{ "delete_bps", pool_delete_bps[pool_cfg.id] },
{ "delete_lat", pool_delete_lat[pool_cfg.id] },
};
}
}
json11::Json::array to_list()
{
json11::Json::array list;
for (auto & kv: pool_stats)
{
if (!only_names.size())
{
list.push_back(kv.second);
}
else
{
for (auto glob: only_names)
{
if (stupid_glob(kv.second["name"].string_value(), glob))
{
list.push_back(kv.second);
break;
}
}
}
}
if (sort_field == "name" ||
sort_field == "scheme_name" ||
sort_field == "scheme" ||
sort_field == "failure_domain" ||
sort_field == "root_node" ||
sort_field == "osd_tags_fmt" ||
sort_field == "primary_affinity_tags_fmt" ||
sort_field == "status")
{
std::sort(list.begin(), list.end(), [this](json11::Json a, json11::Json b)
{
auto av = a[sort_field].as_string();
auto bv = b[sort_field].as_string();
return reverse ? av > bv : av < bv;
});
}
else
{
std::sort(list.begin(), list.end(), [this](json11::Json a, json11::Json b)
{
auto av = a[sort_field].number_value();
auto bv = b[sort_field].number_value();
return reverse ? av > bv : av < bv;
});
}
if (max_count > 0 && list.size() > max_count)
{
list.resize(max_count);
}
return list;
}
void loop()
{
get_stats();
if (parent->waiting > 0)
return;
if (state == 100)
return;
if (parent->json_output)
{
// JSON output
json11::Json::array array = to_list();
if (list_pool_id != 0)
{
for (auto & a: array)
{
if (a["id"].uint64_value() == list_pool_id)
{
result.data = a;
break;
}
}
}
else
result.data = array;
state = 100;
return;
}
for (auto & kv: pool_stats)
{
double raw_to = kv.second["raw_to_usable"].number_value();
if (raw_to < 0.000001 && raw_to > -0.000001)
raw_to = 1;
kv.second["pg_count_fmt"] = kv.second["real_pg_count"] == kv.second["pg_count"]
? kv.second["real_pg_count"].as_string()
: kv.second["real_pg_count"].as_string()+"->"+kv.second["pg_count"].as_string();
kv.second["total_fmt"] = format_size(kv.second["total_raw"].uint64_value() / raw_to);
kv.second["used_fmt"] = format_size(kv.second["used_raw"].uint64_value() / raw_to);
kv.second["max_avail_fmt"] = format_size(kv.second["max_available"].uint64_value());
kv.second["used_pct"] = format_q(kv.second["total_raw"].uint64_value()
? (100 - 100*kv.second["max_available"].uint64_value() *
kv.second["raw_to_usable"].number_value() / kv.second["total_raw"].uint64_value())
: 100)+"%";
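// used_pct: max_available is in usable bytes, so multiply by raw_to_usable to get
// back to raw bytes before comparing with total_raw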
kv.second["eff_fmt"] = format_q(kv.second["space_efficiency"].number_value()*100)+"%";
kv.second["recovery_pct"] = format_q(kv.second["recovery"].number_value()*100)+"%";
kv.second["osd_tags_fmt"] = item_as_string(kv.second["osd_tags"]);
kv.second["primary_affinity_tags_fmt"] = item_as_string(kv.second["primary_affinity_tags"]);
kv.second["read_bw"] = format_size(kv.second["read_bps"].uint64_value())+"/s";
kv.second["write_bw"] = format_size(kv.second["write_bps"].uint64_value())+"/s";
kv.second["delete_bw"] = format_size(kv.second["delete_bps"].uint64_value())+"/s";
kv.second["read_iops"] = format_q(kv.second["read_iops"].number_value());
kv.second["write_iops"] = format_q(kv.second["write_iops"].number_value());
kv.second["delete_iops"] = format_q(kv.second["delete_iops"].number_value());
kv.second["read_lat_f"] = format_lat(kv.second["read_lat"].uint64_value());
kv.second["write_lat_f"] = format_lat(kv.second["write_lat"].uint64_value());
kv.second["delete_lat_f"] = format_lat(kv.second["delete_lat"].uint64_value());
}
if (list_pool_id != 0)
{
auto array = to_list();
for (auto & a: array)
{
if (a["id"].uint64_value() == list_pool_id)
{
result.data = a;
break;
}
}
result.text = print_pool_details(result.data, parent->color);
state = 100;
return;
}
// Table output: id, name, scheme_name, pg_count, total, used, max_avail, used%, efficiency, status, recovery, root_node, failure_domain, osd_tags, primary_affinity_tags
json11::Json::array cols;
if (!show_df_format)
{
cols.push_back(json11::Json::object{
{ "key", "id" },
{ "title", "ID" },
});
}
cols.push_back(json11::Json::object{
{ "key", "name" },
{ "title", "NAME" },
});
cols.push_back(json11::Json::object{
{ "key", "scheme_name" },
{ "title", "SCHEME" },
});
cols.push_back(json11::Json::object{
{ "key", "pg_count_fmt" },
{ "title", "PGS" },
});
cols.push_back(json11::Json::object{
{ "key", "total_fmt" },
{ "title", "TOTAL" },
});
cols.push_back(json11::Json::object{
{ "key", "used_fmt" },
{ "title", "USED" },
});
cols.push_back(json11::Json::object{
{ "key", "max_avail_fmt" },
{ "title", "AVAILABLE" },
});
cols.push_back(json11::Json::object{
{ "key", "used_pct" },
{ "title", "USED%" },
});
cols.push_back(json11::Json::object{
{ "key", "eff_fmt" },
{ "title", "EFFICIENCY" },
});
if (!show_df_format)
{
cols.push_back(json11::Json::object{
{ "key", "status" },
{ "title", "STATUS" },
});
cols.push_back(json11::Json::object{
{ "key", "recovery_pct" },
{ "title", "RECOVERY" },
});
}
if (show_stats)
{
cols.push_back(json11::Json::object{
{ "key", "read_bw" },
{ "title", "READ" },
});
cols.push_back(json11::Json::object{
{ "key", "read_iops" },
{ "title", "IOPS" },
});
cols.push_back(json11::Json::object{
{ "key", "read_lat_f" },
{ "title", "LAT" },
});
cols.push_back(json11::Json::object{
{ "key", "write_bw" },
{ "title", "WRITE" },
});
cols.push_back(json11::Json::object{
{ "key", "write_iops" },
{ "title", "IOPS" },
});
cols.push_back(json11::Json::object{
{ "key", "write_lat_f" },
{ "title", "LAT" },
});
cols.push_back(json11::Json::object{
{ "key", "delete_bw" },
{ "title", "DEL" },
});
cols.push_back(json11::Json::object{
{ "key", "delete_iops" },
{ "title", "IOPS" },
});
cols.push_back(json11::Json::object{
{ "key", "delete_lat_f" },
{ "title", "LAT" },
});
}
if (show_all)
{
cols.push_back(json11::Json::object{
{ "key", "root_node" },
{ "title", "ROOT" },
});
cols.push_back(json11::Json::object{
{ "key", "failure_domain" },
{ "title", "FAILURE_DOMAIN" },
});
cols.push_back(json11::Json::object{
{ "key", "osd_tags_fmt" },
{ "title", "OSD_TAGS" },
});
cols.push_back(json11::Json::object{
{ "key", "primary_affinity_tags_fmt" },
{ "title", "AFFINITY_TAGS" },
});
}
result.data = to_list();
result.text = print_table(result.data, cols, parent->color);
state = 100;
}
std::string print_pool_details(json11::Json items, bool use_esc)
{
std::string start_esc = use_esc ? "\033[1m" : "";
std::string end_esc = use_esc ? "\033[0m" : "";
std::string result ="Pool details: \n";
result +=start_esc+" pool id: "+end_esc+ items["id"].as_string() +"\n";
result +=start_esc+" pool name: "+end_esc+ items["name"].as_string() +"\n";
result +=start_esc+" scheme: "+end_esc+ items["scheme_name"].as_string() +"\n";
result +=start_esc+" placement group count: "+end_esc+ items["pg_count_fmt"].as_string() +"\n";
result +=start_esc+" total: "+end_esc+ items["total_fmt"].as_string() +"\n";
result +=start_esc+" used: "+end_esc+ items["used_fmt"].as_string() +" ("+items["used_pct"].as_string()+")"+ "\n";
result +=start_esc+"  max available:         "+end_esc+ items["max_avail_fmt"].as_string() +"\n";
result +=start_esc+" space efficiency: "+end_esc+ items["eff_fmt"].as_string() +"\n";
result +=start_esc+" status: "+end_esc+ items["status"].as_string() +"\n";
result +=start_esc+" recovery: "+end_esc+ items["recovery_pct"].as_string() +"\n";
result +=start_esc+" root node: "+end_esc+ items["root_node"].as_string() +"\n";
result +=start_esc+" failure domain: "+end_esc+ items["failure_domain"].as_string() +"\n";
result +=start_esc+" pg size: "+end_esc+ items["pg_size"].as_string() +"\n";
result +=start_esc+" pg minsize: "+end_esc+ items["pg_minsize"].as_string() +"\n";
result +=start_esc+" parity chunks: "+end_esc+ items["parity_chunks"].as_string() +"\n";
result +=start_esc+" max osd combinations: "+end_esc+ items["max_osd_combinations"].as_string() +"\n";
result +=start_esc+" block size: "+end_esc+ items["block_size"].as_string() +"\n";
result +=start_esc+" bitmap granularity: "+end_esc+ items["bitmap_granularity"].as_string() +"\n";
result +=start_esc+" pg stripe size: "+end_esc+ items["pg_stripe_size"].as_string() +"\n";
result +=start_esc+" scrub interval: "+end_esc+ items["scrub_interval"].as_string() +"\n";
result +=start_esc+" osd count: "+end_esc+ items["osd_count"].as_string() +"\n";
result +=start_esc+" osd: "+end_esc+ item_as_string(items["osd_set"]) +"\n";
result +=start_esc+" osd tags: "+end_esc+ item_as_string(items["osd_tags"]) +"\n";
result +=start_esc+" primary affinity tags: "+end_esc+ item_as_string(items["primary_affinity_tags"]) +"\n";
result +=start_esc+" read bandwidth: "+end_esc+ items["read_bw"].as_string() +"\n";
result +=start_esc+" read IOPS: "+end_esc+ items["read_iops"].as_string() +"\n";
result +=start_esc+" read latency: "+end_esc+ items["read_lat_f"].as_string() +"\n";
result +=start_esc+" write bandwidth: "+end_esc+ items["write_bw"].as_string() +"\n";
result +=start_esc+" write IOPS: "+end_esc+ items["write_iops"].as_string() +"\n";
result +=start_esc+" write latency: "+end_esc+ items["write_lat_f"].as_string() +"\n";
result +=start_esc+" delete bandwidth: "+end_esc+ items["delete_bw"].as_string() +"\n";
result +=start_esc+" delete IOPS: "+end_esc+ items["delete_iops"].as_string() +"\n";
result +=start_esc+" delete latency: "+end_esc+ items["delete_lat_f"].as_string() +"\n";
return result;
}
};
std::function<bool(cli_result_t &)> cli_tool_t::start_pool_ls(json11::Json cfg)
{
auto lister = new pool_ls_t();
lister->parent = this;
lister->list_pool_id = cfg["pool"].uint64_value();
lister->list_pool_name = lister->list_pool_id ? "" : cfg["pool"].as_string();
lister->show_all = cfg["long"].bool_value();
lister->show_stats = cfg["stats"].bool_value();
lister->sort_field = cfg["sort"].string_value();
lister->show_df_format = cfg["dfformat"].bool_value();
if (lister->sort_field == "osd_tags" ||
lister->sort_field == "primary_affinity_tags")
lister->sort_field = lister->sort_field + "_fmt";
lister->reverse = cfg["reverse"].bool_value();
lister->max_count = cfg["count"].uint64_value();
for (auto & item: cfg["names"].array_items())
{
lister->only_names.insert(item.string_value());
}
return [lister](cli_result_t & result)
{
lister->loop();
if (lister->is_done())
{
result = lister->result;
delete lister;
return true;
}
return false;
};
}
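Example invocations of the new listing command (the option names here are an assumption - option parsing lives in cli.cpp, which is not part of this hunk; the config keys used above are "stats", "sort", "reverse" and "long"):
vitastor-cli pool-ls
vitastor-cli pool-ls --stats --sort name --reverse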

src/cli_pool_modify.cpp Normal file

@@ -0,0 +1,268 @@
/*
=========================================================================
Copyright (c) 2023 MIND Software LLC. All Rights Reserved.
This file is part of the Software-Defined Storage MIND UStor Project.
For more information about this product, please visit https://mindsw.io
or contact us directly at info@mindsw.io
=========================================================================
*/
#include <ctype.h>
#include "cli.h"
#include "cli_pool_cfg.h"
#include "cluster_client.h"
#include "str_util.h"
struct pool_changer_t
{
cli_tool_t *parent;
// Required parameters (id/name)
pool_id_t pool_id = 0;
std::string pool_name;
// Force removal
bool force = false;
// Pool configurator
pool_configurator_t *cfg;
int state = 0;
cli_result_t result;
// Config of pool to be modified
pool_config_t *pool_config = NULL;
// Tags
json11::Json osd_tags_json;
json11::Json primary_affinity_tags_json;
// Updated pools
json11::Json new_pools;
// Expected pools mod revision
uint64_t pools_mod_rev;
bool is_done() { return state == 100; }
void loop()
{
if (state == 1)
goto resume_1;
else if (state == 2)
goto resume_2;
else if (state == 3)
goto resume_3;
// Validate pool name/id
// Get pool id by name (if name given)
if (pool_name != "")
{
for (auto & pce: parent->cli->st_cli.pool_config)
{
if (pce.second.name == pool_name)
{
pool_id = pce.first;
pool_config = &(pce.second);
break;
}
}
}
// Otherwise, check if given pool id is valid
else
{
// Set pool name from id (for easier logging)
pool_name = "id " + std::to_string(pool_id);
// Look-up pool id in pool_config
auto pce = parent->cli->st_cli.pool_config.find(pool_id);
if (pce != parent->cli->st_cli.pool_config.end())
{
pool_config = &(pce->second);
}
}
// Need pool config to proceed
if (!pool_config)
{
result = (cli_result_t){ .err = ENOENT, .text = "Pool "+pool_name+" does not exist" };
state = 100;
return;
}
// Validate pool parameters
if (!cfg->validate(parent->cli->st_cli, pool_config, !force))
{
result = (cli_result_t){ .err = EINVAL, .text = cfg->get_error_string() + "\n" };
state = 100;
return;
}
// OSD tags
if (cfg->osd_tags != "")
{
osd_tags_json = parent->parse_tags(cfg->osd_tags);
}
// Primary affinity tags
if (cfg->primary_affinity_tags != "")
{
primary_affinity_tags_json = parent->parse_tags(cfg->primary_affinity_tags);
}
// Proceed to modifying the pool
state = 1;
resume_1:
// Get pools from etcd
parent->etcd_txn(json11::Json::object {
{ "success", json11::Json::array {
json11::Json::object {
{ "request_range", json11::Json::object {
{ "key", base64_encode(parent->cli->st_cli.etcd_prefix+"/config/pools") },
} }
},
} },
});
state = 2;
resume_2:
if (parent->waiting > 0)
return;
if (parent->etcd_err.err)
{
result = parent->etcd_err;
state = 100;
return;
}
{
// Parse received pools from etcd
auto kv = parent->cli->st_cli.parse_etcd_kv(parent->etcd_result["responses"][0]["response_range"]["kvs"][0]);
// Update pool
auto pls = kv.value.object_items();
auto p = pls[std::to_string(pool_id)].object_items();
if (cfg->name != "")
p["name"] = cfg->name;
if (cfg->pg_size)
p["pg_size"] = cfg->pg_size;
if (cfg->pg_minsize)
p["pg_minsize"] = cfg->pg_minsize;
if (cfg->pg_count)
p["pg_count"] = cfg->pg_count;
if (cfg->failure_domain != "")
p["failure_domain"] = cfg->failure_domain;
if (cfg->max_osd_combinations)
p["max_osd_combinations"] = cfg->max_osd_combinations;
if (cfg->block_size)
p["block_size"] = cfg->block_size;
if (cfg->bitmap_granularity)
p["bitmap_granularity"] = cfg->bitmap_granularity;
if (cfg->immediate_commit != "")
p["immediate_commit"] = cfg->immediate_commit;
if (cfg->pg_stripe_size)
p["pg_stripe_size"] = cfg->pg_stripe_size;
if (cfg->root_node != "")
p["root_node"] = cfg->root_node;
if (cfg->scrub_interval != "")
p["scrub_interval"] = cfg->scrub_interval;
if (cfg->osd_tags != "")
p["osd_tags"] = osd_tags_json;
if (cfg->primary_affinity_tags != "")
p["primary_affinity_tags"] = primary_affinity_tags_json;
pls[std::to_string(pool_id)] = p;
// Record updated pools
new_pools = pls;
// Expected pools mod revision
pools_mod_rev = kv.mod_revision;
}
// Update pools in etcd
parent->etcd_txn(json11::Json::object {
{ "compare", json11::Json::array {
json11::Json::object {
{ "target", "MOD" },
{ "key", base64_encode(parent->cli->st_cli.etcd_prefix+"/config/pools") },
{ "result", "LESS" },
{ "mod_revision", pools_mod_rev+1 },
}
} },
{ "success", json11::Json::array {
json11::Json::object {
{ "request_put", json11::Json::object {
{ "key", base64_encode(parent->cli->st_cli.etcd_prefix+"/config/pools") },
{ "value", base64_encode(new_pools.dump()) },
} },
},
} },
});
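// The compare clause turns the put above into a compare-and-set: it only applies
// if /config/pools has not been modified since it was read (mod_revision < pools_mod_rev+1).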
state = 3;
resume_3:
if (parent->waiting > 0)
return;
if (parent->etcd_err.err)
{
result = parent->etcd_err;
state = 100;
return;
}
if (!parent->etcd_result["succeeded"].bool_value())
{
// Lost the compare-and-set race - re-read /config/pools and retry
goto resume_1;
}
// Successfully updated pool
result = (cli_result_t){
.err = 0,
.text = "Pool "+pool_name+" updated",
.data = new_pools
};
state = 100;
}
};
std::function<bool(cli_result_t &)> cli_tool_t::start_pool_modify(json11::Json cfg)
{
auto pool_changer = new pool_changer_t();
pool_changer->parent = this;
// Pool name (or id) required
if (!cfg["pool"].uint64_value() && cfg["pool"].as_string() == "")
{
delete pool_changer;
return [](cli_result_t & result)
{
result = (cli_result_t){
.err = EINVAL, .text = "Pool name or id must be given\n"
};
return true;
};
}
pool_changer->pool_id = cfg["pool"].uint64_value();
pool_changer->pool_name = pool_changer->pool_id ? "" : cfg["pool"].as_string();
pool_changer->cfg = new pool_configurator_t();
if (!pool_changer->cfg->parse(cfg, false))
{
std::string err = pool_changer->cfg->get_error_string();
delete pool_changer->cfg;
delete pool_changer;
return [err](cli_result_t & result)
{
result = (cli_result_t){ .err = EINVAL, .text = err + "\n" };
return true;
};
}
pool_changer->force = !cfg["force"].is_null();
return [pool_changer](cli_result_t & result)
{
pool_changer->loop();
if (pool_changer->is_done())
{
result = pool_changer->result;
delete pool_changer;
return true;
}
return false;
};
}
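Example, matching the modify-pool help text added by this PR (the pool may be given by name or by id):
vitastor-cli modify-pool testpool -n 64
vitastor-cli modify-pool 2 --name newname --pg_minsize 2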

src/cli_pool_rm.cpp Normal file

@@ -0,0 +1,230 @@
/*
=========================================================================
Copyright (c) 2023 MIND Software LLC. All Rights Reserved.
This file is part of the Software-Defined Storage MIND UStor Project.
For more information about this product, please visit https://mindsw.io
or contact us directly at info@mindsw.io
=========================================================================
*/
#include <ctype.h>
#include "cli.h"
#include "cluster_client.h"
#include "str_util.h"
struct pool_remover_t
{
cli_tool_t *parent;
// Required parameters (id/name)
pool_id_t pool_id = 0;
std::string pool_name;
// Force removal
bool force = false;
int state = 0;
cli_result_t result;
// Is pool valid?
bool pool_valid = false;
// Updated pools
json11::Json new_pools;
// Expected pools mod revision
uint64_t pools_mod_rev;
bool is_done() { return state == 100; }
void loop()
{
if (state == 1)
goto resume_1;
else if (state == 2)
goto resume_2;
else if (state == 3)
goto resume_3;
// Pool name (or id) required
if (!pool_id && pool_name == "")
{
result = (cli_result_t){ .err = EINVAL, .text = "Pool name or id must be given\n" };
state = 100;
return;
}
// Validate pool name/id
// Get pool id by name (if name given)
if (pool_name != "")
{
for (auto & ic: parent->cli->st_cli.pool_config)
{
if (ic.second.name == pool_name)
{
pool_id = ic.first;
pool_valid = 1;
break;
}
}
}
// Otherwise, check if given pool id is valid
else
{
// Set pool name from id (for easier logging)
pool_name = "id " + std::to_string(pool_id);
// Look-up pool id in pool_config
if (parent->cli->st_cli.pool_config.find(pool_id) != parent->cli->st_cli.pool_config.end())
{
pool_valid = 1;
}
}
// Need a valid pool to proceed
if (!pool_valid)
{
result = (cli_result_t){ .err = ENOENT, .text = "Pool "+pool_name+" does not exist" };
state = 100;
return;
}
// Unless forced, check if pool has associated Images/Snapshots
if (!force)
{
std::string images;
for (auto & ic: parent->cli->st_cli.inode_config)
{
if (pool_id && INODE_POOL(ic.second.num) != pool_id)
{
continue;
}
images += ((images != "") ? ", " : "") + ic.second.name;
}
if (images != "")
{
result = (cli_result_t){
.err = ENOTEMPTY,
.text =
"Pool "+pool_name+" cannot be removed as it still has the following "
"images/snapshots associated with it: "+images
};
state = 100;
return;
}
}
// Proceed to deleting the pool
state = 1;
do
{
resume_1:
// Get pools from etcd
parent->etcd_txn(json11::Json::object {
{ "success", json11::Json::array {
json11::Json::object {
{ "request_range", json11::Json::object {
{ "key", base64_encode(parent->cli->st_cli.etcd_prefix+"/config/pools") },
} }
},
} },
});
state = 2;
resume_2:
if (parent->waiting > 0)
return;
if (parent->etcd_err.err)
{
result = parent->etcd_err;
state = 100;
return;
}
{
// Parse received pools from etcd
auto kv = parent->cli->st_cli.parse_etcd_kv(parent->etcd_result["responses"][0]["response_range"]["kvs"][0]);
// Remove pool
auto p = kv.value.object_items();
if (p.erase(std::to_string(pool_id)) != 1)
{
result = (cli_result_t){
.err = ENOENT,
.text = "Failed to erase pool "+pool_name+" from: "+kv.value.dump()
};
state = 100;
return;
}
// Record updated pools
new_pools = p;
// Expected pools mod revision
pools_mod_rev = kv.mod_revision;
}
// Update pools in etcd
parent->etcd_txn(json11::Json::object {
{ "compare", json11::Json::array {
json11::Json::object {
{ "target", "MOD" },
{ "key", base64_encode(parent->cli->st_cli.etcd_prefix+"/config/pools") },
{ "result", "LESS" },
{ "mod_revision", pools_mod_rev+1 },
}
} },
{ "success", json11::Json::array {
json11::Json::object {
{ "request_put", json11::Json::object {
{ "key", base64_encode(parent->cli->st_cli.etcd_prefix+"/config/pools") },
{ "value", base64_encode(new_pools.dump()) },
} },
},
} },
});
state = 3;
resume_3:
if (parent->waiting > 0)
return;
if (parent->etcd_err.err)
{
result = parent->etcd_err;
state = 100;
return;
}
} while (!parent->etcd_result["succeeded"].bool_value());
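// The do/while above repeats the read-modify-write whenever the compare-and-set
// fails because /config/pools was changed concurrently.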
// Successfully deleted pool
result = (cli_result_t){
.err = 0,
.text = "Pool "+pool_name+" deleted",
.data = new_pools
};
state = 100;
}
};
std::function<bool(cli_result_t &)> cli_tool_t::start_pool_rm(json11::Json cfg)
{
auto pool_remover = new pool_remover_t();
pool_remover->parent = this;
pool_remover->pool_id = cfg["pool"].uint64_value();
pool_remover->pool_name = pool_remover->pool_id ? "" : cfg["pool"].as_string();
pool_remover->force = !cfg["force"].is_null();
return [pool_remover](cli_result_t & result)
{
pool_remover->loop();
if (pool_remover->is_done())
{
result = pool_remover->result;
delete pool_remover;
return true;
}
return false;
};
}
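Example (command name per the PR title; the force flag is an assumption based on the -f|--force convention used by the other commands and the cfg["force"] key above - it skips the image/snapshot emptiness check):
vitastor-cli pool-rm testpool
vitastor-cli pool-rm --force 2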


@@ -245,6 +245,7 @@ resume_8:
}
state = 100;
result = (cli_result_t){
.err = 0,
.text = "",
.data = my_result(result.data),
};


@@ -573,8 +573,7 @@ void etcd_state_client_t::load_global_config()
{
global_bitmap_granularity = DEFAULT_BITMAP_GRANULARITY;
}
global_immediate_commit = global_config["immediate_commit"].string_value() == "all"
? IMMEDIATE_ALL : (global_config["immediate_commit"].string_value() == "small" ? IMMEDIATE_SMALL : IMMEDIATE_NONE);
global_immediate_commit = parse_immediate_commit_string(global_config["immediate_commit"].string_value());
on_load_config_hook(global_config);
});
}
@@ -871,15 +870,32 @@ void etcd_state_client_t::parse_state(const etcd_kv_t & kv)
pc.scrub_interval = 0;
// Immediate Commit Mode
pc.immediate_commit = pool_item.second["immediate_commit"].is_string()
? (pool_item.second["immediate_commit"].string_value() == "all"
? IMMEDIATE_ALL : (pool_item.second["immediate_commit"].string_value() == "small"
? IMMEDIATE_SMALL : IMMEDIATE_NONE))
? parse_immediate_commit_string(pool_item.second["immediate_commit"].string_value())
: global_immediate_commit;
// PG Stripe Size
pc.pg_stripe_size = pool_item.second["pg_stripe_size"].uint64_value();
uint64_t min_stripe_size = pc.data_block_size * (pc.scheme == POOL_SCHEME_REPLICATED ? 1 : (pc.pg_size-pc.parity_chunks));
if (pc.pg_stripe_size < min_stripe_size)
pc.pg_stripe_size = min_stripe_size;
// Root Node
pc.root_node = pool_item.second["root_node"].string_value();
// osd_tags
if (pool_item.second["osd_tags"].is_array())
{
for (auto & osd_tag: pool_item.second["osd_tags"].array_items())
{
pc.osd_tags.push_back(osd_tag.string_value());
}
}
else if (pool_item.second["osd_tags"].string_value() != "")
{
// A single tag may also be given as a plain string
pc.osd_tags.push_back(pool_item.second["osd_tags"].string_value());
}
// primary_affinity_tags
if (pool_item.second["primary_affinity_tags"].is_array())
{
for (auto & primary_affinity_tag: pool_item.second["primary_affinity_tags"].array_items())
{
pc.primary_affinity_tags.push_back(primary_affinity_tag.string_value());
}
}
else if (pool_item.second["primary_affinity_tags"].string_value() != "")
{
pc.primary_affinity_tags.push_back(pool_item.second["primary_affinity_tags"].string_value());
}
// Save
pc.real_pg_count = this->pool_config[pool_id].real_pg_count;
std::swap(pc.pg_config, this->pool_config[pool_id].pg_config);
@@ -1167,6 +1183,12 @@ void etcd_state_client_t::parse_state(const etcd_kv_t & kv)
}
}
uint32_t etcd_state_client_t::parse_immediate_commit_string(const std::string & immediate_commit_str)
{
return immediate_commit_str == "all" ? IMMEDIATE_ALL :
(immediate_commit_str == "small" ? IMMEDIATE_SMALL : IMMEDIATE_NONE);
}
void etcd_state_client_t::insert_inode_config(const inode_config_t & cfg)
{
this->inode_config[cfg.num] = cfg;


@@ -60,6 +60,9 @@ struct pool_config_t
uint64_t pg_stripe_size;
std::map<pg_num_t, pg_config_t> pg_config;
uint64_t scrub_interval;
std::vector<std::string> osd_tags;
std::vector<std::string> primary_affinity_tags;
std::string root_node;
};
struct inode_config_t
@@ -146,6 +149,7 @@ public:
void clean_nonexistent_pgs();
void parse_state(const etcd_kv_t & kv);
void parse_config(const json11::Json & config);
uint32_t parse_immediate_commit_string(const std::string & immediate_commit_str);
void insert_inode_config(const inode_config_t & cfg);
inode_watch_t* watch_inode(std::string name);
void close_watch(inode_watch_t* watch);


@@ -3,6 +3,8 @@
#pragma once
#include "object_id.h"
#define POOL_SCHEME_REPLICATED 1
#define POOL_SCHEME_XOR 2
#define POOL_SCHEME_EC 3


@@ -214,7 +214,10 @@ void print_help(const char *help_text, std::string exe_name, std::string cmd, bo
else if (*next_line && isspace(*next_line))
started = true;
else if (cmd_start && matched)
{
filtered_text += std::string(cmd_start, next_line-cmd_start);
matched = started = false;
}
}
while (filtered_text.size() > 1 &&
filtered_text[filtered_text.size()-1] == '\n' &&