Add documentation and a very basic test for pool management commands
parent 86243b7101
commit 4eab26f968
@@ -154,8 +154,25 @@ That is, if it becomes impossible to place PG data on at least (pg_minsize)
OSDs, PG is deactivated for both read and write. So you know that a fresh
write always goes to at least (pg_minsize) OSDs (disks).

That is, pg_size minus pg_minsize sets the number of disk failures to tolerate
without temporary downtime (for [osd_out_time](monitor.en.md#osd_out_time)).
For example, the difference between pg_minsize 2 and 1 in a 3-way replicated
pool (pg_size=3) is:
- If 2 hosts go down with pg_minsize=2, the pool becomes inactive and remains
  inactive for [osd_out_time](monitor.en.md#osd_out_time) (10 minutes). After
  this timeout, the monitor selects replacement hosts/OSDs and the pool comes
  up and starts to heal. Therefore, if you don't have replacement OSDs, i.e.
  if you only have 3 hosts with OSDs and 2 of them are down, the pool remains
  inactive until you add or return at least 1 host (or change failure_domain
  to "osd").
- If 2 hosts go down with pg_minsize=1, the pool only experiences a short
  I/O pause until the monitor notices that OSDs are down (5-10 seconds with
  the default [etcd_report_interval](osd.en.md#etcd_report_interval)). After
  this pause, I/O resumes, but new data is temporarily written in only 1 copy.
  Then, after osd_out_time, the monitor also selects replacement OSDs and the
  pool starts to heal.

So, pg_minsize regulates the number of failures that a pool can tolerate
without temporary downtime for [osd_out_time](monitor.en.md#osd_out_time),
but at a cost of slightly reduced storage reliability.

FIXME: pg_minsize behaviour may be changed in the future to only make PGs
read-only instead of deactivating them.
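To make the documented tradeoff concrete: with pg_size=3 and pg_minsize=2 the pool keeps serving I/O through 1 lost failure domain, with pg_minsize=1 through 2. A minimal illustrative sketch of this arithmetic (a hypothetical helper, not part of Vitastor's code):

```
#include <cinttypes>
#include <cstdio>

// Per the docs above: a pool keeps serving I/O without waiting for
// osd_out_time as long as at most (pg_size - pg_minsize) failure
// domains are lost.
static uint64_t failures_without_downtime(uint64_t pg_size, uint64_t pg_minsize)
{
    return pg_size - pg_minsize;
}

int main()
{
    // The 3-way replicated example from the text (pg_size=3):
    printf("pg_minsize=2: %" PRIu64 " failure(s) without downtime\n",
        failures_without_downtime(3, 2)); // 1
    printf("pg_minsize=1: %" PRIu64 " failure(s) without downtime\n",
        failures_without_downtime(3, 1)); // 2
    return 0;
}
```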
@@ -168,8 +185,8 @@ read-only instead of deactivating them.
Number of PGs for this pool. The value should be big enough for the monitor /
LP solver to be able to optimize data placement.

-"Enough" is usually around 64-128 PGs per OSD, i.e. you set pg_count for pool
-to (total OSD count * 100 / pg_size). You can round it to the closest power of 2,
+"Enough" is usually around 10-100 PGs per OSD, i.e. you set pg_count for pool
+to (total OSD count * 10 / pg_size). You can round it to the closest power of 2,
because it makes it easier to reduce or increase PG count later by dividing or
multiplying it by 2.
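As a worked example of the new rule of thumb: with 32 OSDs and pg_size=3, 32*10/3 = 106, which rounds to 128. A small sketch of the calculation (a hypothetical helper, not Vitastor code):

```
#include <cinttypes>
#include <cstdio>

// Round up to the nearest power of 2, as the pg_count advice suggests.
static uint64_t round_up_pow2(uint64_t n)
{
    uint64_t p = 1;
    while (p < n)
        p *= 2;
    return p;
}

int main()
{
    uint64_t osd_count = 32, pg_size = 3;     // assumed example cluster
    uint64_t raw = osd_count * 10 / pg_size;  // 32*10/3 = 106
    printf("suggested pg_count: %" PRIu64 "\n", round_up_pow2(raw)); // 128
    return 0;
}
```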
@@ -157,9 +157,25 @@
OSDs, the PG is deactivated for reads and writes. In other words, it is always
known that new data blocks are always written to at least pg_minsize disks.

In essence, the difference between pg_size and pg_minsize sets the number of
disk failures that the pool can survive without a temporary (for
[osd_out_time](monitor.ru.md#osd_out_time)) interruption of service.
For example, the difference between pg_minsize 2 and 1 in a replicated pool
with 3 copies of the data (pg_size=3) manifests as follows:
- If 2 servers go down with pg_minsize=2, the pool becomes inactive and stays
  inactive for [osd_out_time](monitor.en.md#osd_out_time) (10 minutes), after
  which the monitor assigns other OSDs/servers as replacements, the pool comes
  up and starts restoring the missing copies of the data. Accordingly, if there
  are no replacement OSDs - that is, if you have only 3 servers with OSDs and
  2 of them are down - the pool stays unavailable until you return or add at
  least 1 server (or switch failure_domain to "osd").
- If 2 servers go down with pg_minsize=1, I/O only pauses for a short time,
  until the monitor realizes that the OSDs are down (which takes 5-10 seconds
  with the default [etcd_report_interval](osd.en.md#etcd_report_interval)).
  After that, I/O resumes, but new data is temporarily written in only 1 copy.
  Once osd_out_time has passed, the monitor likewise assigns other OSDs to
  replace the failed ones, and the pool starts restoring the copies of the data.

That is, pg_minsize regulates the number of failures that the pool can survive
without a temporary interruption of service for [osd_out_time](monitor.ru.md#osd_out_time),
but at the cost of slightly lowered reliability guarantees.

FIXME: The behaviour of pg_minsize may be changed in the future from fully
deactivating PGs to putting them into read-only mode.
@@ -172,8 +188,8 @@ deactivating PGs to putting them into read-only mode.
Number of PGs for this pool. The number should be big enough for the monitor
to be able to distribute data evenly across them.

-Usually this means around 64-128 PGs per 1 OSD, i.e. pg_count can be set to
-(total number of OSDs * 100 / pg_size). The value can be rounded to the nearest
+Usually this means around 10-100 PGs per 1 OSD, i.e. pg_count can be set to
+(total number of OSDs * 10 / pg_size). The value can be rounded to the nearest
power of 2, to make it easier to decrease or increase the PG count later by
multiplying or dividing it by 2.
@@ -75,18 +75,16 @@ On the monitor hosts:

## Create a pool

-Create pool configuration in etcd:
+Create a pool using vitastor-cli:

```
-etcdctl --endpoints=... put /vitastor/config/pools '{"1":{"name":"testpool",
-"scheme":"replicated","pg_size":2,"pg_minsize":1,"pg_count":256,"failure_domain":"host"}}'
+vitastor-cli create-pool testpool --pg_size 2 --pg_count 256
```

For EC pools the configuration should look like the following:

```
-etcdctl --endpoints=... put /vitastor/config/pools '{"2":{"name":"ecpool",
-"scheme":"ec","pg_size":4,"parity_chunks":2,"pg_minsize":2,"pg_count":256,"failure_domain":"host"}}'
+vitastor-cli create-pool testpool --ec 2+2 --pg_count 256
```

After you do this, one of the monitors will configure PGs and OSDs will start them.
@@ -77,18 +77,16 @@

## Create a pool

-Create the pool configuration using etcdctl:
+Create a pool using vitastor-cli:

```
-etcdctl --endpoints=... put /vitastor/config/pools '{"1":{"name":"testpool",
-"scheme":"replicated","pg_size":2,"pg_minsize":1,"pg_count":256,"failure_domain":"host"}}'
+vitastor-cli create-pool testpool --pg_size 2 --pg_count 256
```

For erasure-coded pools the configuration should look approximately like this:

```
-etcdctl --endpoints=... put /vitastor/config/pools '{"2":{"name":"ecpool",
-"scheme":"ec","pg_size":4,"parity_chunks":2,"pg_minsize":2,"pg_count":256,"failure_domain":"host"}}'
+vitastor-cli create-pool testpool --ec 2+2 --pg_count 256
```

After that, one of the monitors should configure the PGs, and the OSDs should start them.
@@ -24,6 +24,10 @@ It supports the following commands:
- [fix](#fix)
- [alloc-osd](#alloc-osd)
- [rm-osd](#rm-osd)
- [create-pool](#create-pool)
- [modify-pool](#modify-pool)
- [ls-pools](#ls-pools)
- [rm-pool](#rm-pool)

Global options:
@@ -238,3 +242,84 @@ Refuses to remove OSDs with data without `--force` and `--allow-data-loss`.

With `--dry-run` only checks if deletion is possible without data loss and
redundancy degradation.

## create-pool

`vitastor-cli create-pool|pool-create <name> (-s <pg_size>|--ec <N>+<K>) -n <pg_count> [OPTIONS]`

Create a pool. Required parameters:

| `-s|--pg_size R` | Number of replicas for replicated pools |
| `--ec N+K` | Number of data (N) and parity (K) chunks for erasure-coded pools |
| `-n|--pg_count N` | PG count for the new pool (start with 10*<OSD count>/pg_size rounded to a power of 2) |

Optional parameters:

| `--pg_minsize <number>` | R or N+K minus number of failures to tolerate without downtime ([details](../config/pool.en.md#pg_minsize)) |
| `--failure_domain host` | Failure domain: host, osd or a level from placement_levels. Default: host |
| `--root_node <node>` | Put pool only on child OSDs of this placement tree node |
| `--osd_tags <tag>[,<tag>]...` | Put pool only on OSDs tagged with all specified tags |
| `--block_size 128k` | Put pool only on OSDs with this data block size |
| `--bitmap_granularity 4k` | Put pool only on OSDs with this logical sector size |
| `--immediate_commit none` | Put pool only on OSDs with this or larger immediate_commit (none < small < all) |
| `--primary_affinity_tags tags` | Prefer to put primary copies on OSDs with all specified tags |
| `--scrub_interval <time>` | Enable regular scrubbing for this pool. Format: number + unit s/m/h/d/M/y |
| `--pg_stripe_size <number>` | Increase object grouping stripe |
| `--max_osd_combinations 10000` | Maximum number of random combinations for LP solver input |
| `--wait` | Wait for the new pool to come online |
| `-f|--force` | Do not check that cluster has enough OSDs to create the pool |

See also [Pool configuration](../config/pool.en.md) for detailed parameter descriptions.

Examples:

`vitastor-cli create-pool test_x4 -s 4 -n 32`

`vitastor-cli create-pool test_ec42 --ec 4+2 -n 32`

## modify-pool

`vitastor-cli modify-pool|pool-modify <id|name> [--name <new_name>] [PARAMETERS...]`

Modify an existing pool. Modifiable parameters:

```
[-s|--pg_size <number>] [--pg_minsize <number>] [-n|--pg_count <count>]
[--failure_domain <level>] [--root_node <node>] [--osd_tags <tags>]
[--max_osd_combinations <number>] [--primary_affinity_tags <tags>] [--scrub_interval <time>]
```

Non-modifiable parameters (changing them WILL lead to data loss):

```
[--block_size <size>] [--bitmap_granularity <size>]
[--immediate_commit <all|small|none>] [--pg_stripe_size <size>]
```

These, however, can still be modified with -f|--force.

See [create-pool](#create-pool) for parameter descriptions.

Examples:

`vitastor-cli modify-pool pool_A --name pool_B`

`vitastor-cli modify-pool 2 --pg_size 4 -n 128`

## rm-pool

`vitastor-cli rm-pool|pool-rm [--force] <id|name>`

Remove a pool. Refuses to remove pools with images without `--force`.

## ls-pools

`vitastor-cli ls-pools|pool-ls|ls-pool|pools [-l] [--detail] [--sort FIELD] [-r] [-n N] [--stats] [<glob> ...]`

List pools (only matching <glob> patterns if passed).

| `-l|--long` | Also report I/O statistics |
| `--detail` | Use list format (not table), show all details |
| `--sort FIELD` | Sort by specified field (see fields in --json output) |
| `-r|--reverse` | Sort in descending order |
| `-n|--count N` | Only list first N items |
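A note on the `--ec N+K` notation documented above: N data chunks plus K parity chunks are stored per object, so raw-space overhead is (N+K)/N, versus pg_size for replicas. An illustrative comparison (a hypothetical helper, not Vitastor code):

```
#include <cstdio>

// Raw bytes stored per byte of user data for the schemes described above.
static double overhead_replicated(int pg_size) { return (double)pg_size; }
static double overhead_ec(int n, int k) { return (double)(n + k) / n; }

int main()
{
    printf("replicated, pg_size=3: %.2fx raw space\n", overhead_replicated(3)); // 3.00x
    printf("EC 4+2: %.2fx raw space\n", overhead_ec(4, 2));                     // 1.50x
    return 0;
}
```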
@@ -23,6 +23,10 @@ vitastor-cli - command-line interface for adm
- [merge-data](#merge-data)
- [alloc-osd](#alloc-osd)
- [rm-osd](#rm-osd)
- [create-pool](#create-pool)
- [modify-pool](#modify-pool)
- [ls-pools](#ls-pools)
- [rm-pool](#rm-pool)

Global options:
@@ -85,8 +89,8 @@ kaveri 2/1 32 0 B 10 G 0 B 100% 0%

`vitastor-cli ls [-l] [-p POOL] [--sort FIELD] [-r] [-n N] [<glob> ...]`

-List images; if `<glob>` patterns are passed, show only images with names
-matching these patterns (standard filesystem globs with * and ?).
+List images; if `<glob>` pattern(s) are passed, show only images with names
+matching one of the patterns (standard filesystem globs with * and ?).

Options:
@@ -255,3 +259,85 @@ vitastor-cli snap-create [-p|--pool <id|name>] <image>@<snapshot>

With the `--dry-run` option, only checks whether removal is possible without
data loss and redundancy degradation.

## create-pool

`vitastor-cli create-pool|pool-create <name> (-s <pg_size>|--ec <N>+<K>) -n <pg_count> [OPTIONS]`

Create a pool. Required parameters:

| `-s|--pg_size R` | Number of data copies for replicated pools |
| `--ec N+K` | Number of data (N) and parity (K) chunks for erasure-coded pools |
| `-n|--pg_count N` | Number of PGs for the new pool (start with 10*<OSD count>/pg_size, rounded to a power of two) |

Optional parameters:

| `--pg_minsize <number>` | (R or N+K) minus the number of failures allowed without stopping the pool ([details](../config/pool.ru.md#pg_minsize)) |
| `--failure_domain host` | Failure domain: host, osd or another level from placement_levels. Default: host |
| `--root_node <node>` | Use only child OSDs of this placement tree node for the pool |
| `--osd_tags <tag>[,<tag>]...` | ...only OSDs with all the specified tags |
| `--block_size 128k` | ...only OSDs with the given data block size |
| `--bitmap_granularity 4k` | ...only OSDs with the given logical sector size |
| `--immediate_commit none` | ...only OSDs with this or a larger immediate_commit (none < small < all) |
| `--primary_affinity_tags tags` | Prefer OSDs with all the given tags for the primary role |
| `--scrub_interval <time>` | Enable scrubs with the given time interval (number + unit s/m/h/d/M/y) |
| `--pg_stripe_size <number>` | Increase the object-to-PG grouping stripe |
| `--max_osd_combinations 10000` | Maximum number of random OSD combinations for the LP solver |
| `--wait` | Wait until the new pool becomes active |
| `-f|--force` | Do not check that the cluster has enough failure domains to create the pool |

For parameter details, see [Pool configuration](../config/pool.ru.md).

Examples:

`vitastor-cli create-pool test_x4 -s 4 -n 32`

`vitastor-cli create-pool test_ec42 --ec 4+2 -n 32`

## modify-pool

`vitastor-cli modify-pool|pool-modify <id|name> [--name <new_name>] [PARAMETERS...]`

Change the settings of an existing pool. Modifiable parameters:

```
[-s|--pg_size <number>] [--pg_minsize <number>] [-n|--pg_count <count>]
[--failure_domain <level>] [--root_node <node>] [--osd_tags <tags>]
[--max_osd_combinations <number>] [--primary_affinity_tags <tags>] [--scrub_interval <time>]
```

Non-modifiable parameters (changing them WILL lead to data loss):

```
[--block_size <size>] [--bitmap_granularity <size>]
[--immediate_commit <all|small|none>] [--pg_stripe_size <size>]
```

These parameters can only be changed by explicitly passing the -f or --force option.

For parameter descriptions, see [create-pool](#create-pool).

Examples:

`vitastor-cli modify-pool pool_A --name pool_B`

`vitastor-cli modify-pool 2 --pg_size 4 -n 128`

## rm-pool

`vitastor-cli rm-pool|pool-rm [--force] <id|name>`

Remove a pool. Refuses to remove a pool that still contains images without `--force`.

## ls-pools

`vitastor-cli ls-pools|pool-ls|ls-pool|pools [-l] [--detail] [--sort FIELD] [-r] [-n N] [--stats] [<glob> ...]`

List pools. If `<glob>` pattern(s) are passed, show only pools with names
matching one of the patterns (standard filesystem globs with * and ?).

| `-l|--long` | Also print I/O statistics |
| `--detail` | Maximally detailed output as a list (rather than a table) |
| `--sort FIELD` | Sort by the given field (for the fields, see the --json output) |
| `-r|--reverse` | Sort in descending order |
| `-n|--count N` | Print only the first N entries |
src/cli.cpp
@@ -121,15 +121,15 @@ static const char* help_text =
" Optional parameters:\n"
" --pg_minsize <number> R or N+K minus number of failures to tolerate without downtime\n"
" --failure_domain host Failure domain: host, osd or a level from placement_levels. Default: host\n"
-" --root_node <node> Put pool on child OSDs of this placement tree node\n"
-" --osd_tags <tag>[,<tag>]... Put pool on OSDs tagged with all specified tags\n"
-" --block_size 128k Put pool on OSDs with this data block size\n"
-" --bitmap_granularity 4k Put pool on OSDs with this logical sector size\n"
-" --immediate_commit none Put pool on OSDs with this or larger immediate_commit (none < small < all)\n"
+" --root_node <node> Put pool only on child OSDs of this placement tree node\n"
+" --osd_tags <tag>[,<tag>]... Put pool only on OSDs tagged with all specified tags\n"
+" --block_size 128k Put pool only on OSDs with this data block size\n"
+" --bitmap_granularity 4k Put pool only on OSDs with this logical sector size\n"
+" --immediate_commit none Put pool only on OSDs with this or larger immediate_commit (none < small < all)\n"
" --primary_affinity_tags tags Prefer to put primary copies on OSDs with all specified tags\n"
" --scrub_interval <time> Enable regular scrubbing for this pool. Format: number + unit s/m/h/d/M/y\n"
-" --pg_stripe_size <number> Increase object grouping stripe. Default: block_size*data_parts\n"
-" --max_osd_combinations 10000 Maximum number of random combinations for LP solver input. Default: 10000\n"
+" --pg_stripe_size <number> Increase object grouping stripe\n"
+" --max_osd_combinations 10000 Maximum number of random combinations for LP solver input\n"
" --wait Wait for the new pool to come online\n"
" -f|--force Do not check that cluster has enough OSDs to create the pool\n"
" Examples:\n"
@@ -151,13 +151,13 @@ static const char* help_text =
" vitastor-cli modify-pool 2 --pg_size 4 -n 128\n"
"\n"
"vitastor-cli rm-pool|pool-rm [--force] <id|name>\n"
-" Remove existing pool. Refuses to remove pools with data without --force.\n"
+" Remove a pool. Refuses to remove pools with images without --force.\n"
"\n"
"vitastor-cli ls-pools|pool-ls|ls-pool|pools [-l] [--detail] [--sort FIELD] [-r] [-n N] [--stats] [<glob> ...]\n"
" List pools (only matching <glob> patterns if passed).\n"
-" -l|--long Also report PG states and I/O statistics\n"
+" -l|--long Also report I/O statistics\n"
" --detail Use list format (not table), show all details\n"
-" --sort FIELD Sort by specified field\n"
+" --sort FIELD Sort by specified field (see fields in --json output)\n"
" -r|--reverse Sort in descending order\n"
" -n|--count N Only list first N items\n"
"\n"
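The help text documents `--scrub_interval` as "number + unit s/m/h/d/M/y". As a hedged illustration of parsing such a value (a sketch only; the real vitastor-cli parser may differ in details):

```
#include <cstdint>
#include <string>

// Illustrative parser for "<number><unit>" durations as documented above
// (s/m/h/d/M/y). Returns seconds, or 0 on parse failure. This is a sketch,
// not the actual vitastor-cli implementation.
static uint64_t parse_interval(const std::string & s)
{
    if (s.empty())
        return 0;
    size_t pos = 0;
    uint64_t num = 0;
    while (pos < s.size() && s[pos] >= '0' && s[pos] <= '9')
        num = num * 10 + (s[pos++] - '0');
    if (pos != s.size() - 1)
        return 0;
    switch (s[pos])
    {
        case 's': return num;
        case 'm': return num * 60;
        case 'h': return num * 3600;
        case 'd': return num * 86400;
        case 'M': return num * 86400 * 30;  // month approximated as 30 days
        case 'y': return num * 86400 * 365; // year approximated as 365 days
        default: return 0;
    }
}
```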
@@ -81,7 +81,8 @@ public:

std::string print_table(json11::Json items, json11::Json header, bool use_esc);

-std::string print_detail(json11::Json item, std::vector<std::pair<std::string, std::string>> names, bool use_esc);
+size_t print_detail_title_len(json11::Json item, std::vector<std::pair<std::string, std::string>> names, size_t prev_len);
+std::string print_detail(json11::Json item, std::vector<std::pair<std::string, std::string>> names, size_t title_len, bool use_esc);

std::string format_lat(uint64_t lat);
@@ -49,7 +49,7 @@ std::string validate_pool_config(json11::Json::object & new_cfg, json11::Json ol
    // Default scheme
    new_cfg["scheme"] = "replicated";
}
-if (old_cfg.is_null() && !new_cfg["pg_minsize"].uint64_value())
+if (new_cfg.find("pg_minsize") == new_cfg.end() && (old_cfg.is_null() || new_cfg.find("pg_size") != new_cfg.end()))
{
    // Default pg_minsize
    if (new_cfg["scheme"] == "replicated")
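The rewritten condition changes when the pg_minsize default is applied: previously only on pool creation, and only when the supplied value was zero or absent; now whenever pg_minsize is not given explicitly and the pool is either new or its pg_size is being changed, so the default can track the new pg_size. A simplified restatement of the two rules (hypothetical standalone code with a reduced config type, not the json11-based original):

```
#include <map>
#include <string>

// Simplified illustration of the changed defaulting rule; `Cfg` stands in
// for the json11 object handled by validate_pool_config().
using Cfg = std::map<std::string, int>;

// Old rule: default pg_minsize only when creating a pool (no old config)
// and the requested value is 0/absent.
static bool old_rule(const Cfg & new_cfg, bool creating)
{
    auto it = new_cfg.find("pg_minsize");
    return creating && (it == new_cfg.end() || it->second == 0);
}

// New rule: default pg_minsize when it is not set explicitly, and either
// the pool is new or pg_size is being changed at the same time.
static bool new_rule(const Cfg & new_cfg, bool creating)
{
    return new_cfg.find("pg_minsize") == new_cfg.end() &&
        (creating || new_cfg.find("pg_size") != new_cfg.end());
}
```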
@@ -83,6 +83,7 @@ std::string validate_pool_config(json11::Json::object & new_cfg, json11::Json ol
    {
        return key+" must be a non-negative integer";
    }
+    value = value.uint64_value();
}
else if (key == "name" || key == "scheme" || key == "immediate_commit" ||
    key == "failure_domain" || key == "root_node" || key == "scrub_interval")
@@ -562,11 +562,17 @@ resume_3:
    { "write_fmt", "Write" },
    { "delete_fmt", "Delete" },
};
-for (auto & item: to_list())
+auto list = to_list();
+size_t title_len = 0;
+for (auto & item: list)
+{
+    title_len = print_detail_title_len(item, cols, title_len);
+}
+for (auto & item: list)
{
    if (result.text != "")
        result.text += "\n";
-    result.text += print_detail(item, cols, parent->color);
+    result.text += print_detail(item, cols, title_len, parent->color);
}
state = 100;
return;
@@ -631,9 +637,9 @@ resume_3:
    }
};

-std::string print_detail(json11::Json item, std::vector<std::pair<std::string, std::string>> names, bool use_esc)
+size_t print_detail_title_len(json11::Json item, std::vector<std::pair<std::string, std::string>> names, size_t prev_len)
{
-    size_t title_len = 0;
+    size_t title_len = prev_len;
    for (auto & kv: names)
    {
        if (!item[kv.first].is_null() && (!item[kv.first].is_string() || item[kv.first].string_value() != ""))
@@ -642,6 +648,11 @@ std::string print_detail(json11::Json item, std::vector<std::pair<std::string, s
            title_len = title_len < len ? len : title_len;
        }
    }
+    return title_len;
+}
+
+std::string print_detail(json11::Json item, std::vector<std::pair<std::string, std::string>> names, size_t title_len, bool use_esc)
+{
    std::string str;
    for (auto & kv: names)
    {
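The refactor above switches detailed pool listing to a two-pass layout: the first pass computes the widest title across all items, the second prints every item padded to that shared width, so values line up across pools. A self-contained sketch of the same pattern (hypothetical types, not the actual cli code):

```
#include <cstdio>
#include <string>
#include <utility>
#include <vector>

// Two-pass aligned printing, mirroring print_detail_title_len()/print_detail():
// pass 1 finds the longest key, pass 2 pads every key to that width.
using Item = std::vector<std::pair<std::string, std::string>>;

static size_t title_len_pass(const Item & item, size_t prev_len)
{
    size_t title_len = prev_len;
    for (auto & kv: item)
        title_len = title_len < kv.first.size() ? kv.first.size() : title_len;
    return title_len;
}

static void print_pass(const Item & item, size_t title_len)
{
    for (auto & kv: item)
        printf("%-*s %s\n", (int)title_len, kv.first.c_str(), kv.second.c_str());
}

int main()
{
    std::vector<Item> items = {
        { { "name", "testpool" }, { "scheme", "replicated" } },
        { { "max_osd_combinations", "10000" }, { "pg_count", "32" } },
    };
    size_t title_len = 0;
    for (auto & item: items)
        title_len = title_len_pass(item, title_len); // pass 1: shared width
    for (auto & item: items)
        print_pass(item, title_len);                 // pass 2: aligned output
    return 0;
}
```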
@@ -13,7 +13,14 @@ $ETCDCTL put /vitastor/osd/stats/5 '{"host":"host3","size":1073741824,"time":"'$
$ETCDCTL put /vitastor/osd/stats/6 '{"host":"host3","size":1073741824,"time":"'$TIME'"}'
$ETCDCTL put /vitastor/osd/stats/7 '{"host":"host4","size":1073741824,"time":"'$TIME'"}'
$ETCDCTL put /vitastor/osd/stats/8 '{"host":"host4","size":1073741824,"time":"'$TIME'"}'
-$ETCDCTL put /vitastor/config/pools '{"1":{"name":"testpool","scheme":"replicated","pg_size":2,"pg_minsize":1,"pg_count":4,"failure_domain":"rack"}}'
+build/src/vitastor-cli --etcd_address $ETCD_URL create-pool testpool --ec 3+2 -n 32 --failure_domain rack --force
+$ETCDCTL get --print-value-only /vitastor/config/pools | jq -s -e '. == [{"1": {"failure_domain": "rack", "name": "testpool", "parity_chunks": 2, "pg_count": 32, "pg_minsize": 4, "pg_size": 5, "scheme": "ec"}}]'
+build/src/vitastor-cli --etcd_address $ETCD_URL modify-pool testpool --ec 3+3 --failure_domain host
+$ETCDCTL get --print-value-only /vitastor/config/pools | jq -s -e '. == [{"1": {"failure_domain": "host", "name": "testpool", "parity_chunks": 3, "pg_count": 32, "pg_minsize": 4, "pg_size": 6, "scheme": "ec"}}]'
+build/src/vitastor-cli --etcd_address $ETCD_URL rm-pool testpool
+$ETCDCTL get --print-value-only /vitastor/config/pools | jq -s -e '. == [{}]'
+build/src/vitastor-cli --etcd_address $ETCD_URL create-pool testpool -s 2 -n 4 --failure_domain rack --force
+$ETCDCTL get --print-value-only /vitastor/config/pools | jq -s -e '. == [{"1":{"name":"testpool","scheme":"replicated","pg_size":2,"pg_minsize":1,"pg_count":4,"failure_domain":"rack"}}]'

node mon/mon-main.js --etcd_address $ETCD_URL --etcd_prefix "/vitastor" >>./testdata/mon.log 2>&1 &
MON_PID=$!