Compare commits: adbe9eee50...7ef14d586f (43 commits)

| Author | SHA1 | Date |
|---|---|---|
| Vitaliy Filippov | 7ef14d586f | |
| Vitaliy Filippov | 87a2dab9fc | |
| Vitaliy Filippov | 178b2b5f29 | |
| Vitaliy Filippov | 40591feed9 | |
| Vitaliy Filippov | fc613bab16 | |
| Vitaliy Filippov | 5cbdff4951 | |
| Vitaliy Filippov | 37ae8ea273 | |
| Vitaliy Filippov | c9b2da267e | |
| Vitaliy Filippov | abe3de9e8b | |
| Vitaliy Filippov | f5fee7b5f1 | |
| Vitaliy Filippov | 47d5cd2345 | |
| Vitaliy Filippov | 25e1944342 | |
| Vitaliy Filippov | 0e3373289f | |
| Vitaliy Filippov | a523f334d8 | |
| Vitaliy Filippov | cdc24938be | |
| Vitaliy Filippov | a9275989f6 | |
| Vitaliy Filippov | e509dfbe93 | |
| Vitaliy Filippov | 3e051a77cb | |
| Vitaliy Filippov | 69fe41c5d1 | |
| Vitaliy Filippov | f5229d6b29 | |
| Vitaliy Filippov | c1ea3d007f | |
| Vitaliy Filippov | 4072f292d4 | |
| Vitaliy Filippov | a4d84b27fc | |
| Vitaliy Filippov | b327b6413e | |
| Vitaliy Filippov | 93f334054f | |
| Vitaliy Filippov | f1c5d2abaa | |
| Vitaliy Filippov | e56371a49c | |
| Vitaliy Filippov | da1cb65e0c | |
| Vitaliy Filippov | 2f184faa08 | |
| Vitaliy Filippov | d6584ee1b0 | |
| Vitaliy Filippov | 59b4fb0ad7 | |
| Vitaliy Filippov | 6997cc632a | |
| Vitaliy Filippov | d456b28d6f | |
| Vitaliy Filippov | 4cf6dceed7 | |
| Vitaliy Filippov | 38b8963330 | |
| Vitaliy Filippov | 77167e2920 | |
| Vitaliy Filippov | 5af23672d0 | |
| Vitaliy Filippov | 6bf1f539a6 | |
| Vitaliy Filippov | 4eab26f968 | |
| Vitaliy Filippov | 86243b7101 | |
| idelson | dc92851322 | |
| Zibort Cloud | 02d1f16bbd | |
| Vitaliy Filippov | fc413038d1 | |
```diff
@@ -2,6 +2,6 @@ cmake_minimum_required(VERSION 2.8.12)
 
 project(vitastor)
 
-set(VERSION "1.4.7")
+set(VERSION "1.4.8")
 
 add_subdirectory(src)
```
```diff
@@ -1,4 +1,4 @@
-VERSION ?= v1.4.7
+VERSION ?= v1.4.8
 
 all: build push
 
```
```diff
@@ -49,7 +49,7 @@ spec:
 capabilities:
 add: ["SYS_ADMIN"]
 allowPrivilegeEscalation: true
-image: vitalif/vitastor-csi:v1.4.7
+image: vitalif/vitastor-csi:v1.4.8
 args:
 - "--node=$(NODE_ID)"
 - "--endpoint=$(CSI_ENDPOINT)"
```
```diff
@@ -121,7 +121,7 @@ spec:
 privileged: true
 capabilities:
 add: ["SYS_ADMIN"]
-image: vitalif/vitastor-csi:v1.4.7
+image: vitalif/vitastor-csi:v1.4.8
 args:
 - "--node=$(NODE_ID)"
 - "--endpoint=$(CSI_ENDPOINT)"
```
```diff
@@ -5,7 +5,7 @@ package vitastor
 
 const (
 vitastorCSIDriverName = "csi.vitastor.io"
-vitastorCSIDriverVersion = "1.4.7"
+vitastorCSIDriverVersion = "1.4.8"
 )
 
 // Config struct fills the parameters of request or user input
```
```diff
@@ -3,5 +3,5 @@
 cat < vitastor.Dockerfile > ../Dockerfile
 cd ..
 mkdir -p packages
-sudo podman build --build-arg REL=bookworm -v `pwd`/packages:/root/packages -f Dockerfile .
+sudo podman build --build-arg DISTRO=debian --build-arg REL=bookworm -v `pwd`/packages:/root/packages -f Dockerfile .
 rm Dockerfile
```
```diff
@@ -3,5 +3,5 @@
 cat < vitastor.Dockerfile > ../Dockerfile
 cd ..
 mkdir -p packages
-sudo podman build --build-arg REL=bullseye -v `pwd`/packages:/root/packages -f Dockerfile .
+sudo podman build --build-arg DISTRO=debian --build-arg REL=bullseye -v `pwd`/packages:/root/packages -f Dockerfile .
 rm Dockerfile
```
```diff
@@ -3,5 +3,5 @@
 cat < vitastor.Dockerfile > ../Dockerfile
 cd ..
 mkdir -p packages
-sudo podman build --build-arg REL=buster -v `pwd`/packages:/root/packages -f Dockerfile .
+sudo podman build --build-arg DISTRO=debian --build-arg REL=buster -v `pwd`/packages:/root/packages -f Dockerfile .
 rm Dockerfile
```
```diff
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+cat < vitastor.Dockerfile > ../Dockerfile
+cd ..
+mkdir -p packages
+sudo podman build --build-arg DISTRO=ubuntu --build-arg REL=jammy -v `pwd`/packages:/root/packages -f Dockerfile .
+rm Dockerfile
```
```diff
@@ -1,4 +1,4 @@
-vitastor (1.4.7-1) unstable; urgency=medium
+vitastor (1.4.8-1) unstable; urgency=medium
 
 * Bugfixes
 
```
```diff
@@ -1,13 +1,14 @@
 # Build patched libvirt for Debian Buster or Bullseye/Sid inside a container
-# cd ..; podman build --build-arg REL=bullseye -v `pwd`/packages:/root/packages -f debian/libvirt.Dockerfile .
+# cd ..; podman build --build-arg DISTRO=debian --build-arg REL=bullseye -v `pwd`/packages:/root/packages -f debian/libvirt.Dockerfile .
 
+ARG DISTRO=
 ARG REL=
-FROM debian:$REL
+FROM $DISTRO:$REL
 ARG REL=
 
 WORKDIR /root
 
-RUN if [ "$REL" = "buster" -o "$REL" = "bullseye" ]; then \
+RUN if ([ "${DISTRO}" = "debian" ]) && ( [ "${REL}" = "buster" -o "${REL}" = "bullseye" ] ); then \
 echo "deb http://deb.debian.org/debian $REL-backports main" >> /etc/apt/sources.list; \
 echo >> /etc/apt/preferences; \
 echo 'Package: *' >> /etc/apt/preferences; \
@@ -23,7 +24,7 @@ RUN apt-get -y build-dep libvirt0
 RUN apt-get -y install libglusterfs-dev
 RUN apt-get --download-only source libvirt
 
-ADD patches/libvirt-5.0-vitastor.diff patches/libvirt-7.0-vitastor.diff patches/libvirt-7.5-vitastor.diff patches/libvirt-7.6-vitastor.diff /root
+ADD patches/libvirt-5.0-vitastor.diff patches/libvirt-7.0-vitastor.diff patches/libvirt-7.5-vitastor.diff patches/libvirt-7.6-vitastor.diff patches/libvirt-8.0-vitastor.diff /root
 RUN set -e; \
 mkdir -p /root/packages/libvirt-$REL; \
 rm -rf /root/packages/libvirt-$REL/*; \
```
```diff
@@ -1,8 +1,10 @@
 # Build Vitastor packages for Debian inside a container
-# cd ..; podman build --build-arg REL=bullseye -v `pwd`/packages:/root/packages -f debian/vitastor.Dockerfile .
+# cd ..; podman build --build-arg DISTRO=debian --build-arg REL=bullseye -v `pwd`/packages:/root/packages -f debian/vitastor.Dockerfile .
 
+ARG DISTRO=debian
 ARG REL=
-FROM debian:$REL
+FROM $DISTRO:$REL
+ARG DISTRO=debian
 ARG REL=
 
 WORKDIR /root
@@ -35,8 +37,8 @@ RUN set -e -x; \
 mkdir -p /root/packages/vitastor-$REL; \
 rm -rf /root/packages/vitastor-$REL/*; \
 cd /root/packages/vitastor-$REL; \
-cp -r /root/vitastor vitastor-1.4.7; \
-cd vitastor-1.4.7; \
+cp -r /root/vitastor vitastor-1.4.8; \
+cd vitastor-1.4.8; \
 ln -s /root/fio-build/fio-*/ ./fio; \
 FIO=$(head -n1 fio/debian/changelog | perl -pe 's/^.*\((.*?)\).*$/$1/'); \
 ls /usr/include/linux/raw.h || cp ./debian/raw.h /usr/include/linux/raw.h; \
@@ -49,8 +51,8 @@ RUN set -e -x; \
 rm -rf a b; \
 echo "dep:fio=$FIO" > debian/fio_version; \
 cd /root/packages/vitastor-$REL; \
-tar --sort=name --mtime='2020-01-01' --owner=0 --group=0 --exclude=debian -cJf vitastor_1.4.7.orig.tar.xz vitastor-1.4.7; \
-cd vitastor-1.4.7; \
+tar --sort=name --mtime='2020-01-01' --owner=0 --group=0 --exclude=debian -cJf vitastor_1.4.8.orig.tar.xz vitastor-1.4.8; \
+cd vitastor-1.4.8; \
 V=$(head -n1 debian/changelog | perl -pe 's/^.*\((.*?)\).*$/$1/'); \
 DEBFULLNAME="Vitaliy Filippov <vitalif@yourcmc.ru>" dch -D $REL -v "$V""$REL" "Rebuild for $REL"; \
 DEB_BUILD_OPTIONS=nocheck dpkg-buildpackage --jobs=auto -sa; \
```
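
With the new `DISTRO` build argument, the same Dockerfile can in principle be pointed at an Ubuntu base image as well. A minimal sketch of such an invocation, combining the `DISTRO=ubuntu`/`REL=jammy` values from the new build script with the command pattern shown in the Dockerfile comment above (whether this exact combination builds cleanly is an assumption):

```bash
# Build Vitastor .deb packages on an Ubuntu 22.04 (jammy) base instead of Debian.
# Run from one level above the repository, as in the Dockerfile comment.
cd ..
podman build \
  --build-arg DISTRO=ubuntu \
  --build-arg REL=jammy \
  -v `pwd`/packages:/root/packages \
  -f debian/vitastor.Dockerfile .
```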
```diff
@@ -9,6 +9,8 @@
 These parameters apply only to Vitastor clients (QEMU, fio, NBD and so on) and
 affect their interaction with the cluster.
 
+- [client_retry_interval](#client_retry_interval)
+- [client_eio_retry_interval](#client_eio_retry_interval)
 - [client_max_dirty_bytes](#client_max_dirty_bytes)
 - [client_max_dirty_ops](#client_max_dirty_ops)
 - [client_enable_writeback](#client_enable_writeback)
@@ -19,6 +21,26 @@ affect their interaction with the cluster.
 - [nbd_max_devices](#nbd_max_devices)
 - [nbd_max_part](#nbd_max_part)
 
+## client_retry_interval
+
+- Type: milliseconds
+- Default: 50
+- Minimum: 10
+- Can be changed online: yes
+
+Retry time for I/O requests failed due to inactive PGs or network
+connectivity errors.
+
+## client_eio_retry_interval
+
+- Type: milliseconds
+- Default: 1000
+- Can be changed online: yes
+
+Retry time for I/O requests failed due to data corruption or unfinished
+EC object deletions (has_incomplete PG state). 0 disables such retries
+and clients are not blocked and just get EIO error code instead.
+
 ## client_max_dirty_bytes
 
 - Type: integer
```
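
As a usage illustration (not part of the diff), the two new client parameters could be set in the local configuration file read by Vitastor clients. The file path follows the usual Vitastor convention and is an assumption here; both values are in milliseconds, and setting `client_eio_retry_interval` to 0 makes clients return EIO instead of retrying:

```bash
# Sketch: configure client retry behaviour in the assumed default config path.
cat > /etc/vitastor/vitastor.conf <<'EOF'
{
  "client_retry_interval": 100,
  "client_eio_retry_interval": 0
}
EOF
```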
```diff
@@ -9,6 +9,8 @@
 Данные параметры применяются только к клиентам Vitastor (QEMU, fio, NBD и т.п.) и
 затрагивают логику их работы с кластером.
 
+- [client_retry_interval](#client_retry_interval)
+- [client_eio_retry_interval](#client_eio_retry_interval)
 - [client_max_dirty_bytes](#client_max_dirty_bytes)
 - [client_max_dirty_ops](#client_max_dirty_ops)
 - [client_enable_writeback](#client_enable_writeback)
@@ -19,6 +21,27 @@
 - [nbd_max_devices](#nbd_max_devices)
 - [nbd_max_part](#nbd_max_part)
 
+## client_retry_interval
+
+- Тип: миллисекунды
+- Значение по умолчанию: 50
+- Минимальное значение: 10
+- Можно менять на лету: да
+
+Время повтора запросов ввода-вывода, неудачных из-за неактивных PG или
+ошибок сети.
+
+## client_eio_retry_interval
+
+- Тип: миллисекунды
+- Значение по умолчанию: 1000
+- Можно менять на лету: да
+
+Время повтора запросов ввода-вывода, неудачных из-за повреждения данных
+или незавершённых удалений EC-объектов (состояния PG has_incomplete).
+0 отключает повторы таких запросов и клиенты не блокируются, а вместо
+этого просто получают код ошибки EIO.
+
 ## client_max_dirty_bytes
 
 - Тип: целое число
```
```diff
@@ -25,7 +25,6 @@ between clients, OSDs and etcd.
 - [peer_connect_timeout](#peer_connect_timeout)
 - [osd_idle_timeout](#osd_idle_timeout)
 - [osd_ping_timeout](#osd_ping_timeout)
-- [up_wait_retry_interval](#up_wait_retry_interval)
 - [max_etcd_attempts](#max_etcd_attempts)
 - [etcd_quick_timeout](#etcd_quick_timeout)
 - [etcd_slow_timeout](#etcd_slow_timeout)
@@ -212,17 +211,6 @@ Maximum time to wait for OSD keepalive responses. If an OSD doesn't respond
 within this time, the connection to it is dropped and a reconnection attempt
 is scheduled.
 
-## up_wait_retry_interval
-
-- Type: milliseconds
-- Default: 50
-- Minimum: 10
-- Can be changed online: yes
-
-OSDs respond to clients with a special error code when they receive I/O
-requests for a PG that's not synchronized and started. This parameter sets
-the time for the clients to wait before re-attempting such I/O requests.
-
 ## max_etcd_attempts
 
 - Type: integer
```
```diff
@@ -25,7 +25,6 @@
 - [peer_connect_timeout](#peer_connect_timeout)
 - [osd_idle_timeout](#osd_idle_timeout)
 - [osd_ping_timeout](#osd_ping_timeout)
-- [up_wait_retry_interval](#up_wait_retry_interval)
 - [max_etcd_attempts](#max_etcd_attempts)
 - [etcd_quick_timeout](#etcd_quick_timeout)
 - [etcd_slow_timeout](#etcd_slow_timeout)
@@ -221,19 +220,6 @@ OSD в любом случае согласовывают реальное зн
 Если OSD не отвечает за это время, соединение отключается и производится
 повторная попытка соединения.
 
-## up_wait_retry_interval
-
-- Тип: миллисекунды
-- Значение по умолчанию: 50
-- Минимальное значение: 10
-- Можно менять на лету: да
-
-Когда OSD получают от клиентов запросы ввода-вывода, относящиеся к не
-поднятым на данный момент на них PG, либо к PG в процессе синхронизации,
-они отвечают клиентам специальным кодом ошибки, означающим, что клиент
-должен некоторое время подождать перед повторением запроса. Именно это время
-ожидания задаёт данный параметр.
-
 ## max_etcd_attempts
 
 - Тип: целое число
```
```diff
@@ -154,8 +154,25 @@ That is, if it becomes impossible to place PG data on at least (pg_minsize)
 OSDs, PG is deactivated for both read and write. So you know that a fresh
 write always goes to at least (pg_minsize) OSDs (disks).
 
-That is, pg_size minus pg_minsize sets the number of disk failures to tolerate
-without temporary downtime (for [osd_out_time](monitor.en.md#osd_out_time)).
+For example, the difference between pg_minsize 2 and 1 in a 3-way replicated
+pool (pg_size=3) is:
+- If 2 hosts go down with pg_minsize=2, the pool becomes inactive and remains
+  inactive for [osd_out_time](monitor.en.md#osd_out_time) (10 minutes). After
+  this timeout, the monitor selects replacement hosts/OSDs and the pool comes
+  up and starts to heal. Therefore, if you don't have replacement OSDs, i.e.
+  if you only have 3 hosts with OSDs and 2 of them are down, the pool remains
+  inactive until you add or return at least 1 host (or change failure_domain
+  to "osd").
+- If 2 hosts go down with pg_minsize=1, the pool only experiences a short
+  I/O pause until the monitor notices that OSDs are down (5-10 seconds with
+  the default [etcd_report_interval](osd.en.md#etcd_report_interval)). After
+  this pause, I/O resumes, but new data is temporarily written in only 1 copy.
+  Then, after osd_out_time, the monitor also selects replacement OSDs and the
+  pool starts to heal.
+
+So, pg_minsize regulates the number of failures that a pool can tolerate
+without temporary downtime for [osd_out_time](monitor.en.md#osd_out_time),
+but at a cost of slightly reduced storage reliability.
 
 FIXME: pg_minsize behaviour may be changed in the future to only make PGs
 read-only instead of deactivating them.
@@ -168,8 +185,8 @@ read-only instead of deactivating them.
 Number of PGs for this pool. The value should be big enough for the monitor /
 LP solver to be able to optimize data placement.
 
-"Enough" is usually around 64-128 PGs per OSD, i.e. you set pg_count for pool
-to (total OSD count * 100 / pg_size). You can round it to the closest power of 2,
+"Enough" is usually around 10-100 PGs per OSD, i.e. you set pg_count for pool
+to (total OSD count * 10 / pg_size). You can round it to the closest power of 2,
 because it makes it easier to reduce or increase PG count later by dividing or
 multiplying it by 2.
 
```
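
A quick arithmetic sketch of the updated pg_count rule of thumb (pg_count ≈ total OSD count * 10 / pg_size, rounded to a power of 2); the OSD count below is only an example:

```bash
# Example: 24 OSDs, 3-way replication (pg_size=3)
osds=24
pg_size=3
raw=$(( osds * 10 / pg_size ))   # 80
pg_count=64                      # closest powers of 2 are 64 and 128; either is acceptable
echo "suggested pg_count: $pg_count (raw estimate was $raw)"
```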
```diff
@@ -157,9 +157,25 @@
 OSD, PG деактивируется на чтение и запись. Иными словами, всегда известно,
 что новые блоки данных всегда записываются как минимум на pg_minsize дисков.
 
-По сути, разница pg_size и pg_minsize задаёт число отказов дисков, которые пул
-может пережить без временной (на [osd_out_time](monitor.ru.md#osd_out_time))
-остановки обслуживания.
+Для примера, разница между pg_minsize 2 и 1 в реплицированном пуле с 3 копиями
+данных (pg_size=3), проявляется следующим образом:
+- Если 2 сервера отключаются при pg_minsize=2, пул становится неактивным и
+  остаётся неактивным в течение [osd_out_time](monitor.en.md#osd_out_time)
+  (10 минут), после чего монитор назначает другие OSD/серверы на замену, пул
+  поднимается и начинает восстанавливать недостающие копии данных. Соответственно,
+  если OSD на замену нет - то есть, если у вас всего 3 сервера с OSD и 2 из них
+  недоступны - пул так и остаётся недоступным до тех пор, пока вы не вернёте
+  или не добавите хотя бы 1 сервер (или не переключите failure_domain на "osd").
+- Если 2 сервера отключаются при pg_minsize=1, ввод-вывод лишь приостанавливается
+  на короткое время, до тех пор, пока монитор не поймёт, что OSD отключены
+  (что занимает 5-10 секунд при стандартном [etcd_report_interval](osd.en.md#etcd_report_interval)).
+  После этого ввод-вывод восстанавливается, но новые данные временно пишутся
+  всего в 1 копии. Когда же проходит osd_out_time, монитор точно так же назначает
+  другие OSD на замену выбывшим и пул начинает восстанавливать копии данных.
+
+То есть, pg_minsize регулирует число отказов, которые пул может пережить без
+временной остановки обслуживания на [osd_out_time](monitor.ru.md#osd_out_time),
+но ценой немного пониженных гарантий надёжности.
 
 FIXME: Поведение pg_minsize может быть изменено в будущем с полной деактивации
 PG на перевод их в режим только для чтения.
@@ -172,8 +188,8 @@ PG на перевод их в режим только для чтения.
 Число PG для данного пула. Число должно быть достаточно большим, чтобы монитор
 мог равномерно распределить по ним данные.
 
-Обычно это означает примерно 64-128 PG на 1 OSD, т.е. pg_count можно устанавливать
-равным (общему числу OSD * 100 / pg_size). Значение можно округлить до ближайшей
+Обычно это означает примерно 10-100 PG на 1 OSD, т.е. pg_count можно устанавливать
+равным (общему числу OSD * 10 / pg_size). Значение можно округлить до ближайшей
 степени 2, чтобы потом было легче уменьшать или увеличивать число PG, умножая
 или деля его на 2.
 
```
```diff
@@ -1,3 +1,27 @@
+- name: client_retry_interval
+  type: ms
+  min: 10
+  default: 50
+  online: true
+  info: |
+    Retry time for I/O requests failed due to inactive PGs or network
+    connectivity errors.
+  info_ru: |
+    Время повтора запросов ввода-вывода, неудачных из-за неактивных PG или
+    ошибок сети.
+- name: client_eio_retry_interval
+  type: ms
+  default: 1000
+  online: true
+  info: |
+    Retry time for I/O requests failed due to data corruption or unfinished
+    EC object deletions (has_incomplete PG state). 0 disables such retries
+    and clients are not blocked and just get EIO error code instead.
+  info_ru: |
+    Время повтора запросов ввода-вывода, неудачных из-за повреждения данных
+    или незавершённых удалений EC-объектов (состояния PG has_incomplete).
+    0 отключает повторы таких запросов и клиенты не блокируются, а вместо
+    этого просто получают код ошибки EIO.
 - name: client_max_dirty_bytes
   type: int
   default: 33554432
```
```diff
@@ -243,21 +243,6 @@
     Максимальное время ожидания ответа на запрос проверки состояния соединения.
     Если OSD не отвечает за это время, соединение отключается и производится
     повторная попытка соединения.
-- name: up_wait_retry_interval
-  type: ms
-  min: 10
-  default: 50
-  online: true
-  info: |
-    OSDs respond to clients with a special error code when they receive I/O
-    requests for a PG that's not synchronized and started. This parameter sets
-    the time for the clients to wait before re-attempting such I/O requests.
-  info_ru: |
-    Когда OSD получают от клиентов запросы ввода-вывода, относящиеся к не
-    поднятым на данный момент на них PG, либо к PG в процессе синхронизации,
-    они отвечают клиентам специальным кодом ошибки, означающим, что клиент
-    должен некоторое время подождать перед повторением запроса. Именно это время
-    ожидания задаёт данный параметр.
 - name: max_etcd_attempts
   type: int
   default: 5
```
````diff
@@ -75,18 +75,16 @@ On the monitor hosts:
 
 ## Create a pool
 
-Create pool configuration in etcd:
+Create a pool using vitastor-cli:
 
 ```
-etcdctl --endpoints=... put /vitastor/config/pools '{"1":{"name":"testpool",
-"scheme":"replicated","pg_size":2,"pg_minsize":1,"pg_count":256,"failure_domain":"host"}}'
+vitastor-cli create-pool testpool --pg_size 2 --pg_count 256
 ```
 
 For EC pools the configuration should look like the following:
 
 ```
-etcdctl --endpoints=... put /vitastor/config/pools '{"2":{"name":"ecpool",
-"scheme":"ec","pg_size":4,"parity_chunks":2,"pg_minsize":2,"pg_count":256,"failure_domain":"host"}}'
+vitastor-cli create-pool testpool --ec 2+2 --pg_count 256
 ```
 
 After you do this, one of the monitors will configure PGs and OSDs will start them.
 
````
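
As a follow-up sketch (not part of the diff), the newly created pool can be checked with the pool listing command documented further down in this changeset; the pool name comes from the quick start above:

```bash
# Create the replicated pool from the quick start, then verify it became active.
vitastor-cli create-pool testpool --pg_size 2 --pg_count 256
vitastor-cli ls-pools -l testpool   # -l adds I/O statistics; see the CLI reference below
```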
````diff
@@ -77,18 +77,16 @@
 
 ## Создайте пул
 
-Создайте конфигурацию пула с помощью etcdctl:
+Создайте пул с помощью vitastor-cli:
 
 ```
-etcdctl --endpoints=... put /vitastor/config/pools '{"1":{"name":"testpool",
-"scheme":"replicated","pg_size":2,"pg_minsize":1,"pg_count":256,"failure_domain":"host"}}'
+vitastor-cli create-pool testpool --pg_size 2 --pg_count 256
 ```
 
 Для пулов с кодами коррекции ошибок конфигурация должна выглядеть примерно так:
 
 ```
-etcdctl --endpoints=... put /vitastor/config/pools '{"2":{"name":"ecpool",
-"scheme":"ec","pg_size":4,"parity_chunks":2,"pg_minsize":2,"pg_count":256,"failure_domain":"host"}}'
+vitastor-cli create-pool testpool --ec 2+2 --pg_count 256
 ```
 
 После этого один из мониторов должен сконфигурировать PG, а OSD должны запустить их.
 
````
```diff
@@ -24,6 +24,10 @@ It supports the following commands:
 - [fix](#fix)
 - [alloc-osd](#alloc-osd)
 - [rm-osd](#rm-osd)
+- [create-pool](#create-pool)
+- [modify-pool](#modify-pool)
+- [ls-pools](#ls-pools)
+- [rm-pool](#rm-pool)
 
 Global options:
 
```
````diff
@@ -131,19 +135,18 @@ See also about [how to export snapshots](qemu.en.md#exporting-snapshots).
 
 ## modify
 
-`vitastor-cli modify <name> [--rename <new-name>] [--resize <size>] [--readonly | --readwrite] [-f|--force]`
+`vitastor-cli modify <name> [--rename <new-name>] [--resize <size>] [--readonly | --readwrite] [-f|--force] [--down-ok]`
 
 Rename, resize image or change its readonly status. Images with children can't be made read-write.
 If the new size is smaller than the old size, extra data will be purged.
 You should resize file system in the image, if present, before shrinking it.
 
-```
--f|--force Proceed with shrinking or setting readwrite flag even if the image has children.
-```
+| `-f|--force` | Proceed with shrinking or setting readwrite flag even if the image has children. |
+| `--down-ok` | Proceed with shrinking even if some data will be left on unavailable OSDs. |
 
 ## rm
 
-`vitastor-cli rm <from> [<to>] [--writers-stopped]`
+`vitastor-cli rm <from> [<to>] [--writers-stopped] [--down-ok]`
 
 Remove `<from>` or all layers between `<from>` and `<to>` (`<to>` must be a child of `<from>`),
 rebasing all their children accordingly. --writers-stopped allows merging to be a bit
````
```diff
@@ -151,6 +154,10 @@ more effective in case of a single 'slim' read-write child and 'fat' removed par
 the child is merged into parent and parent is renamed to child in that case.
 In other cases parent layers are always merged into children.
 
+Other options:
+
+| `--down-ok` | Continue deletion/merging even if some data will be left on unavailable OSDs. |
+
 ## flatten
 
 `vitastor-cli flatten <layer>`
```
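
A hedged usage sketch of the new `--down-ok` flag in both commands above (image names and sizes are hypothetical, not taken from the diff):

```bash
# Delete or merge layers even if some of their data currently sits on OSDs
# that are down; leftover copies on those OSDs are not cleaned up.
vitastor-cli rm testimg@snap1 --down-ok

# Shrink an image in the same situation.
vitastor-cli modify testimg --resize 10G --down-ok
```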
````diff
@@ -238,3 +245,84 @@ Refuses to remove OSDs with data without `--force` and `--allow-data-loss`.
 
 With `--dry-run` only checks if deletion is possible without data loss and
 redundancy degradation.
 
+## create-pool
+
+`vitastor-cli create-pool|pool-create <name> (-s <pg_size>|--ec <N>+<K>) -n <pg_count> [OPTIONS]`
+
+Create a pool. Required parameters:
+
+| `-s|--pg_size R` | Number of replicas for replicated pools |
+| `--ec N+K` | Number of data (N) and parity (K) chunks for erasure-coded pools |
+| `-n|--pg_count N` | PG count for the new pool (start with 10*<OSD count>/pg_size rounded to a power of 2) |
+
+Optional parameters:
+
+| `--pg_minsize <number>` | R or N+K minus number of failures to tolerate without downtime ([details](../config/pool.en.md#pg_minsize)) |
+| `--failure_domain host` | Failure domain: host, osd or a level from placement_levels. Default: host |
+| `--root_node <node>` | Put pool only on child OSDs of this placement tree node |
+| `--osd_tags <tag>[,<tag>]...` | Put pool only on OSDs tagged with all specified tags |
+| `--block_size 128k` | Put pool only on OSDs with this data block size |
+| `--bitmap_granularity 4k` | Put pool only on OSDs with this logical sector size |
+| `--immediate_commit none` | Put pool only on OSDs with this or larger immediate_commit (none < small < all) |
+| `--primary_affinity_tags tags` | Prefer to put primary copies on OSDs with all specified tags |
+| `--scrub_interval <time>` | Enable regular scrubbing for this pool. Format: number + unit s/m/h/d/M/y |
+| `--pg_stripe_size <number>` | Increase object grouping stripe |
+| `--max_osd_combinations 10000` | Maximum number of random combinations for LP solver input |
+| `--wait` | Wait for the new pool to come online |
+| `-f|--force` | Do not check that cluster has enough OSDs to create the pool |
+
+See also [Pool configuration](../config/pool.en.md) for detailed parameter descriptions.
+
+Examples:
+
+`vitastor-cli create-pool test_x4 -s 4 -n 32`
+
+`vitastor-cli create-pool test_ec42 --ec 4+2 -n 32`
+
+## modify-pool
+
+`vitastor-cli modify-pool|pool-modify <id|name> [--name <new_name>] [PARAMETERS...]`
+
+Modify an existing pool. Modifiable parameters:
+
+```
+[-s|--pg_size <number>] [--pg_minsize <number>] [-n|--pg_count <count>]
+[--failure_domain <level>] [--root_node <node>] [--osd_tags <tags>]
+[--max_osd_combinations <number>] [--primary_affinity_tags <tags>] [--scrub_interval <time>]
+```
+
+Non-modifiable parameters (changing them WILL lead to data loss):
+
+```
+[--block_size <size>] [--bitmap_granularity <size>]
+[--immediate_commit <all|small|none>] [--pg_stripe_size <size>]
+```
+
+These, however, can still be modified with -f|--force.
+
+See [create-pool](#create-pool) for parameter descriptions.
+
+Examples:
+
+`vitastor-cli modify-pool pool_A --name pool_B`
+
+`vitastor-cli modify-pool 2 --pg_size 4 -n 128`
+
+## rm-pool
+
+`vitastor-cli rm-pool|pool-rm [--force] <id|name>`
+
+Remove a pool. Refuses to remove pools with images without `--force`.
+
+## ls-pools
+
+`vitastor-cli ls-pools|pool-ls|ls-pool|pools [-l] [--detail] [--sort FIELD] [-r] [-n N] [--stats] [<glob> ...]`
+
+List pools (only matching <glob> patterns if passed).
+
+| `-l|--long` | Also report I/O statistics |
+| `--detail` | Use list format (not table), show all details |
+| `--sort FIELD` | Sort by specified field (see fields in --json output) |
+| `-r|--reverse` | Sort in descending order |
+| `-n|--count N` | Only list first N items |
+
````
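
The `rm-pool` and `ls-pools` subsections above carry no examples of their own; a small hedged sketch (pool names and the sort field are assumptions):

```bash
vitastor-cli ls-pools --sort name          # list all pools sorted by (assumed) "name" field
vitastor-cli ls-pools -l --detail testpool # full details plus I/O stats for one pool
vitastor-cli rm-pool --force old_pool      # remove even if the pool still contains images
```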
````diff
@@ -23,6 +23,10 @@ vitastor-cli - интерфейс командной строки для адм
 - [merge-data](#merge-data)
 - [alloc-osd](#alloc-osd)
 - [rm-osd](#rm-osd)
+- [create-pool](#create-pool)
+- [modify-pool](#modify-pool)
+- [ls-pools](#ls-pools)
+- [rm-pool](#rm-pool)
 
 Глобальные опции:
 
@@ -85,8 +89,8 @@ kaveri 2/1 32 0 B 10 G 0 B 100% 0%
 
 `vitastor-cli ls [-l] [-p POOL] [--sort FIELD] [-r] [-n N] [<glob> ...]`
 
-Показать список образов, если переданы шаблоны `<glob>`, то только с именами,
-соответствующими этим шаблонам (стандартные ФС-шаблоны с * и ?).
+Показать список образов, если передан(ы) шаблон(ы) `<glob>`, то только с именами,
+соответствующими одному из шаблонов (стандартные ФС-шаблоны с * и ?).
 
 Опции:
 
@@ -132,7 +136,7 @@ vitastor-cli snap-create [-p|--pool <id|name>] <image>@<snapshot>
 
 ## modify
 
-`vitastor-cli modify <name> [--rename <new-name>] [--resize <size>] [--readonly | --readwrite] [-f|--force]`
+`vitastor-cli modify <name> [--rename <new-name>] [--resize <size>] [--readonly | --readwrite] [-f|--force] [--down-ok]`
 
 Изменить размер, имя образа или флаг "только для чтения". Снимать флаг "только для чтения"
 и уменьшать размер образов, у которых есть дочерние клоны, без `--force` нельзя.
@@ -140,13 +144,12 @@ vitastor-cli snap-create [-p|--pool <id|name>] <image>@<snapshot>
 Если новый размер меньше старого, "лишние" данные будут удалены, поэтому перед уменьшением
 образа сначала уменьшите файловую систему в нём.
 
-```
--f|--force Разрешить уменьшение или перевод в чтение-запись образа, у которого есть клоны.
-```
+| -f|--force | Разрешить уменьшение или перевод в чтение-запись образа, у которого есть клоны. |
+| --down-ok | Разрешить уменьшение, даже если часть данных останется неудалённой на недоступных OSD. |
 
 ## rm
 
-`vitastor-cli rm <from> [<to>] [--writers-stopped]`
+`vitastor-cli rm <from> [<to>] [--writers-stopped] [--down-ok]`
 
 Удалить образ `<from>` или все слои от `<from>` до `<to>` (`<to>` должен быть дочерним
 образом `<from>`), одновременно меняя родительские образы их клонов (если таковые есть).
@@ -158,6 +161,10 @@ vitastor-cli snap-create [-p|--pool <id|name>] <image>@<snapshot>
 
 В других случаях родительские слои вливаются в дочерние.
 
+Другие опции:
+
+| `--down-ok` | Продолжать удаление/слияние, даже если часть данных останется неудалённой на недоступных OSD. |
+
 ## flatten
 
 `vitastor-cli flatten <layer>`
@@ -255,3 +262,85 @@ vitastor-cli snap-create [-p|--pool <id|name>] <image>@<snapshot>
 
 С опцией `--dry-run` только проверяет, возможно ли удаление без потери данных и деградации
 избыточности.
 
+## create-pool
+
+`vitastor-cli create-pool|pool-create <name> (-s <pg_size>|--ec <N>+<K>) -n <pg_count> [OPTIONS]`
+
+Создать пул. Обязательные параметры:
+
+| `-s|--pg_size R` | Число копий данных для реплицированных пулов |
+| `--ec N+K` | Число частей данных (N) и чётности (K) для пулов с кодами коррекции ошибок |
+| `-n|--pg_count N` | Число PG для нового пула (начните с 10*<число OSD>/pg_size, округлённого до степени двойки) |
+
+Необязательные параметры:
+
+| `--pg_minsize <number>` | (R или N+K) минус число разрешённых отказов без остановки пула ([подробнее](../config/pool.ru.md#pg_minsize)) |
+| `--failure_domain host` | Домен отказа: host, osd или другой из placement_levels. По умолчанию: host |
+| `--root_node <node>` | Использовать для пула только дочерние OSD этого узла дерева размещения |
+| `--osd_tags <tag>[,<tag>]...` | ...только OSD со всеми заданными тегами |
+| `--block_size 128k` | ...только OSD с данным размером блока |
+| `--bitmap_granularity 4k` | ...только OSD с данным размером логического сектора |
+| `--immediate_commit none` | ...только OSD с этим или большим immediate_commit (none < small < all) |
+| `--primary_affinity_tags tags` | Предпочитать OSD со всеми данными тегами для роли первичных |
+| `--scrub_interval <time>` | Включить скрабы с заданным интервалом времени (число + единица s/m/h/d/M/y) |
+| `--pg_stripe_size <number>` | Увеличить блок группировки объектов по PG |
+| `--max_osd_combinations 10000` | Максимальное число случайных комбинаций OSD для ЛП-солвера |
+| `--wait` | Подождать, пока новый пул будет активирован |
+| `-f|--force` | Не проверять, что в кластере достаточно доменов отказа для создания пула |
+
+Подробно о параметрах см. [Конфигурация пулов](../config/pool.ru.md).
+
+Примеры:
+
+`vitastor-cli create-pool test_x4 -s 4 -n 32`
+
+`vitastor-cli create-pool test_ec42 --ec 4+2 -n 32`
+
+## modify-pool
+
+`vitastor-cli modify-pool|pool-modify <id|name> [--name <new_name>] [PARAMETERS...]`
+
+Изменить настройки существующего пула. Изменяемые параметры:
+
+```
+[-s|--pg_size <number>] [--pg_minsize <number>] [-n|--pg_count <count>]
+[--failure_domain <level>] [--root_node <node>] [--osd_tags <tags>]
+[--max_osd_combinations <number>] [--primary_affinity_tags <tags>] [--scrub_interval <time>]
+```
+
+Неизменяемые параметры (их изменение ПРИВЕДЁТ к потере данных):
+
+```
+[--block_size <size>] [--bitmap_granularity <size>]
+[--immediate_commit <all|small|none>] [--pg_stripe_size <size>]
+```
+
+Эти параметры можно изменить, только если явно передать опцию -f или --force.
+
+Описания параметров смотрите в [create-pool](#create-pool).
+
+Примеры:
+
+`vitastor-cli modify-pool pool_A --name pool_B`
+
+`vitastor-cli modify-pool 2 --pg_size 4 -n 128`
+
+## rm-pool
+
+`vitastor-cli rm-pool|pool-rm [--force] <id|name>`
+
+Удалить пул. Отказывается удалять пул, в котором ещё есть образы, без `--force`.
+
+## ls-pools
+
+`vitastor-cli ls-pools|pool-ls|ls-pool|pools [-l] [--detail] [--sort FIELD] [-r] [-n N] [--stats] [<glob> ...]`
+
+Показать список пулов. Если передан(ы) шаблон(ы) `<glob>`, то только с именами,
+соответствующими одному из шаблонов (стандартные ФС-шаблоны с * и ?).
+
+| `-l|--long` | Вывести также статистику ввода-вывода |
+| `--detail` | Максимально подробный вывод в виде списка (а не таблицы) |
+| `--sort FIELD` | Сортировать по заданному полю (поля см. в выводе с --json) |
+| `-r|--reverse` | Сортировать в обратном порядке |
+| `-n|--count N` | Выводить только первые N записей |
+
````
```diff
@@ -86,13 +86,14 @@ const etcd_tree = {
 client_max_buffered_bytes: 33554432,
 client_max_buffered_ops: 1024,
 client_max_writeback_iodepth: 256,
+client_retry_interval: 50, // ms. min: 10
+client_eio_retry_interval: 1000, // ms
 // client and osd - configurable online
 log_level: 0,
 peer_connect_interval: 5, // seconds. min: 1
 peer_connect_timeout: 5, // seconds. min: 1
 osd_idle_timeout: 5, // seconds. min: 1
 osd_ping_timeout: 5, // seconds. min: 1
-up_wait_retry_interval: 50, // ms. min: 10
 max_etcd_attempts: 5,
 etcd_quick_timeout: 1000, // ms
 etcd_slow_timeout: 5000, // ms
```
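
Since the documentation above marks both new parameters as changeable online, a cluster-wide change would typically go through etcd. The `/vitastor/config/global` key follows the convention used elsewhere in Vitastor and the endpoint address is a placeholder, so treat this as a hedged sketch:

```bash
# Raise the client retry interval cluster-wide without restarting clients.
etcdctl --endpoints=http://10.0.0.1:2379 put /vitastor/config/global \
  '{"client_retry_interval": 100, "client_eio_retry_interval": 2000}'
```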
```diff
@@ -1,6 +1,6 @@
 {
 "name": "vitastor-mon",
-"version": "1.4.7",
+"version": "1.4.8",
 "description": "Vitastor SDS monitor service",
 "main": "mon-main.js",
 "scripts": {
```
```diff
@@ -50,7 +50,7 @@ from cinder.volume import configuration
 from cinder.volume import driver
 from cinder.volume import volume_utils
 
-VERSION = '1.4.7'
+VERSION = '1.4.8'
 
 LOG = logging.getLogger(__name__)
 
```
```diff
@@ -0,0 +1,692 @@
+commit d85024bd803b3b91f15578ed22de4ce31856626f
+Author: Vitaliy Filippov <vitalif@yourcmc.ru>
+Date: Wed Jan 24 18:07:43 2024 +0300
+
+Add Vitastor support
+
+diff --git a/docs/schemas/domaincommon.rng b/docs/schemas/domaincommon.rng
+index 7fa5c2b8b5..2d77f391e7 100644
+--- a/docs/schemas/domaincommon.rng
++++ b/docs/schemas/domaincommon.rng
+@@ -1898,6 +1898,35 @@
+</element>
+</define>
+
++ <define name="diskSourceNetworkProtocolVitastor">
++ <element name="source">
++ <interleave>
++ <attribute name="protocol">
++ <value>vitastor</value>
++ </attribute>
++ <ref name="diskSourceCommon"/>
++ <optional>
++ <attribute name="name"/>
++ </optional>
++ <optional>
++ <attribute name="query"/>
++ </optional>
++ <zeroOrMore>
++ <ref name="diskSourceNetworkHost"/>
++ </zeroOrMore>
++ <optional>
++ <element name="config">
++ <attribute name="file">
++ <ref name="absFilePath"/>
++ </attribute>
++ <empty/>
++ </element>
++ </optional>
++ <empty/>
++ </interleave>
++ </element>
++ </define>
++
+<define name="diskSourceNetworkProtocolISCSI">
+<element name="source">
+<attribute name="protocol">
+@@ -2154,6 +2183,7 @@
+<ref name="diskSourceNetworkProtocolSimple"/>
+<ref name="diskSourceNetworkProtocolVxHS"/>
+<ref name="diskSourceNetworkProtocolNFS"/>
++ <ref name="diskSourceNetworkProtocolVitastor"/>
+</choice>
+</define>
+
+diff --git a/include/libvirt/libvirt-storage.h b/include/libvirt/libvirt-storage.h
+index f89856b93e..a8cb9387e2 100644
+--- a/include/libvirt/libvirt-storage.h
++++ b/include/libvirt/libvirt-storage.h
+@@ -246,6 +246,7 @@ typedef enum {
+VIR_CONNECT_LIST_STORAGE_POOLS_ZFS = 1 << 17,
+VIR_CONNECT_LIST_STORAGE_POOLS_VSTORAGE = 1 << 18,
+VIR_CONNECT_LIST_STORAGE_POOLS_ISCSI_DIRECT = 1 << 19,
++ VIR_CONNECT_LIST_STORAGE_POOLS_VITASTOR = 1 << 20,
+} virConnectListAllStoragePoolsFlags;
+
+int virConnectListAllStoragePools(virConnectPtr conn,
+diff --git a/src/conf/domain_conf.c b/src/conf/domain_conf.c
+index 5691b8d2d5..6669e8451d 100644
+--- a/src/conf/domain_conf.c
++++ b/src/conf/domain_conf.c
+@@ -8293,7 +8293,8 @@ virDomainDiskSourceNetworkParse(xmlNodePtr node,
+src->configFile = virXPathString("string(./config/@file)", ctxt);
+
+if (src->protocol == VIR_STORAGE_NET_PROTOCOL_HTTP ||
+- src->protocol == VIR_STORAGE_NET_PROTOCOL_HTTPS)
++ src->protocol == VIR_STORAGE_NET_PROTOCOL_HTTPS ||
++ src->protocol == VIR_STORAGE_NET_PROTOCOL_VITASTOR)
+src->query = virXMLPropString(node, "query");
+
+if (virDomainStorageNetworkParseHosts(node, ctxt, &src->hosts, &src->nhosts) < 0)
+@@ -31267,6 +31268,7 @@ virDomainStorageSourceTranslateSourcePool(virStorageSource *src,
+
+case VIR_STORAGE_POOL_MPATH:
+case VIR_STORAGE_POOL_RBD:
++ case VIR_STORAGE_POOL_VITASTOR:
+case VIR_STORAGE_POOL_SHEEPDOG:
+case VIR_STORAGE_POOL_GLUSTER:
+case VIR_STORAGE_POOL_LAST:
+diff --git a/src/conf/domain_validate.c b/src/conf/domain_validate.c
+index a4271f1247..621c1b7b31 100644
+--- a/src/conf/domain_validate.c
++++ b/src/conf/domain_validate.c
+@@ -508,7 +508,7 @@ virDomainDiskDefValidateSourceChainOne(const virStorageSource *src)
+}
+}
+
+- /* internal snapshots and config files are currently supported only with rbd: */
++ /* internal snapshots are currently supported only with rbd: */
+if (virStorageSourceGetActualType(src) != VIR_STORAGE_TYPE_NETWORK &&
+src->protocol != VIR_STORAGE_NET_PROTOCOL_RBD) {
+if (src->snapshot) {
+@@ -517,11 +517,15 @@ virDomainDiskDefValidateSourceChainOne(const virStorageSource *src)
+"only with 'rbd' disks"));
+return -1;
+}
+-
++ }
++ /* config files are currently supported only with rbd and vitastor: */
++ if (virStorageSourceGetActualType(src) != VIR_STORAGE_TYPE_NETWORK &&
++ src->protocol != VIR_STORAGE_NET_PROTOCOL_RBD &&
++ src->protocol != VIR_STORAGE_NET_PROTOCOL_VITASTOR) {
+if (src->configFile) {
+virReportError(VIR_ERR_XML_ERROR, "%s",
+_("<config> element is currently supported "
+- "only with 'rbd' disks"));
++ "only with 'rbd' and 'vitastor' disks"));
+return -1;
+}
+}
+diff --git a/src/conf/storage_conf.c b/src/conf/storage_conf.c
+index 6690d26ffd..2255df9d28 100644
+--- a/src/conf/storage_conf.c
++++ b/src/conf/storage_conf.c
+@@ -60,7 +60,7 @@ VIR_ENUM_IMPL(virStoragePool,
+"logical", "disk", "iscsi",
+"iscsi-direct", "scsi", "mpath",
+"rbd", "sheepdog", "gluster",
+- "zfs", "vstorage",
++ "zfs", "vstorage", "vitastor",
+);
+
+VIR_ENUM_IMPL(virStoragePoolFormatFileSystem,
+@@ -246,6 +246,18 @@ static virStoragePoolTypeInfo poolTypeInfo[] = {
+.formatToString = virStorageFileFormatTypeToString,
+}
+},
++ {.poolType = VIR_STORAGE_POOL_VITASTOR,
++ .poolOptions = {
++ .flags = (VIR_STORAGE_POOL_SOURCE_HOST |
++ VIR_STORAGE_POOL_SOURCE_NETWORK |
++ VIR_STORAGE_POOL_SOURCE_NAME),
++ },
++ .volOptions = {
++ .defaultFormat = VIR_STORAGE_FILE_RAW,
++ .formatFromString = virStorageVolumeFormatFromString,
++ .formatToString = virStorageFileFormatTypeToString,
++ }
++ },
+{.poolType = VIR_STORAGE_POOL_SHEEPDOG,
+.poolOptions = {
+.flags = (VIR_STORAGE_POOL_SOURCE_HOST |
+@@ -546,6 +558,11 @@ virStoragePoolDefParseSource(xmlXPathContextPtr ctxt,
+_("element 'name' is mandatory for RBD pool"));
+return -1;
+}
++ if (pool_type == VIR_STORAGE_POOL_VITASTOR && source->name == NULL) {
++ virReportError(VIR_ERR_XML_ERROR, "%s",
++ _("element 'name' is mandatory for Vitastor pool"));
++ return -1;
++ }
+
+if (options->formatFromString) {
+g_autofree char *format = NULL;
+@@ -1176,6 +1193,7 @@ virStoragePoolDefFormatBuf(virBuffer *buf,
+/* RBD, Sheepdog, Gluster and Iscsi-direct devices are not local block devs nor
+* files, so they don't have a target */
+if (def->type != VIR_STORAGE_POOL_RBD &&
++ def->type != VIR_STORAGE_POOL_VITASTOR &&
+def->type != VIR_STORAGE_POOL_SHEEPDOG &&
+def->type != VIR_STORAGE_POOL_GLUSTER &&
+def->type != VIR_STORAGE_POOL_ISCSI_DIRECT) {
+diff --git a/src/conf/storage_conf.h b/src/conf/storage_conf.h
+index aaecf138d6..97172db38b 100644
+--- a/src/conf/storage_conf.h
++++ b/src/conf/storage_conf.h
+@@ -106,6 +106,7 @@ typedef enum {
+VIR_STORAGE_POOL_GLUSTER, /* Gluster device */
+VIR_STORAGE_POOL_ZFS, /* ZFS */
+VIR_STORAGE_POOL_VSTORAGE, /* Virtuozzo Storage */
++ VIR_STORAGE_POOL_VITASTOR, /* Vitastor */
+
+VIR_STORAGE_POOL_LAST,
+} virStoragePoolType;
+@@ -466,6 +467,7 @@ VIR_ENUM_DECL(virStoragePartedFs);
+VIR_CONNECT_LIST_STORAGE_POOLS_SCSI | \
+VIR_CONNECT_LIST_STORAGE_POOLS_MPATH | \
+VIR_CONNECT_LIST_STORAGE_POOLS_RBD | \
++ VIR_CONNECT_LIST_STORAGE_POOLS_VITASTOR | \
+VIR_CONNECT_LIST_STORAGE_POOLS_SHEEPDOG | \
+VIR_CONNECT_LIST_STORAGE_POOLS_GLUSTER | \
+VIR_CONNECT_LIST_STORAGE_POOLS_ZFS | \
+diff --git a/src/conf/storage_source_conf.c b/src/conf/storage_source_conf.c
+index d42f715f26..29d8da3d10 100644
+--- a/src/conf/storage_source_conf.c
++++ b/src/conf/storage_source_conf.c
+@@ -86,6 +86,7 @@ VIR_ENUM_IMPL(virStorageNetProtocol,
+"ssh",
+"vxhs",
+"nfs",
++ "vitastor",
+);
+
+
+@@ -1265,6 +1266,7 @@ virStorageSourceNetworkDefaultPort(virStorageNetProtocol protocol)
+case VIR_STORAGE_NET_PROTOCOL_GLUSTER:
+return 24007;
+
++ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
+case VIR_STORAGE_NET_PROTOCOL_RBD:
+/* we don't provide a default for RBD */
+return 0;
+diff --git a/src/conf/storage_source_conf.h b/src/conf/storage_source_conf.h
+index c4a026881c..67568e9181 100644
+--- a/src/conf/storage_source_conf.h
++++ b/src/conf/storage_source_conf.h
+@@ -128,6 +128,7 @@ typedef enum {
+VIR_STORAGE_NET_PROTOCOL_SSH,
+VIR_STORAGE_NET_PROTOCOL_VXHS,
+VIR_STORAGE_NET_PROTOCOL_NFS,
++ VIR_STORAGE_NET_PROTOCOL_VITASTOR,
+
+VIR_STORAGE_NET_PROTOCOL_LAST
+} virStorageNetProtocol;
+diff --git a/src/conf/virstorageobj.c b/src/conf/virstorageobj.c
+index 02903ac487..504df599fb 100644
+--- a/src/conf/virstorageobj.c
++++ b/src/conf/virstorageobj.c
+@@ -1481,6 +1481,7 @@ virStoragePoolObjSourceFindDuplicateCb(const void *payload,
+return 1;
+break;
+
++ case VIR_STORAGE_POOL_VITASTOR:
+case VIR_STORAGE_POOL_RBD:
+case VIR_STORAGE_POOL_LAST:
+break;
+@@ -1978,6 +1979,8 @@ virStoragePoolObjMatch(virStoragePoolObj *obj,
+(obj->def->type == VIR_STORAGE_POOL_MPATH)) ||
+(MATCH(VIR_CONNECT_LIST_STORAGE_POOLS_RBD) &&
+(obj->def->type == VIR_STORAGE_POOL_RBD)) ||
++ (MATCH(VIR_CONNECT_LIST_STORAGE_POOLS_VITASTOR) &&
++ (obj->def->type == VIR_STORAGE_POOL_VITASTOR)) ||
+(MATCH(VIR_CONNECT_LIST_STORAGE_POOLS_SHEEPDOG) &&
+(obj->def->type == VIR_STORAGE_POOL_SHEEPDOG)) ||
+(MATCH(VIR_CONNECT_LIST_STORAGE_POOLS_GLUSTER) &&
+diff --git a/src/libvirt-storage.c b/src/libvirt-storage.c
+index cbc522b300..b4760fa58d 100644
+--- a/src/libvirt-storage.c
++++ b/src/libvirt-storage.c
+@@ -92,6 +92,7 @@ virStoragePoolGetConnect(virStoragePoolPtr pool)
+* VIR_CONNECT_LIST_STORAGE_POOLS_SCSI
+* VIR_CONNECT_LIST_STORAGE_POOLS_MPATH
+* VIR_CONNECT_LIST_STORAGE_POOLS_RBD
++ * VIR_CONNECT_LIST_STORAGE_POOLS_VITASTOR
+* VIR_CONNECT_LIST_STORAGE_POOLS_SHEEPDOG
+* VIR_CONNECT_LIST_STORAGE_POOLS_GLUSTER
+* VIR_CONNECT_LIST_STORAGE_POOLS_ZFS
+diff --git a/src/libxl/libxl_conf.c b/src/libxl/libxl_conf.c
+index 1ac6253ad7..abe4587f94 100644
+--- a/src/libxl/libxl_conf.c
++++ b/src/libxl/libxl_conf.c
+@@ -962,6 +962,7 @@ libxlMakeNetworkDiskSrcStr(virStorageSource *src,
+case VIR_STORAGE_NET_PROTOCOL_SSH:
+case VIR_STORAGE_NET_PROTOCOL_VXHS:
+case VIR_STORAGE_NET_PROTOCOL_NFS:
++ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
+case VIR_STORAGE_NET_PROTOCOL_LAST:
+case VIR_STORAGE_NET_PROTOCOL_NONE:
+virReportError(VIR_ERR_NO_SUPPORT,
+diff --git a/src/libxl/xen_xl.c b/src/libxl/xen_xl.c
+index 7604e3d534..6453bb9776 100644
+--- a/src/libxl/xen_xl.c
++++ b/src/libxl/xen_xl.c
+@@ -1506,6 +1506,7 @@ xenFormatXLDiskSrcNet(virStorageSource *src)
+case VIR_STORAGE_NET_PROTOCOL_SSH:
+case VIR_STORAGE_NET_PROTOCOL_VXHS:
+case VIR_STORAGE_NET_PROTOCOL_NFS:
++ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
+case VIR_STORAGE_NET_PROTOCOL_LAST:
+case VIR_STORAGE_NET_PROTOCOL_NONE:
+virReportError(VIR_ERR_NO_SUPPORT,
+diff --git a/src/qemu/qemu_block.c b/src/qemu/qemu_block.c
+index e5ff653a60..884ecc79ea 100644
+--- a/src/qemu/qemu_block.c
++++ b/src/qemu/qemu_block.c
+@@ -943,6 +943,38 @@ qemuBlockStorageSourceGetRBDProps(virStorageSource *src,
+}
+
+
++static virJSONValue *
++qemuBlockStorageSourceGetVitastorProps(virStorageSource *src)
++{
++ virJSONValue *ret = NULL;
++ virStorageNetHostDef *host;
++ size_t i;
++ g_auto(virBuffer) buf = VIR_BUFFER_INITIALIZER;
++ g_autofree char *etcd = NULL;
++
++ for (i = 0; i < src->nhosts; i++) {
++ host = src->hosts + i;
++ if ((virStorageNetHostTransport)host->transport != VIR_STORAGE_NET_HOST_TRANS_TCP) {
++ return NULL;
++ }
++ virBufferAsprintf(&buf, i > 0 ? ",%s:%u" : "%s:%u", host->name, host->port);
++ }
++ if (src->nhosts > 0) {
++ etcd = virBufferContentAndReset(&buf);
++ }
++
++ if (virJSONValueObjectCreate(&ret,
++ "S:etcd-host", etcd,
++ "S:etcd-prefix", src->query,
++ "S:config-path", src->configFile,
++ "s:image", src->path,
++ NULL) < 0)
++ return NULL;
++
++ return ret;
++}
++
++
+static virJSONValue *
+qemuBlockStorageSourceGetSheepdogProps(virStorageSource *src)
+{
+@@ -1233,6 +1265,12 @@ qemuBlockStorageSourceGetBackendProps(virStorageSource *src,
+return NULL;
+break;
+
++ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
++ driver = "vitastor";
++ if (!(fileprops = qemuBlockStorageSourceGetVitastorProps(src)))
++ return NULL;
++ break;
++
+case VIR_STORAGE_NET_PROTOCOL_SHEEPDOG:
+driver = "sheepdog";
+if (!(fileprops = qemuBlockStorageSourceGetSheepdogProps(src)))
```
||||||
|
@@ -2244,6 +2282,7 @@ qemuBlockGetBackingStoreString(virStorageSource *src,
|
||||||
|
|
||||||
|
case VIR_STORAGE_NET_PROTOCOL_SHEEPDOG:
|
||||||
|
case VIR_STORAGE_NET_PROTOCOL_RBD:
|
||||||
|
+ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
|
||||||
|
case VIR_STORAGE_NET_PROTOCOL_VXHS:
|
||||||
|
case VIR_STORAGE_NET_PROTOCOL_NFS:
|
||||||
|
case VIR_STORAGE_NET_PROTOCOL_SSH:
|
||||||
|
@@ -2626,6 +2665,12 @@ qemuBlockStorageSourceCreateGetStorageProps(virStorageSource *src,
|
||||||
|
return -1;
|
||||||
|
break;
|
||||||
|
|
||||||
|
+ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
|
||||||
|
+ driver = "vitastor";
|
||||||
|
+ if (!(location = qemuBlockStorageSourceGetVitastorProps(src)))
|
||||||
|
+ return -1;
|
||||||
|
+ break;
|
||||||
|
+
|
||||||
|
case VIR_STORAGE_NET_PROTOCOL_SHEEPDOG:
|
||||||
|
driver = "sheepdog";
|
||||||
|
if (!(location = qemuBlockStorageSourceGetSheepdogProps(src)))
|
||||||
|
diff --git a/src/qemu/qemu_command.c b/src/qemu/qemu_command.c
|
||||||
|
index d822533ccb..afe2087303 100644
|
||||||
|
--- a/src/qemu/qemu_command.c
|
||||||
|
+++ b/src/qemu/qemu_command.c
|
||||||
|
@@ -1723,6 +1723,43 @@ qemuBuildNetworkDriveStr(virStorageSource *src,
|
||||||
|
ret = virBufferContentAndReset(&buf);
|
||||||
|
break;
|
||||||
|
|
||||||
|
+ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
|
||||||
|
+ if (strchr(src->path, ':')) {
|
||||||
|
+ virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
|
||||||
|
+ _("':' not allowed in Vitastor source volume name '%s'"),
|
||||||
|
+ src->path);
|
||||||
|
+ return NULL;
|
||||||
|
+ }
|
||||||
|
+
|
||||||
|
+ virBufferStrcat(&buf, "vitastor:image=", src->path, NULL);
|
||||||
|
+
|
||||||
|
+ if (src->nhosts > 0) {
|
||||||
|
+ virBufferAddLit(&buf, ":etcd-host=");
|
||||||
|
+ for (i = 0; i < src->nhosts; i++) {
|
||||||
|
+ if (i)
|
||||||
|
+ virBufferAddLit(&buf, ",");
|
||||||
|
+
|
||||||
|
+ /* assume host containing : is ipv6 */
|
||||||
|
+ if (strchr(src->hosts[i].name, ':'))
|
||||||
|
+ virBufferEscape(&buf, '\\', ":", "[%s]",
|
||||||
|
+ src->hosts[i].name);
|
||||||
|
+ else
|
||||||
|
+ virBufferAsprintf(&buf, "%s", src->hosts[i].name);
|
||||||
|
+
|
||||||
|
+ if (src->hosts[i].port)
|
||||||
|
+ virBufferAsprintf(&buf, "\\:%u", src->hosts[i].port);
|
||||||
|
+ }
|
||||||
|
+ }
|
||||||
|
+
|
||||||
|
+ if (src->configFile)
|
||||||
|
+ virBufferEscape(&buf, '\\', ":", ":config-path=%s", src->configFile);
|
||||||
|
+
|
||||||
|
+ if (src->query)
|
||||||
|
+ virBufferEscape(&buf, '\\', ":", ":etcd-prefix=%s", src->query);
|
||||||
|
+
|
||||||
|
+ ret = virBufferContentAndReset(&buf);
|
||||||
|
+ break;
|
||||||
|
+
|
||||||
|
case VIR_STORAGE_NET_PROTOCOL_VXHS:
|
||||||
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
||||||
|
_("VxHS protocol does not support URI syntax"));
|
||||||
|
diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
|
||||||
|
index a8401bac30..3dc1fe6db0 100644
|
||||||
|
--- a/src/qemu/qemu_domain.c
|
||||||
|
+++ b/src/qemu/qemu_domain.c
|
||||||
|
@@ -4731,7 +4731,8 @@ qemuDomainValidateStorageSource(virStorageSource *src,
|
||||||
|
if (src->query &&
|
||||||
|
(actualType != VIR_STORAGE_TYPE_NETWORK ||
|
||||||
|
(src->protocol != VIR_STORAGE_NET_PROTOCOL_HTTPS &&
|
||||||
|
- src->protocol != VIR_STORAGE_NET_PROTOCOL_HTTP))) {
|
||||||
|
+ src->protocol != VIR_STORAGE_NET_PROTOCOL_HTTP &&
|
||||||
|
+ src->protocol != VIR_STORAGE_NET_PROTOCOL_VITASTOR))) {
|
||||||
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
|
||||||
|
_("query is supported only with HTTP(S) protocols"));
|
||||||
|
return -1;
|
||||||
|
@@ -9919,6 +9920,7 @@ qemuDomainPrepareStorageSourceTLS(virStorageSource *src,
|
||||||
|
break;
|
||||||
|
|
||||||
|
case VIR_STORAGE_NET_PROTOCOL_RBD:
|
||||||
|
+ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
|
||||||
|
case VIR_STORAGE_NET_PROTOCOL_SHEEPDOG:
|
||||||
|
case VIR_STORAGE_NET_PROTOCOL_GLUSTER:
|
||||||
|
case VIR_STORAGE_NET_PROTOCOL_ISCSI:
|
||||||
|
diff --git a/src/qemu/qemu_snapshot.c b/src/qemu/qemu_snapshot.c
|
||||||
|
index f92e00f9c0..854a3fbc90 100644
|
||||||
|
--- a/src/qemu/qemu_snapshot.c
|
||||||
|
+++ b/src/qemu/qemu_snapshot.c
|
||||||
|
@@ -393,6 +393,7 @@ qemuSnapshotPrepareDiskExternalInactive(virDomainSnapshotDiskDef *snapdisk,
|
||||||
|
case VIR_STORAGE_NET_PROTOCOL_NONE:
|
||||||
|
case VIR_STORAGE_NET_PROTOCOL_NBD:
|
||||||
|
case VIR_STORAGE_NET_PROTOCOL_RBD:
|
||||||
|
+ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
|
||||||
|
case VIR_STORAGE_NET_PROTOCOL_SHEEPDOG:
|
||||||
|
case VIR_STORAGE_NET_PROTOCOL_GLUSTER:
|
||||||
|
case VIR_STORAGE_NET_PROTOCOL_ISCSI:
|
||||||
|
@@ -485,6 +486,7 @@ qemuSnapshotPrepareDiskExternalActive(virDomainObj *vm,
|
||||||
|
case VIR_STORAGE_NET_PROTOCOL_NONE:
|
||||||
|
case VIR_STORAGE_NET_PROTOCOL_NBD:
|
||||||
|
case VIR_STORAGE_NET_PROTOCOL_RBD:
|
||||||
|
+ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
|
||||||
|
case VIR_STORAGE_NET_PROTOCOL_SHEEPDOG:
|
||||||
|
case VIR_STORAGE_NET_PROTOCOL_ISCSI:
|
||||||
|
case VIR_STORAGE_NET_PROTOCOL_HTTP:
|
||||||
|
@@ -638,6 +640,7 @@ qemuSnapshotPrepareDiskInternal(virDomainDiskDef *disk,
|
||||||
|
case VIR_STORAGE_NET_PROTOCOL_NONE:
|
||||||
|
case VIR_STORAGE_NET_PROTOCOL_NBD:
|
||||||
|
case VIR_STORAGE_NET_PROTOCOL_RBD:
|
||||||
|
+ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
|
||||||
|
case VIR_STORAGE_NET_PROTOCOL_SHEEPDOG:
|
||||||
|
case VIR_STORAGE_NET_PROTOCOL_GLUSTER:
|
||||||
|
case VIR_STORAGE_NET_PROTOCOL_ISCSI:
|
||||||
|
diff --git a/src/storage/storage_driver.c b/src/storage/storage_driver.c
|
||||||
|
index 4df2c75a2b..5a5e48ef71 100644
|
||||||
|
--- a/src/storage/storage_driver.c
|
||||||
|
+++ b/src/storage/storage_driver.c
|
||||||
|
@@ -1643,6 +1643,7 @@ storageVolLookupByPathCallback(virStoragePoolObj *obj,
|
||||||
|
|
||||||
|
case VIR_STORAGE_POOL_GLUSTER:
|
||||||
|
case VIR_STORAGE_POOL_RBD:
|
||||||
|
+ case VIR_STORAGE_POOL_VITASTOR:
|
||||||
|
case VIR_STORAGE_POOL_SHEEPDOG:
|
||||||
|
case VIR_STORAGE_POOL_ZFS:
|
||||||
|
case VIR_STORAGE_POOL_LAST:
|
||||||
|
diff --git a/src/storage_file/storage_source_backingstore.c b/src/storage_file/storage_source_backingstore.c
|
||||||
|
index e48ae725ab..2017ccc88c 100644
|
||||||
|
--- a/src/storage_file/storage_source_backingstore.c
|
||||||
|
+++ b/src/storage_file/storage_source_backingstore.c
|
||||||
|
@@ -284,6 +284,75 @@ virStorageSourceParseRBDColonString(const char *rbdstr,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
+static int
|
||||||
|
+virStorageSourceParseVitastorColonString(const char *colonstr,
|
||||||
|
+ virStorageSource *src)
|
||||||
|
+{
|
||||||
|
+ char *p, *e, *next;
|
||||||
|
+ g_autofree char *options = NULL;
|
||||||
|
+
|
||||||
|
+ /* optionally skip the "vitastor:" prefix if provided */
|
||||||
|
+ if (STRPREFIX(colonstr, "vitastor:"))
|
||||||
|
+ colonstr += strlen("vitastor:");
|
||||||
|
+
|
||||||
|
+ options = g_strdup(colonstr);
|
||||||
|
+
|
||||||
|
+ p = options;
|
||||||
|
+ while (*p) {
|
||||||
|
+ /* find : delimiter or end of string */
|
||||||
|
+ for (e = p; *e && *e != ':'; ++e) {
|
||||||
|
+ if (*e == '\\') {
|
||||||
|
+ e++;
|
||||||
|
+ if (*e == '\0')
|
||||||
|
+ break;
|
||||||
|
+ }
|
||||||
|
+ }
|
||||||
|
+ if (*e == '\0') {
|
||||||
|
+ next = e; /* last kv pair */
|
||||||
|
+ } else {
|
||||||
|
+ next = e + 1;
|
||||||
|
+ *e = '\0';
|
||||||
|
+ }
|
||||||
|
+
|
||||||
|
+ if (STRPREFIX(p, "image=")) {
|
||||||
|
+ src->path = g_strdup(p + strlen("image="));
|
||||||
|
+ } else if (STRPREFIX(p, "etcd-prefix=")) {
|
||||||
|
+ src->query = g_strdup(p + strlen("etcd-prefix="));
|
||||||
|
+ } else if (STRPREFIX(p, "config-path=")) {
|
||||||
|
+ src->configFile = g_strdup(p + strlen("config-path="));
|
||||||
|
+ } else if (STRPREFIX(p, "etcd-host=")) {
|
||||||
|
+ char *h, *sep;
|
||||||
|
+
|
||||||
|
+ h = p + strlen("etcd-host=");
|
||||||
|
+ while (h < e) {
|
||||||
|
+ for (sep = h; sep < e; ++sep) {
|
||||||
|
+ if (*sep == '\\' && (sep[1] == ',' ||
|
||||||
|
+ sep[1] == ';' ||
|
||||||
|
+ sep[1] == ' ')) {
|
||||||
|
+ *sep = '\0';
|
||||||
|
+ sep += 2;
|
||||||
|
+ break;
|
||||||
|
+ }
|
||||||
|
+ }
|
||||||
|
+
|
||||||
|
+ if (virStorageSourceRBDAddHost(src, h) < 0)
|
||||||
|
+ return -1;
|
||||||
|
+
|
||||||
|
+ h = sep;
|
||||||
|
+ }
|
||||||
|
+ }
|
||||||
|
+
|
||||||
|
+ p = next;
|
||||||
|
+ }
|
||||||
|
+
|
||||||
|
+ if (!src->path) {
|
||||||
|
+ return -1;
|
||||||
|
+ }
|
||||||
|
+
|
||||||
|
+ return 0;
|
||||||
|
+}
|
||||||
|
+
|
||||||
|
+
|
||||||
|
static int
|
||||||
|
virStorageSourceParseNBDColonString(const char *nbdstr,
|
||||||
|
virStorageSource *src)
|
||||||
|
@@ -396,6 +465,11 @@ virStorageSourceParseBackingColon(virStorageSource *src,
|
||||||
|
return -1;
|
||||||
|
break;
|
||||||
|
|
||||||
|
+ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
|
||||||
|
+ if (virStorageSourceParseVitastorColonString(path, src) < 0)
|
||||||
|
+ return -1;
|
||||||
|
+ break;
|
||||||
|
+
|
||||||
|
case VIR_STORAGE_NET_PROTOCOL_SHEEPDOG:
|
||||||
|
case VIR_STORAGE_NET_PROTOCOL_LAST:
|
||||||
|
case VIR_STORAGE_NET_PROTOCOL_NONE:
|
||||||
|
@@ -984,6 +1058,54 @@ virStorageSourceParseBackingJSONRBD(virStorageSource *src,
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
+static int
|
||||||
|
+virStorageSourceParseBackingJSONVitastor(virStorageSource *src,
|
||||||
|
+ virJSONValue *json,
|
||||||
|
+ const char *jsonstr G_GNUC_UNUSED,
|
||||||
|
+ int opaque G_GNUC_UNUSED)
|
||||||
|
+{
|
||||||
|
+ const char *filename;
|
||||||
|
+ const char *image = virJSONValueObjectGetString(json, "image");
|
||||||
|
+ const char *conf = virJSONValueObjectGetString(json, "config-path");
|
||||||
|
+ const char *etcd_prefix = virJSONValueObjectGetString(json, "etcd-prefix");
|
||||||
|
+ virJSONValue *servers = virJSONValueObjectGetArray(json, "server");
|
||||||
|
+ size_t nservers;
|
||||||
|
+ size_t i;
|
||||||
|
+
|
||||||
|
+ src->type = VIR_STORAGE_TYPE_NETWORK;
|
||||||
|
+ src->protocol = VIR_STORAGE_NET_PROTOCOL_VITASTOR;
|
||||||
|
+
|
||||||
|
+ /* legacy syntax passed via 'filename' option */
|
||||||
|
+ if ((filename = virJSONValueObjectGetString(json, "filename")))
|
||||||
|
+ return virStorageSourceParseVitastorColonString(filename, src);
|
||||||
|
+
|
||||||
|
+ if (!image) {
|
||||||
|
+ virReportError(VIR_ERR_INVALID_ARG, "%s",
|
||||||
|
+ _("missing image name in Vitastor backing volume "
|
||||||
|
+ "JSON specification"));
|
||||||
|
+ return -1;
|
||||||
|
+ }
|
||||||
|
+
|
||||||
|
+ src->path = g_strdup(image);
|
||||||
|
+ src->configFile = g_strdup(conf);
|
||||||
|
+ src->query = g_strdup(etcd_prefix);
|
||||||
|
+
|
||||||
|
+ if (servers) {
|
||||||
|
+ nservers = virJSONValueArraySize(servers);
|
||||||
|
+
|
||||||
|
+ src->hosts = g_new0(virStorageNetHostDef, nservers);
|
||||||
|
+ src->nhosts = nservers;
|
||||||
|
+
|
||||||
|
+ for (i = 0; i < nservers; i++) {
|
||||||
|
+ if (virStorageSourceParseBackingJSONInetSocketAddress(src->hosts + i,
|
||||||
|
+ virJSONValueArrayGet(servers, i)) < 0)
|
||||||
|
+ return -1;
|
||||||
|
+ }
|
||||||
|
+ }
|
||||||
|
+
|
||||||
|
+ return 0;
|
||||||
|
+}
|
||||||
|
+
|
||||||
|
static int
|
||||||
|
virStorageSourceParseBackingJSONRaw(virStorageSource *src,
|
||||||
|
virJSONValue *json,
|
||||||
|
@@ -1162,6 +1284,7 @@ static const struct virStorageSourceJSONDriverParser jsonParsers[] = {
|
||||||
|
{"sheepdog", false, virStorageSourceParseBackingJSONSheepdog, 0},
|
||||||
|
{"ssh", false, virStorageSourceParseBackingJSONSSH, 0},
|
||||||
|
{"rbd", false, virStorageSourceParseBackingJSONRBD, 0},
|
||||||
|
+ {"vitastor", false, virStorageSourceParseBackingJSONVitastor, 0},
|
||||||
|
{"raw", true, virStorageSourceParseBackingJSONRaw, 0},
|
||||||
|
{"nfs", false, virStorageSourceParseBackingJSONNFS, 0},
|
||||||
|
{"vxhs", false, virStorageSourceParseBackingJSONVxHS, 0},
|
||||||
|
diff --git a/src/test/test_driver.c b/src/test/test_driver.c
|
||||||
|
index 0e93b79922..b4d33f5f56 100644
|
||||||
|
--- a/src/test/test_driver.c
|
||||||
|
+++ b/src/test/test_driver.c
|
||||||
|
@@ -7367,6 +7367,7 @@ testStorageVolumeTypeForPool(int pooltype)
|
||||||
|
case VIR_STORAGE_POOL_ISCSI_DIRECT:
|
||||||
|
case VIR_STORAGE_POOL_GLUSTER:
|
||||||
|
case VIR_STORAGE_POOL_RBD:
|
||||||
|
+ case VIR_STORAGE_POOL_VITASTOR:
|
||||||
|
return VIR_STORAGE_VOL_NETWORK;
|
||||||
|
case VIR_STORAGE_POOL_LOGICAL:
|
||||||
|
case VIR_STORAGE_POOL_DISK:
|
||||||
|
diff --git a/tests/storagepoolcapsschemadata/poolcaps-fs.xml b/tests/storagepoolcapsschemadata/poolcaps-fs.xml
|
||||||
|
index eee75af746..8bd0a57bdd 100644
|
||||||
|
--- a/tests/storagepoolcapsschemadata/poolcaps-fs.xml
|
||||||
|
+++ b/tests/storagepoolcapsschemadata/poolcaps-fs.xml
|
||||||
|
@@ -204,4 +204,11 @@
|
||||||
|
</enum>
|
||||||
|
</volOptions>
|
||||||
|
</pool>
|
||||||
|
+ <pool type='vitastor' supported='no'>
|
||||||
|
+ <volOptions>
|
||||||
|
+ <defaultFormat type='raw'/>
|
||||||
|
+ <enum name='targetFormatType'>
|
||||||
|
+ </enum>
|
||||||
|
+ </volOptions>
|
||||||
|
+ </pool>
|
||||||
|
</storagepoolCapabilities>
|
||||||
|
diff --git a/tests/storagepoolcapsschemadata/poolcaps-full.xml b/tests/storagepoolcapsschemadata/poolcaps-full.xml
|
||||||
|
index 805950a937..852df0de16 100644
|
||||||
|
--- a/tests/storagepoolcapsschemadata/poolcaps-full.xml
|
||||||
|
+++ b/tests/storagepoolcapsschemadata/poolcaps-full.xml
|
||||||
|
@@ -204,4 +204,11 @@
|
||||||
|
</enum>
|
||||||
|
</volOptions>
|
||||||
|
</pool>
|
||||||
|
+ <pool type='vitastor' supported='yes'>
|
||||||
|
+ <volOptions>
|
||||||
|
+ <defaultFormat type='raw'/>
|
||||||
|
+ <enum name='targetFormatType'>
|
||||||
|
+ </enum>
|
||||||
|
+ </volOptions>
|
||||||
|
+ </pool>
|
||||||
|
</storagepoolCapabilities>
|
||||||
|
diff --git a/tests/storagepoolxml2argvtest.c b/tests/storagepoolxml2argvtest.c
|
||||||
|
index 449b745519..7f95cc8e08 100644
|
||||||
|
--- a/tests/storagepoolxml2argvtest.c
|
||||||
|
+++ b/tests/storagepoolxml2argvtest.c
|
||||||
|
@@ -68,6 +68,7 @@ testCompareXMLToArgvFiles(bool shouldFail,
|
||||||
|
case VIR_STORAGE_POOL_GLUSTER:
|
||||||
|
case VIR_STORAGE_POOL_ZFS:
|
||||||
|
case VIR_STORAGE_POOL_VSTORAGE:
|
||||||
|
+ case VIR_STORAGE_POOL_VITASTOR:
|
||||||
|
case VIR_STORAGE_POOL_LAST:
|
||||||
|
default:
|
||||||
|
VIR_TEST_DEBUG("pool type '%s' has no xml2argv test", defTypeStr);
|
||||||
|
diff --git a/tools/virsh-pool.c b/tools/virsh-pool.c
|
||||||
|
index d391257f6e..46799c4a90 100644
|
||||||
|
--- a/tools/virsh-pool.c
|
||||||
|
+++ b/tools/virsh-pool.c
|
||||||
|
@@ -1213,6 +1213,9 @@ cmdPoolList(vshControl *ctl, const vshCmd *cmd G_GNUC_UNUSED)
|
||||||
|
case VIR_STORAGE_POOL_VSTORAGE:
|
||||||
|
flags |= VIR_CONNECT_LIST_STORAGE_POOLS_VSTORAGE;
|
||||||
|
break;
|
||||||
|
+ case VIR_STORAGE_POOL_VITASTOR:
|
||||||
|
+ flags |= VIR_CONNECT_LIST_STORAGE_POOLS_VITASTOR;
|
||||||
|
+ break;
|
||||||
|
case VIR_STORAGE_POOL_LAST:
|
||||||
|
break;
|
||||||
|
}
|
|
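For reference, the QEMU-side syntax wired up by the libvirt patch above can be sketched as follows. The strings are derived only from qemuBuildNetworkDriveStr(), qemuBlockStorageSourceGetVitastorProps() and the colon/JSON parsers shown here; the image name, etcd address and config path are made-up example values, and the domain-XML form is an assumption about how the usual network-disk schema is extended (the schema/XML-parsing parts of the patch are not included in this excerpt).

Legacy colon syntax built by qemuBuildNetworkDriveStr():

  vitastor:image=debian9:etcd-host=192.168.7.2\:2379:config-path=/etc/vitastor/vitastor.conf:etcd-prefix=/vitastor

Blockdev properties built by qemuBlockStorageSourceGetVitastorProps():

  { "driver": "vitastor", "image": "debian9", "etcd-host": "192.168.7.2:2379", "etcd-prefix": "/vitastor", "config-path": "/etc/vitastor/vitastor.conf" }

Assumed domain XML that maps onto src->path, src->query, src->configFile and src->hosts:

  <disk type='network' device='disk'>
    <driver name='qemu' type='raw'/>
    <source protocol='vitastor' name='debian9' query='/vitastor'>
      <config file='/etc/vitastor/vitastor.conf'/>
      <host name='192.168.7.2' port='2379'/>
    </source>
    <target dev='vda' bus='virtio'/>
  </disk>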
@@ -24,4 +24,4 @@ rm fio
mv fio-copy fio
FIO=`rpm -qi fio | perl -e 'while(<>) { /^Epoch[\s:]+(\S+)/ && print "$1:"; /^Version[\s:]+(\S+)/ && print $1; /^Release[\s:]+(\S+)/ && print "-$1"; }'`
perl -i -pe 's/(Requires:\s*fio)([^\n]+)?/$1 = '$FIO'/' $VITASTOR/rpm/vitastor-el$EL.spec
-tar --transform 's#^#vitastor-1.4.7/#' --exclude 'rpm/*.rpm' -czf $VITASTOR/../vitastor-1.4.7$(rpm --eval '%dist').tar.gz *
+tar --transform 's#^#vitastor-1.4.8/#' --exclude 'rpm/*.rpm' -czf $VITASTOR/../vitastor-1.4.8$(rpm --eval '%dist').tar.gz *

@@ -36,7 +36,7 @@ ADD . /root/vitastor
RUN set -e; \
cd /root/vitastor/rpm; \
sh build-tarball.sh; \
-cp /root/vitastor-1.4.7.el7.tar.gz ~/rpmbuild/SOURCES; \
+cp /root/vitastor-1.4.8.el7.tar.gz ~/rpmbuild/SOURCES; \
cp vitastor-el7.spec ~/rpmbuild/SPECS/vitastor.spec; \
cd ~/rpmbuild/SPECS/; \
rpmbuild -ba vitastor.spec; \

@@ -1,11 +1,11 @@
Name: vitastor
-Version: 1.4.7
+Version: 1.4.8
Release: 1%{?dist}
Summary: Vitastor, a fast software-defined clustered block storage

License: Vitastor Network Public License 1.1
URL: https://vitastor.io/
-Source0: vitastor-1.4.7.el7.tar.gz
+Source0: vitastor-1.4.8.el7.tar.gz

BuildRequires: liburing-devel >= 0.6
BuildRequires: gperftools-devel

@@ -35,7 +35,7 @@ ADD . /root/vitastor
RUN set -e; \
cd /root/vitastor/rpm; \
sh build-tarball.sh; \
-cp /root/vitastor-1.4.7.el8.tar.gz ~/rpmbuild/SOURCES; \
+cp /root/vitastor-1.4.8.el8.tar.gz ~/rpmbuild/SOURCES; \
cp vitastor-el8.spec ~/rpmbuild/SPECS/vitastor.spec; \
cd ~/rpmbuild/SPECS/; \
rpmbuild -ba vitastor.spec; \

@@ -1,11 +1,11 @@
Name: vitastor
-Version: 1.4.7
+Version: 1.4.8
Release: 1%{?dist}
Summary: Vitastor, a fast software-defined clustered block storage

License: Vitastor Network Public License 1.1
URL: https://vitastor.io/
-Source0: vitastor-1.4.7.el8.tar.gz
+Source0: vitastor-1.4.8.el8.tar.gz

BuildRequires: liburing-devel >= 0.6
BuildRequires: gperftools-devel

@@ -18,7 +18,7 @@ ADD . /root/vitastor
RUN set -e; \
cd /root/vitastor/rpm; \
sh build-tarball.sh; \
-cp /root/vitastor-1.4.7.el9.tar.gz ~/rpmbuild/SOURCES; \
+cp /root/vitastor-1.4.8.el9.tar.gz ~/rpmbuild/SOURCES; \
cp vitastor-el9.spec ~/rpmbuild/SPECS/vitastor.spec; \
cd ~/rpmbuild/SPECS/; \
rpmbuild -ba vitastor.spec; \

@@ -1,11 +1,11 @@
Name: vitastor
-Version: 1.4.7
+Version: 1.4.8
Release: 1%{?dist}
Summary: Vitastor, a fast software-defined clustered block storage

License: Vitastor Network Public License 1.1
URL: https://vitastor.io/
-Source0: vitastor-1.4.7.el9.tar.gz
+Source0: vitastor-1.4.8.el9.tar.gz

BuildRequires: liburing-devel >= 0.6
BuildRequires: gperftools-devel

@@ -16,7 +16,7 @@ if("${CMAKE_INSTALL_PREFIX}" MATCHES "^/usr/local/?$")
set(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}")
endif()

-add_definitions(-DVERSION="1.4.7")
+add_definitions(-DVERSION="1.4.8")
add_definitions(-D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -Wall -Wno-sign-compare -Wno-comment -Wno-parentheses -Wno-pointer-arith -fdiagnostics-color=always -fno-omit-frame-pointer -I ${CMAKE_SOURCE_DIR}/src)
add_link_options(-fno-omit-frame-pointer)
if (${WITH_ASAN})

@@ -145,7 +145,6 @@ add_library(vitastor_client SHARED
cli_status.cpp
cli_describe.cpp
cli_fix.cpp
-cli_df.cpp
cli_ls.cpp
cli_create.cpp
cli_modify.cpp

@@ -154,6 +153,11 @@ add_library(vitastor_client SHARED
cli_rm_data.cpp
cli_rm.cpp
cli_rm_osd.cpp
+cli_pool_cfg.cpp
+cli_pool_create.cpp
+cli_pool_ls.cpp
+cli_pool_modify.cpp
+cli_pool_rm.cpp
)
set_target_properties(vitastor_client PROPERTIES PUBLIC_HEADER "vitastor_c.h")
target_link_libraries(vitastor_client

@@ -181,6 +185,25 @@ target_link_libraries(vitastor-nbd
vitastor_client
)

+# vitastor-kv
+add_executable(vitastor-kv
+kv_cli.cpp
+kv_db.cpp
+kv_db.h
+)
+target_link_libraries(vitastor-kv
+vitastor_client
+)
+
+add_executable(vitastor-kv-stress
+kv_stress.cpp
+kv_db.cpp
+kv_db.h
+)
+target_link_libraries(vitastor-kv-stress
+vitastor_client
+)
+
# vitastor-nfs
add_executable(vitastor-nfs
nfs_proxy.cpp
src/cli.cpp (120 lines changed)

@@ -46,18 +46,21 @@ static const char* help_text =
"vitastor-cli snap-create [-p|--pool <id|name>] <image>@<snapshot>\n"
" Create a snapshot of image <name>. May be used live if only a single writer is active.\n"
"\n"
-"vitastor-cli modify <name> [--rename <new-name>] [--resize <size>] [--readonly | --readwrite] [-f|--force]\n"
+"vitastor-cli modify <name> [--rename <new-name>] [--resize <size>] [--readonly | --readwrite] [-f|--force] [--down-ok]\n"
" Rename, resize image or change its readonly status. Images with children can't be made read-write.\n"
" If the new size is smaller than the old size, extra data will be purged.\n"
" You should resize file system in the image, if present, before shrinking it.\n"
" -f|--force Proceed with shrinking or setting readwrite flag even if the image has children.\n"
+" --down-ok Proceed with shrinking even if some data will be left on unavailable OSDs.\n"
"\n"
-"vitastor-cli rm <from> [<to>] [--writers-stopped]\n"
+"vitastor-cli rm <from> [<to>] [--writers-stopped] [--down-ok]\n"
" Remove <from> or all layers between <from> and <to> (<to> must be a child of <from>),\n"
" rebasing all their children accordingly. --writers-stopped allows merging to be a bit\n"
" more effective in case of a single 'slim' read-write child and 'fat' removed parent:\n"
" the child is merged into parent and parent is renamed to child in that case.\n"
" In other cases parent layers are always merged into children.\n"
+" Other options:\n"
+" --down-ok Continue deletion/merging even if some data will be left on unavailable OSDs.\n"
"\n"
"vitastor-cli flatten <layer>\n"
" Flatten a layer, i.e. merge data and detach it from parents.\n"

@@ -113,6 +116,54 @@ static const char* help_text =
" With --dry-run only checks if deletion is possible without data loss and\n"
" redundancy degradation.\n"
"\n"
+"vitastor-cli create-pool|pool-create <name> (-s <pg_size>|--ec <N>+<K>) -n <pg_count> [OPTIONS]\n"
+" Create a pool. Required parameters:\n"
+" -s|--pg_size R Number of replicas for replicated pools\n"
+" --ec N+K Number of data (N) and parity (K) chunks for erasure-coded pools\n"
+" -n|--pg_count N PG count for the new pool (start with 10*<OSD count>/pg_size rounded to a power of 2)\n"
+" Optional parameters:\n"
+" --pg_minsize <number> R or N+K minus number of failures to tolerate without downtime\n"
+" --failure_domain host Failure domain: host, osd or a level from placement_levels. Default: host\n"
+" --root_node <node> Put pool only on child OSDs of this placement tree node\n"
+" --osd_tags <tag>[,<tag>]... Put pool only on OSDs tagged with all specified tags\n"
+" --block_size 128k Put pool only on OSDs with this data block size\n"
+" --bitmap_granularity 4k Put pool only on OSDs with this logical sector size\n"
+" --immediate_commit none Put pool only on OSDs with this or larger immediate_commit (none < small < all)\n"
+" --primary_affinity_tags tags Prefer to put primary copies on OSDs with all specified tags\n"
+" --scrub_interval <time> Enable regular scrubbing for this pool. Format: number + unit s/m/h/d/M/y\n"
+" --pg_stripe_size <number> Increase object grouping stripe\n"
+" --max_osd_combinations 10000 Maximum number of random combinations for LP solver input\n"
+" --wait Wait for the new pool to come online\n"
+" -f|--force Do not check that cluster has enough OSDs to create the pool\n"
+" Examples:\n"
+" vitastor-cli create-pool test_x4 -s 4 -n 32\n"
+" vitastor-cli create-pool test_ec42 --ec 4+2 -n 32\n"
+"\n"
+"vitastor-cli modify-pool|pool-modify <id|name> [--name <new_name>] [PARAMETERS...]\n"
+" Modify an existing pool. Modifiable parameters:\n"
+" [-s|--pg_size <number>] [--pg_minsize <number>] [-n|--pg_count <count>]\n"
+" [--failure_domain <level>] [--root_node <node>] [--osd_tags <tags>]\n"
+" [--max_osd_combinations <number>] [--primary_affinity_tags <tags>] [--scrub_interval <time>]\n"
+" Non-modifiable parameters (changing them WILL lead to data loss):\n"
+" [--block_size <size>] [--bitmap_granularity <size>]\n"
+" [--immediate_commit <all|small|none>] [--pg_stripe_size <size>]\n"
+" These, however, can still be modified with -f|--force.\n"
+" See create-pool for parameter descriptions.\n"
+" Examples:\n"
+" vitastor-cli modify-pool pool_A --name pool_B\n"
+" vitastor-cli modify-pool 2 --pg_size 4 -n 128\n"
+"\n"
+"vitastor-cli rm-pool|pool-rm [--force] <id|name>\n"
+" Remove a pool. Refuses to remove pools with images without --force.\n"
+"\n"
+"vitastor-cli ls-pools|pool-ls|ls-pool|pools [-l] [--detail] [--sort FIELD] [-r] [-n N] [--stats] [<glob> ...]\n"
+" List pools (only matching <glob> patterns if passed).\n"
+" -l|--long Also report I/O statistics\n"
+" --detail Use list format (not table), show all details\n"
+" --sort FIELD Sort by specified field (see fields in --json output)\n"
+" -r|--reverse Sort in descending order\n"
+" -n|--count N Only list first N items\n"
+"\n"
"Use vitastor-cli --help <command> for command details or vitastor-cli --help --all for all details.\n"
"\n"
"GLOBAL OPTIONS:\n"

@@ -122,7 +173,7 @@ static const char* help_text =
" --parallel_osds M Work with M osds in parallel when possible (default 4)\n"
" --progress 1|0 Report progress (default 1)\n"
" --cas 1|0 Use CAS writes for flatten, merge, rm (default is decide automatically)\n"
-" --no-color Disable colored output\n"
+" --color 1|0 Enable/disable colored output and CR symbols (default 1 if stdout is a terminal)\n"
" --json JSON output\n"
;

@@ -133,6 +184,8 @@ static json11::Json::object parse_args(int narg, const char *args[])
cfg["progress"] = "1";
for (int i = 1; i < narg; i++)
{
+ bool argHasValue = (!(i == narg-1) && (args[i+1][0] != '-'));
+
if (args[i][0] == '-' && args[i][1] == 'h' && args[i][2] == 0)
{
cfg["help"] = "1";

@@ -143,15 +196,15 @@ static json11::Json::object parse_args(int narg, const char *args[])
}
else if (args[i][0] == '-' && args[i][1] == 'n' && args[i][2] == 0)
{
- cfg["count"] = args[++i];
+ cfg["count"] = argHasValue ? args[++i] : "";
}
else if (args[i][0] == '-' && args[i][1] == 'p' && args[i][2] == 0)
{
- cfg["pool"] = args[++i];
+ cfg["pool"] = argHasValue ? args[++i] : "";
}
else if (args[i][0] == '-' && args[i][1] == 's' && args[i][2] == 0)
{
- cfg["size"] = args[++i];
+ cfg["size"] = argHasValue ? args[++i] : "";
}
else if (args[i][0] == '-' && args[i][1] == 'r' && args[i][2] == 0)
{

@@ -164,17 +217,24 @@ static json11::Json::object parse_args(int narg, const char *args[])
else if (args[i][0] == '-' && args[i][1] == '-')
{
const char *opt = args[i]+2;
- cfg[opt] = i == narg-1 || !strcmp(opt, "json") ||
+ if (!strcmp(opt, "json") || !strcmp(opt, "wait") ||
!strcmp(opt, "wait-list") || !strcmp(opt, "wait_list") ||
- !strcmp(opt, "long") || !strcmp(opt, "del") ||
+ !strcmp(opt, "long") || !strcmp(opt, "detail") || !strcmp(opt, "del") ||
!strcmp(opt, "no-color") || !strcmp(opt, "no_color") ||
!strcmp(opt, "readonly") || !strcmp(opt, "readwrite") ||
!strcmp(opt, "force") || !strcmp(opt, "reverse") ||
!strcmp(opt, "allow-data-loss") || !strcmp(opt, "allow_data_loss") ||
+ !strcmp(opt, "down-ok") || !strcmp(opt, "down_ok") ||
!strcmp(opt, "dry-run") || !strcmp(opt, "dry_run") ||
!strcmp(opt, "help") || !strcmp(opt, "all") ||
- (!strcmp(opt, "writers-stopped") || !strcmp(opt, "writers_stopped")) && strcmp("1", args[i+1]) != 0
- ? "1" : args[++i];
+ !strcmp(opt, "writers-stopped") || !strcmp(opt, "writers_stopped"))
+ {
+ cfg[opt] = "1";
+ }
+ else
+ {
+ cfg[opt] = argHasValue ? args[++i] : "";
+ }
}
else
{

@@ -217,7 +277,7 @@ static int run(cli_tool_t *p, json11::Json::object cfg)
else if (cmd[0] == "df")
{
// Show pool space stats
- action_cb = p->start_df(cfg);
+ action_cb = p->start_pool_ls(cfg);
}
else if (cmd[0] == "ls")
{

@@ -324,6 +384,44 @@ static int run(cli_tool_t *p, json11::Json::object cfg)
// Allocate a new OSD number
action_cb = p->start_alloc_osd(cfg);
}
+ else if (cmd[0] == "create-pool" || cmd[0] == "pool-create")
+ {
+ // Create a new pool
+ if (cmd.size() > 1 && cfg["name"].is_null())
+ {
+ cfg["name"] = cmd[1];
+ }
+ action_cb = p->start_pool_create(cfg);
+ }
+ else if (cmd[0] == "modify-pool" || cmd[0] == "pool-modify")
+ {
+ // Modify existing pool
+ if (cmd.size() > 1)
+ {
+ cfg["old_name"] = cmd[1];
+ }
+ action_cb = p->start_pool_modify(cfg);
+ }
+ else if (cmd[0] == "rm-pool" || cmd[0] == "pool-rm")
+ {
+ // Remove existing pool
+ if (cmd.size() > 1)
+ {
+ cfg["pool"] = cmd[1];
+ }
+ action_cb = p->start_pool_rm(cfg);
+ }
+ else if (cmd[0] == "ls-pool" || cmd[0] == "pool-ls" || cmd[0] == "ls-pools" || cmd[0] == "pools")
+ {
+ // Show pool list
+ cfg["show_recovery"] = 1;
+ if (cmd.size() > 1)
+ {
+ cmd.erase(cmd.begin(), cmd.begin()+1);
+ cfg["names"] = cmd;
+ }
+ action_cb = p->start_pool_ls(cfg);
+ }
else
{
result = { .err = EINVAL, .text = "unknown command: "+cmd[0].string_value() };
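A quick sketch of how the reworked argument handling above behaves (the image and pool names are illustrative, not taken from the diff):

  vitastor-cli rm --down-ok --writers-stopped testimg
      # "down-ok" and "writers-stopped" are in the boolean list, so they become "1"
      # and no longer swallow the next token; "testimg" stays the positional <from>
  vitastor-cli create-pool testpool --ec 2+1 --wait
      # "--ec" is not boolean and the next token is not an option, so argHasValue
      # makes cfg["ec"] = "2+1"; cmd[1] == "testpool" becomes cfg["name"] and the
      # request is dispatched to start_pool_create()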
src/cli.h (11 lines changed)

@@ -46,6 +46,7 @@ public:
json11::Json etcd_result;

void parse_config(json11::Json::object & cfg);
+ json11::Json parse_tags(std::string tags);

void change_parent(inode_t cur, inode_t new_parent, cli_result_t *result);
inode_config_t* get_inode_cfg(const std::string & name);

@@ -58,7 +59,6 @@ public:
std::function<bool(cli_result_t &)> start_status(json11::Json);
std::function<bool(cli_result_t &)> start_describe(json11::Json);
std::function<bool(cli_result_t &)> start_fix(json11::Json);
- std::function<bool(cli_result_t &)> start_df(json11::Json);
std::function<bool(cli_result_t &)> start_ls(json11::Json);
std::function<bool(cli_result_t &)> start_create(json11::Json);
std::function<bool(cli_result_t &)> start_modify(json11::Json);

@@ -68,6 +68,10 @@ public:
std::function<bool(cli_result_t &)> start_rm(json11::Json);
std::function<bool(cli_result_t &)> start_rm_osd(json11::Json cfg);
std::function<bool(cli_result_t &)> start_alloc_osd(json11::Json cfg);
+ std::function<bool(cli_result_t &)> start_pool_create(json11::Json);
+ std::function<bool(cli_result_t &)> start_pool_modify(json11::Json);
+ std::function<bool(cli_result_t &)> start_pool_rm(json11::Json);
+ std::function<bool(cli_result_t &)> start_pool_ls(json11::Json);

// Should be called like loop_and_wait(start_status(), <completion callback>)
void loop_and_wait(std::function<bool(cli_result_t &)> loop_cb, std::function<void(const cli_result_t &)> complete_cb);

@@ -77,8 +81,13 @@ public:

std::string print_table(json11::Json items, json11::Json header, bool use_esc);

+ size_t print_detail_title_len(json11::Json item, std::vector<std::pair<std::string, std::string>> names, size_t prev_len);
+ std::string print_detail(json11::Json item, std::vector<std::pair<std::string, std::string>> names, size_t title_len, bool use_esc);
+
std::string format_lat(uint64_t lat);

std::string format_q(double depth);

bool stupid_glob(const std::string str, const std::string glob);

+ std::string implode(const std::string & sep, json11::Json array);

@@ -1,6 +1,7 @@
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 (see README.md for details)

+#include <unistd.h>
#include "str_util.h"
#include "cluster_client.h"
#include "cli.h"

@@ -113,7 +114,12 @@ void cli_tool_t::parse_config(json11::Json::object & cfg)
else
kv_it++;
}
- color = !cfg["no_color"].bool_value();
+ if (cfg.find("no_color") != cfg.end())
+ color = !cfg["no_color"].bool_value();
+ else if (cfg.find("color") != cfg.end())
+ color = cfg["color"].bool_value();
+ else
+ color = isatty(1);
json_output = cfg["json"].bool_value();
iodepth = cfg["iodepth"].uint64_value();
if (!iodepth)

@@ -183,7 +183,16 @@ resume_3:
// Save into inode_config for library users to be able to take it from there immediately
new_cfg.mod_revision = parent->etcd_result["responses"][0]["response_put"]["header"]["revision"].uint64_value();
parent->cli->st_cli.insert_inode_config(new_cfg);
- result = (cli_result_t){ .err = 0, .text = "Image "+image_name+" created" };
+ result = (cli_result_t){
+ .err = 0,
+ .text = "Image "+image_name+" created",
+ .data = json11::Json::object {
+ { "name", image_name },
+ { "pool", new_pool_name },
+ { "parent", new_parent },
+ { "size", size },
+ }
+ };
state = 100;
}

@@ -251,7 +260,16 @@ resume_4:
// Save into inode_config for library users to be able to take it from there immediately
new_cfg.mod_revision = parent->etcd_result["responses"][0]["response_put"]["header"]["revision"].uint64_value();
parent->cli->st_cli.insert_inode_config(new_cfg);
- result = (cli_result_t){ .err = 0, .text = "Snapshot "+image_name+"@"+new_snap+" created" };
+ result = (cli_result_t){
+ .err = 0,
+ .text = "Snapshot "+image_name+"@"+new_snap+" created",
+ .data = json11::Json::object {
+ { "name", image_name+"@"+new_snap },
+ { "pool", (uint64_t)new_pool_id },
+ { "parent", new_parent },
+ { "size", size },
+ }
+ };
state = 100;
}
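With the .data objects added above, image and snapshot creation now return structured results alongside the text message. A sketch of the machine-readable part for an image create (field names come from the code above, values are illustrative, and the exact JSON envelope depends on how the CLI serializes cli_result_t with --json):

  { "name": "testimg", "pool": "testpool", "parent": "", "size": 10737418240 }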
src/cli_df.cpp (deleted, 243 lines)

@@ -1,243 +0,0 @@
-// Copyright (c) Vitaliy Filippov, 2019+
-// License: VNPL-1.1 (see README.md for details)
-
-#include "cli.h"
-#include "cluster_client.h"
-#include "str_util.h"
-
-// List pools with space statistics
-struct pool_lister_t
-{
-cli_tool_t *parent;
-
-int state = 0;
-json11::Json space_info;
-cli_result_t result;
-std::map<pool_id_t, json11::Json::object> pool_stats;
-
-bool is_done()
-{
-return state == 100;
-}
-
-void get_stats()
-{
-if (state == 1)
-goto resume_1;
-// Space statistics - pool/stats/<pool>
-parent->etcd_txn(json11::Json::object {
-{ "success", json11::Json::array {
-json11::Json::object {
-{ "request_range", json11::Json::object {
-{ "key", base64_encode(
-parent->cli->st_cli.etcd_prefix+"/pool/stats/"
-) },
-{ "range_end", base64_encode(
-parent->cli->st_cli.etcd_prefix+"/pool/stats0"
-) },
-} },
-},
-json11::Json::object {
-{ "request_range", json11::Json::object {
-{ "key", base64_encode(
-parent->cli->st_cli.etcd_prefix+"/osd/stats/"
-) },
-{ "range_end", base64_encode(
-parent->cli->st_cli.etcd_prefix+"/osd/stats0"
-) },
-} },
-},
-} },
-});
-state = 1;
-resume_1:
-if (parent->waiting > 0)
-return;
-if (parent->etcd_err.err)
-{
-result = parent->etcd_err;
-state = 100;
-return;
-}
-space_info = parent->etcd_result;
-std::map<pool_id_t, uint64_t> osd_free;
-for (auto & kv_item: space_info["responses"][0]["response_range"]["kvs"].array_items())
-{
-auto kv = parent->cli->st_cli.parse_etcd_kv(kv_item);
-// pool ID
-pool_id_t pool_id;
-char null_byte = 0;
-int scanned = sscanf(kv.key.substr(parent->cli->st_cli.etcd_prefix.length()).c_str(), "/pool/stats/%u%c", &pool_id, &null_byte);
-if (scanned != 1 || !pool_id || pool_id >= POOL_ID_MAX)
-{
-fprintf(stderr, "Invalid key in etcd: %s\n", kv.key.c_str());
-continue;
-}
-// pool/stats/<N>
-pool_stats[pool_id] = kv.value.object_items();
-}
-for (auto & kv_item: space_info["responses"][1]["response_range"]["kvs"].array_items())
-{
-auto kv = parent->cli->st_cli.parse_etcd_kv(kv_item);
-// osd ID
-osd_num_t osd_num;
-char null_byte = 0;
-int scanned = sscanf(kv.key.substr(parent->cli->st_cli.etcd_prefix.length()).c_str(), "/osd/stats/%ju%c", &osd_num, &null_byte);
-if (scanned != 1 || !osd_num || osd_num >= POOL_ID_MAX)
-{
-fprintf(stderr, "Invalid key in etcd: %s\n", kv.key.c_str());
-continue;
-}
-// osd/stats/<N>::free
-osd_free[osd_num] = kv.value["free"].uint64_value();
-}
-// Calculate max_avail for each pool
-for (auto & pp: parent->cli->st_cli.pool_config)
-{
-auto & pool_cfg = pp.second;
-uint64_t pool_avail = UINT64_MAX;
-std::map<osd_num_t, uint64_t> pg_per_osd;
-for (auto & pgp: pool_cfg.pg_config)
-{
-for (auto pg_osd: pgp.second.target_set)
-{
-if (pg_osd != 0)
-{
-pg_per_osd[pg_osd]++;
-}
-}
-}
-for (auto pg_per_pair: pg_per_osd)
-{
-uint64_t pg_free = osd_free[pg_per_pair.first] * pool_cfg.real_pg_count / pg_per_pair.second;
-if (pool_avail > pg_free)
-{
-pool_avail = pg_free;
-}
-}
-if (pool_avail == UINT64_MAX)
-{
-pool_avail = 0;
-}
-if (pool_cfg.scheme != POOL_SCHEME_REPLICATED)
-{
-pool_avail *= (pool_cfg.pg_size - pool_cfg.parity_chunks);
-}
-pool_stats[pool_cfg.id] = json11::Json::object {
-{ "id", (uint64_t)pool_cfg.id },
-{ "name", pool_cfg.name },
-{ "pg_count", pool_cfg.pg_count },
-{ "real_pg_count", pool_cfg.real_pg_count },
-{ "scheme", pool_cfg.scheme == POOL_SCHEME_REPLICATED ? "replicated" : "ec" },
-{ "scheme_name", pool_cfg.scheme == POOL_SCHEME_REPLICATED
-? std::to_string(pool_cfg.pg_size)+"/"+std::to_string(pool_cfg.pg_minsize)
-: "EC "+std::to_string(pool_cfg.pg_size-pool_cfg.parity_chunks)+"+"+std::to_string(pool_cfg.parity_chunks) },
-{ "used_raw", (uint64_t)(pool_stats[pool_cfg.id]["used_raw_tb"].number_value() * ((uint64_t)1<<40)) },
-{ "total_raw", (uint64_t)(pool_stats[pool_cfg.id]["total_raw_tb"].number_value() * ((uint64_t)1<<40)) },
-{ "max_available", pool_avail },
-{ "raw_to_usable", pool_stats[pool_cfg.id]["raw_to_usable"].number_value() },
-{ "space_efficiency", pool_stats[pool_cfg.id]["space_efficiency"].number_value() },
-{ "pg_real_size", pool_stats[pool_cfg.id]["pg_real_size"].uint64_value() },
-{ "failure_domain", pool_cfg.failure_domain },
-};
-}
-}
-
-json11::Json::array to_list()
-{
-json11::Json::array list;
-for (auto & kv: pool_stats)
-{
-list.push_back(kv.second);
-}
-return list;
-}
-
-void loop()
-{
-get_stats();
-if (parent->waiting > 0)
-return;
-if (state == 100)
-return;
-if (parent->json_output)
-{
-// JSON output
-result.data = to_list();
-state = 100;
-return;
-}
-// Table output: name, scheme_name, pg_count, total, used, max_avail, used%, efficiency
-json11::Json::array cols;
-cols.push_back(json11::Json::object{
-{ "key", "name" },
-{ "title", "NAME" },
-});
-cols.push_back(json11::Json::object{
-{ "key", "scheme_name" },
-{ "title", "SCHEME" },
-});
-cols.push_back(json11::Json::object{
-{ "key", "pg_count_fmt" },
-{ "title", "PGS" },
-});
-cols.push_back(json11::Json::object{
-{ "key", "total_fmt" },
-{ "title", "TOTAL" },
-});
-cols.push_back(json11::Json::object{
-{ "key", "used_fmt" },
-{ "title", "USED" },
-});
-cols.push_back(json11::Json::object{
-{ "key", "max_avail_fmt" },
-{ "title", "AVAILABLE" },
-});
-cols.push_back(json11::Json::object{
-{ "key", "used_pct" },
-{ "title", "USED%" },
-});
-cols.push_back(json11::Json::object{
-{ "key", "eff_fmt" },
-{ "title", "EFFICIENCY" },
-});
-json11::Json::array list;
-for (auto & kv: pool_stats)
-{
-double raw_to = kv.second["raw_to_usable"].number_value();
-if (raw_to < 0.000001 && raw_to > -0.000001)
-raw_to = 1;
-kv.second["pg_count_fmt"] = kv.second["real_pg_count"] == kv.second["pg_count"]
-? kv.second["real_pg_count"].as_string()
-: kv.second["real_pg_count"].as_string()+"->"+kv.second["pg_count"].as_string();
-kv.second["total_fmt"] = format_size(kv.second["total_raw"].uint64_value() / raw_to);
-kv.second["used_fmt"] = format_size(kv.second["used_raw"].uint64_value() / raw_to);
-kv.second["max_avail_fmt"] = format_size(kv.second["max_available"].uint64_value());
-kv.second["used_pct"] = format_q(kv.second["total_raw"].uint64_value()
-? (100 - 100*kv.second["max_available"].uint64_value() *
-kv.second["raw_to_usable"].number_value() / kv.second["total_raw"].uint64_value())
-: 100)+"%";
-kv.second["eff_fmt"] = format_q(kv.second["space_efficiency"].number_value()*100)+"%";
-}
-result.data = to_list();
-result.text = print_table(result.data, cols, parent->color);
-state = 100;
-}
-};
-
-std::function<bool(cli_result_t &)> cli_tool_t::start_df(json11::Json cfg)
-{
-auto lister = new pool_lister_t();
-lister->parent = this;
-return [lister](cli_result_t & result)
-{
-lister->loop();
-if (lister->is_done())
-{
-result = lister->result;
-delete lister;
-return true;
-}
-return false;
-};
-}
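The removed cli_df.cpp was the standalone 'df' implementation; the dispatch hunk in src/cli.cpp above now routes 'df' through start_pool_ls() instead. For reference, one entry of the JSON data the removed code produced looked roughly like the sketch below (values illustrative); the new pool lister is not part of this excerpt, but it presumably reports at least the same fields:

  { "id": 1, "name": "testpool", "scheme": "ec", "scheme_name": "EC 2+1", "pg_count": 32, "real_pg_count": 32,
    "used_raw": 123456789, "total_raw": 1099511627776, "max_available": 987654321, "raw_to_usable": 1.5,
    "space_efficiency": 0.98, "pg_real_size": 3, "failure_domain": "host" }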
@@ -275,7 +275,9 @@ struct snap_merger_t
             processed++;
             if (parent->progress && !(processed % 128))
             {
-                printf("\rFiltering target blocks: %ju/%ju", processed, to_process);
+                fprintf(stderr, parent->color
+                    ? "\rFiltering target blocks: %ju/%ju"
+                    : "Filtering target blocks: %ju/%ju\n", processed, to_process);
             }
         }
         if (in_flight > 0 || oit != merge_offsets.end())
@@ -285,7 +287,9 @@ struct snap_merger_t
             }
             if (parent->progress)
             {
-                printf("\r%ju full blocks of target filtered out\n", to_process-merge_offsets.size());
+                fprintf(stderr, parent->color
+                    ? "\r%ju full blocks of target filtered out\n"
+                    : "%ju full blocks of target filtered out\n", to_process-merge_offsets.size());
             }
         }
         state = 3;
@@ -320,7 +324,9 @@ struct snap_merger_t
             processed++;
             if (parent->progress && !(processed % 128))
             {
-                printf("\rOverwriting blocks: %ju/%ju", processed, to_process);
+                fprintf(stderr, parent->color
+                    ? "\rOverwriting blocks: %ju/%ju"
+                    : "Overwriting blocks: %ju/%ju\n", processed, to_process);
             }
         }
         if (in_flight == 0 && rwo_error.size())
@@ -339,10 +345,16 @@ struct snap_merger_t
         }
         if (parent->progress)
         {
-            printf("\rOverwriting blocks: %ju/%ju\n", to_process, to_process);
+            fprintf(stderr, parent->color
+                ? "\rOverwriting blocks: %ju/%ju\n"
+                : "Overwriting blocks: %ju/%ju\n", to_process, to_process);
         }
         // Done
-        result = (cli_result_t){ .text = "Done, layers from "+from_name+" to "+to_name+" merged into "+target_name };
+        result = (cli_result_t){ .text = "Done, layers from "+from_name+" to "+to_name+" merged into "+target_name, .data = json11::Json::object {
+            { "from", from_name },
+            { "to", to_name },
+            { "into", target_name },
+        }};
         state = 100;
     resume_100:
         return;
@@ -15,6 +15,7 @@ struct image_changer_t
     uint64_t new_size = 0;
     bool force_size = false, inc_size = false;
     bool set_readonly = false, set_readwrite = false, force = false;
+    bool down_ok = false;
     // interval between fsyncs
     int fsync_interval = 128;

@@ -84,7 +85,10 @@ struct image_changer_t
         (!new_size && !force_size || cfg.size == new_size || cfg.size >= new_size && inc_size) &&
         (new_name == "" || new_name == image_name))
     {
-        result = (cli_result_t){ .text = "No change" };
+        result = (cli_result_t){ .err = 0, .text = "No change", .data = json11::Json::object {
+            { "error_code", 0 },
+            { "error_text", "No change" },
+        }};
         state = 100;
         return;
     }
@@ -105,6 +109,7 @@ struct image_changer_t
         { "pool", (uint64_t)INODE_POOL(inode_num) },
         { "fsync-interval", fsync_interval },
         { "min-offset", ((new_size+4095)/4096)*4096 },
+        { "down-ok", down_ok },
     });
 resume_1:
     while (!cb(result))
@@ -220,7 +225,16 @@ resume_2:
             parent->cli->st_cli.inode_by_name.erase(image_name);
         }
         parent->cli->st_cli.insert_inode_config(cfg);
-        result = (cli_result_t){ .err = 0, .text = "Image "+image_name+" modified" };
+        result = (cli_result_t){
+            .err = 0,
+            .text = "Image "+image_name+" modified",
+            .data = json11::Json::object {
+                { "name", image_name },
+                { "inode", INODE_NO_POOL(inode_num) },
+                { "pool", (uint64_t)INODE_POOL(inode_num) },
+                { "size", new_size },
+            }
+        };
         state = 100;
     }
 };
@@ -240,6 +254,7 @@ std::function<bool(cli_result_t &)> cli_tool_t::start_modify(json11::Json cfg)
     changer->fsync_interval = cfg["fsync_interval"].uint64_value();
     if (!changer->fsync_interval)
         changer->fsync_interval = 128;
+    changer->down_ok = cfg["down_ok"].bool_value();
     // FIXME Check that the image doesn't have children when shrinking
     return [changer](cli_result_t & result)
     {
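With the change above, a successful modify also carries machine-readable fields in result.data. A small illustrative consumer, assuming a cli_result_t named res obtained from the modify callback (the printing itself is not part of the commit):

    // Hypothetical consumer of the structured modify result
    if (res.err == 0 && res.data.is_object())
    {
        printf("image %s: inode %ju in pool %ju, size %ju bytes\n",
            res.data["name"].string_value().c_str(),
            res.data["inode"].uint64_value(),
            res.data["pool"].uint64_value(),
            res.data["size"].uint64_value());
    }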
@@ -0,0 +1,264 @@
// Copyright (c) Vitaliy Filippov, 2024
// License: VNPL-1.1 (see README.md for details)

#include "cli_pool_cfg.h"
#include "etcd_state_client.h"
#include "str_util.h"

std::string validate_pool_config(json11::Json::object & new_cfg, json11::Json old_cfg,
    uint64_t global_block_size, uint64_t global_bitmap_granularity, bool force)
{
    // short option names
    if (new_cfg.find("count") != new_cfg.end())
    {
        new_cfg["pg_count"] = new_cfg["count"];
        new_cfg.erase("count");
    }
    if (new_cfg.find("size") != new_cfg.end())
    {
        new_cfg["pg_size"] = new_cfg["size"];
        new_cfg.erase("size");
    }

    // --ec shortcut
    if (new_cfg.find("ec") != new_cfg.end())
    {
        if (new_cfg.find("scheme") != new_cfg.end() ||
            new_cfg.find("pg_size") != new_cfg.end() ||
            new_cfg.find("parity_chunks") != new_cfg.end())
        {
            return "--ec can't be used with --pg_size, --parity_chunks or --scheme";
        }
        // pg_size = N+K
        // parity_chunks = K
        uint64_t data_chunks = 0, parity_chunks = 0;
        char null_byte = 0;
        int ret = sscanf(new_cfg["ec"].string_value().c_str(), "%ju+%ju%c", &data_chunks, &parity_chunks, &null_byte);
        if (ret != 2 || !data_chunks || !parity_chunks)
        {
            return "--ec should be <N>+<K> format (<N>, <K> - numbers)";
        }
        new_cfg.erase("ec");
        new_cfg["scheme"] = "ec";
        new_cfg["pg_size"] = data_chunks+parity_chunks;
        new_cfg["parity_chunks"] = parity_chunks;
    }

    if (old_cfg.is_null() && new_cfg["scheme"].string_value() == "")
    {
        // Default scheme
        new_cfg["scheme"] = "replicated";
    }
    if (new_cfg.find("pg_minsize") == new_cfg.end() && (old_cfg.is_null() || new_cfg.find("pg_size") != new_cfg.end()))
    {
        // Default pg_minsize
        if (new_cfg["scheme"] == "replicated")
        {
            // pg_minsize = (N+K > 2) ? 2 : 1
            new_cfg["pg_minsize"] = new_cfg["pg_size"].uint64_value() > 2 ? 2 : 1;
        }
        else // ec or xor
        {
            // pg_minsize = (K > 1) ? N + 1 : N
            new_cfg["pg_minsize"] = new_cfg["pg_size"].uint64_value() - new_cfg["parity_chunks"].uint64_value() +
                (new_cfg["parity_chunks"].uint64_value() > 1 ? 1 : 0);
        }
    }
    if (new_cfg["scheme"] != "ec")
    {
        new_cfg.erase("parity_chunks");
    }

    // Check integer values and unknown keys
    for (auto kv_it = new_cfg.begin(); kv_it != new_cfg.end(); )
    {
        auto & key = kv_it->first;
        auto & value = kv_it->second;
        if (key == "pg_size" || key == "parity_chunks" || key == "pg_minsize" ||
            key == "pg_count" || key == "max_osd_combinations" || key == "block_size" ||
            key == "bitmap_granularity" || key == "pg_stripe_size")
        {
            if (value.is_number() && value.uint64_value() != value.number_value() ||
                value.is_string() && !value.uint64_value() && value.string_value() != "0")
            {
                return key+" must be a non-negative integer";
            }
            value = value.uint64_value();
        }
        else if (key == "name" || key == "scheme" || key == "immediate_commit" ||
            key == "failure_domain" || key == "root_node" || key == "scrub_interval")
        {
            // OK
        }
        else if (key == "osd_tags" || key == "primary_affinity_tags")
        {
            if (value.is_string())
            {
                value = explode(",", value.string_value(), true);
            }
        }
        else
        {
            // Unknown parameter
            new_cfg.erase(kv_it++);
            continue;
        }
        kv_it++;
    }

    // Merge with the old config
    if (!old_cfg.is_null())
    {
        for (auto & kv: old_cfg.object_items())
        {
            if (new_cfg.find(kv.first) == new_cfg.end())
            {
                new_cfg[kv.first] = kv.second;
            }
        }
    }

    // Prevent autovivification of object keys. Now we don't modify the config, we just check it
    json11::Json cfg = new_cfg;

    // Validate changes
    if (!old_cfg.is_null() && !force)
    {
        if (old_cfg["scheme"] != cfg["scheme"])
        {
            return "Changing scheme for an existing pool will lead to data loss. Use --force to proceed";
        }
        if (etcd_state_client_t::parse_scheme(old_cfg["scheme"].string_value()) == POOL_SCHEME_EC)
        {
            uint64_t old_data_chunks = old_cfg["pg_size"].uint64_value() - old_cfg["parity_chunks"].uint64_value();
            uint64_t new_data_chunks = cfg["pg_size"].uint64_value() - cfg["parity_chunks"].uint64_value();
            if (old_data_chunks != new_data_chunks)
            {
                return "Changing EC data chunk count for an existing pool will lead to data loss. Use --force to proceed";
            }
        }
        if (old_cfg["block_size"] != cfg["block_size"] ||
            old_cfg["bitmap_granularity"] != cfg["bitmap_granularity"] ||
            old_cfg["immediate_commit"] != cfg["immediate_commit"])
        {
            return "Changing block_size, bitmap_granularity or immediate_commit"
                " for an existing pool will lead to incomplete PGs. Use --force to proceed";
        }
        if (old_cfg["pg_stripe_size"] != cfg["pg_stripe_size"])
        {
            return "Changing pg_stripe_size for an existing pool will lead to data loss. Use --force to proceed";
        }
    }

    // Validate values
    if (cfg["name"].string_value() == "")
    {
        return "Non-empty pool name is required";
    }

    // scheme
    auto scheme = etcd_state_client_t::parse_scheme(cfg["scheme"].string_value());
    if (!scheme)
    {
        return "Scheme must be one of \"replicated\", \"ec\" or \"xor\"";
    }

    // pg_size
    auto pg_size = cfg["pg_size"].uint64_value();
    if (!pg_size)
    {
        return "Non-zero PG size is required";
    }
    if (scheme != POOL_SCHEME_REPLICATED && pg_size < 3)
    {
        return "PG size can't be smaller than 3 for EC/XOR pools";
    }
    if (pg_size > 256)
    {
        return "PG size can't be greater than 256";
    }

    // parity_chunks
    uint64_t parity_chunks = 1;
    if (scheme == POOL_SCHEME_EC)
    {
        parity_chunks = cfg["parity_chunks"].uint64_value();
        if (!parity_chunks)
        {
            return "Non-zero parity_chunks is required";
        }
        if (parity_chunks > pg_size-2)
        {
            return "parity_chunks can't be greater than "+std::to_string(pg_size-2)+" (PG size - 2)";
        }
    }

    // pg_minsize
    auto pg_minsize = cfg["pg_minsize"].uint64_value();
    if (!pg_minsize)
    {
        return "Non-zero pg_minsize is required";
    }
    else if (pg_minsize > pg_size)
    {
        return "pg_minsize can't be greater than "+std::to_string(pg_size)+" (PG size)";
    }
    else if (scheme != POOL_SCHEME_REPLICATED && pg_minsize < pg_size-parity_chunks)
    {
        return "pg_minsize can't be smaller than "+std::to_string(pg_size-parity_chunks)+
            " (pg_size - parity_chunks) for XOR/EC pool";
    }

    // pg_count
    if (!cfg["pg_count"].uint64_value())
    {
        return "Non-zero pg_count is required";
    }

    // max_osd_combinations
    if (!cfg["max_osd_combinations"].is_null() && cfg["max_osd_combinations"].uint64_value() < 100)
    {
        return "max_osd_combinations must be at least 100, but it is "+cfg["max_osd_combinations"].as_string();
    }

    // block_size
    auto block_size = cfg["block_size"].uint64_value();
    if (!cfg["block_size"].is_null() && ((block_size & (block_size-1)) ||
        block_size < MIN_DATA_BLOCK_SIZE || block_size > MAX_DATA_BLOCK_SIZE))
    {
        return "block_size must be a power of two between "+std::to_string(MIN_DATA_BLOCK_SIZE)+
            " and "+std::to_string(MAX_DATA_BLOCK_SIZE)+", but it is "+std::to_string(block_size);
    }
    block_size = (block_size ? block_size : global_block_size);

    // bitmap_granularity
    auto bitmap_granularity = cfg["bitmap_granularity"].uint64_value();
    if (!cfg["bitmap_granularity"].is_null() && (!bitmap_granularity || (bitmap_granularity % 512)))
    {
        return "bitmap_granularity must be a multiple of 512, but it is "+std::to_string(bitmap_granularity);
    }
    bitmap_granularity = (bitmap_granularity ? bitmap_granularity : global_bitmap_granularity);
    if (block_size % bitmap_granularity)
    {
        return "bitmap_granularity must divide data block size ("+std::to_string(block_size)+"), but it is "+std::to_string(bitmap_granularity);
    }

    // immediate_commit
    if (!cfg["immediate_commit"].is_null() && !etcd_state_client_t::parse_immediate_commit(cfg["immediate_commit"].string_value()))
    {
        return "immediate_commit must be one of \"all\", \"small\", or \"none\", but it is "+cfg["scrub_interval"].as_string();
    }

    // scrub_interval
    if (!cfg["scrub_interval"].is_null())
    {
        bool ok;
        parse_time(cfg["scrub_interval"].string_value(), &ok);
        if (!ok)
        {
            return "scrub_interval must be a time interval (number + unit s/m/h/d/M/y), but it is "+cfg["scrub_interval"].as_string();
        }
    }

    return "";
}
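For reference, a minimal usage sketch of validate_pool_config() as declared above. The pool name and the global values (128 KB block size, 4 KB bitmap granularity) are illustrative assumptions, not values taken from the commit:

    // Sketch: validating a new EC pool definition before writing it to etcd
    json11::Json::object cfg {
        { "name", "ecpool" },
        { "ec", "2+1" },     // the --ec shortcut expands to scheme=ec, pg_size=3, parity_chunks=1
        { "pg_count", 256 },
    };
    // old_cfg is null => a new pool is being created; force=false keeps the safety checks on
    std::string err = validate_pool_config(cfg, json11::Json(), 128*1024, 4096, false);
    if (err != "")
        fprintf(stderr, "%s\n", err.c_str());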
@@ -0,0 +1,10 @@
// Copyright (c) Vitaliy Filippov, 2024
// License: VNPL-1.1 (see README.md for details)

#pragma once

#include "json11/json11.hpp"
#include <stdint.h>

std::string validate_pool_config(json11::Json::object & new_cfg, json11::Json old_cfg,
    uint64_t global_block_size, uint64_t global_bitmap_granularity, bool force);
@@ -0,0 +1,622 @@
// Copyright (c) MIND Software LLC, 2023 (info@mindsw.io)
// I accept Vitastor CLA: see CLA-en.md for details
// Copyright (c) Vitaliy Filippov, 2024
// License: VNPL-1.1 (see README.md for details)

#include <ctype.h>
#include "cli.h"
#include "cli_pool_cfg.h"
#include "cluster_client.h"
#include "epoll_manager.h"
#include "pg_states.h"
#include "str_util.h"

struct pool_creator_t
{
    cli_tool_t *parent;
    json11::Json::object cfg;

    bool force = false;
    bool wait = false;

    int state = 0;
    cli_result_t result;

    struct {
        uint32_t retries = 5;
        uint32_t interval = 0;
        bool passed = false;
    } create_check;

    uint64_t new_id = 1;
    uint64_t new_pools_mod_rev;
    json11::Json state_node_tree;
    json11::Json new_pools;

    bool is_done() { return state == 100; }

    void loop()
    {
        if (state == 1)
            goto resume_1;
        else if (state == 2)
            goto resume_2;
        else if (state == 3)
            goto resume_3;
        else if (state == 4)
            goto resume_4;
        else if (state == 5)
            goto resume_5;
        else if (state == 6)
            goto resume_6;
        else if (state == 7)
            goto resume_7;
        else if (state == 8)
            goto resume_8;

        // Validate pool parameters
        result.text = validate_pool_config(cfg, json11::Json(), parent->cli->st_cli.global_block_size,
            parent->cli->st_cli.global_bitmap_granularity, force);
        if (result.text != "")
        {
            result.err = EINVAL;
            state = 100;
            return;
        }
        state = 1;
    resume_1:
        // If not forced, check that we have enough osds for pg_size
        if (!force)
        {
            // Get node_placement configuration from etcd
            parent->etcd_txn(json11::Json::object {
                { "success", json11::Json::array {
                    json11::Json::object {
                        { "request_range", json11::Json::object {
                            { "key", base64_encode(parent->cli->st_cli.etcd_prefix+"/config/node_placement") },
                        } }
                    },
                } },
            });

            state = 2;
    resume_2:
            if (parent->waiting > 0)
                return;
            if (parent->etcd_err.err)
            {
                result = parent->etcd_err;
                state = 100;
                return;
            }

            // Get state_node_tree based on node_placement and osd peer states
            {
                auto kv = parent->cli->st_cli.parse_etcd_kv(parent->etcd_result["responses"][0]["response_range"]["kvs"][0]);
                state_node_tree = get_state_node_tree(kv.value.object_items());
            }

            // Skip tag checks, if pool has none
            if (cfg["osd_tags"].array_items().size())
            {
                // Get osd configs (for tags) of osds in state_node_tree
                {
                    json11::Json::array osd_configs;
                    for (auto osd_num: state_node_tree["osds"].array_items())
                    {
                        osd_configs.push_back(json11::Json::object {
                            { "request_range", json11::Json::object {
                                { "key", base64_encode(parent->cli->st_cli.etcd_prefix+"/config/osd/"+osd_num.as_string()) },
                            } }
                        });
                    }
                    parent->etcd_txn(json11::Json::object { { "success", osd_configs, }, });
                }

                state = 3;
    resume_3:
                if (parent->waiting > 0)
                    return;
                if (parent->etcd_err.err)
                {
                    result = parent->etcd_err;
                    state = 100;
                    return;
                }

                // Filter out osds from state_node_tree based on pool/osd tags
                {
                    std::vector<json11::Json> osd_configs;
                    for (auto & ocr: parent->etcd_result["responses"].array_items())
                    {
                        auto kv = parent->cli->st_cli.parse_etcd_kv(ocr["response_range"]["kvs"][0]);
                        osd_configs.push_back(kv.value);
                    }
                    state_node_tree = filter_state_node_tree_by_tags(state_node_tree, osd_configs);
                }
            }

            // Get stats (for block_size, bitmap_granularity, ...) of osds in state_node_tree
            {
                json11::Json::array osd_stats;

                for (auto osd_num: state_node_tree["osds"].array_items())
                {
                    osd_stats.push_back(json11::Json::object {
                        { "request_range", json11::Json::object {
                            { "key", base64_encode(parent->cli->st_cli.etcd_prefix+"/osd/stats/"+osd_num.as_string()) },
                        } }
                    });
                }

                parent->etcd_txn(json11::Json::object { { "success", osd_stats, }, });
            }

            state = 4;
    resume_4:
            if (parent->waiting > 0)
                return;
            if (parent->etcd_err.err)
            {
                result = parent->etcd_err;
                state = 100;
                return;
            }

            // Filter osds from state_node_tree based on pool parameters and osd stats
            {
                std::vector<json11::Json> osd_stats;
                for (auto & ocr: parent->etcd_result["responses"].array_items())
                {
                    auto kv = parent->cli->st_cli.parse_etcd_kv(ocr["response_range"]["kvs"][0]);
                    osd_stats.push_back(kv.value);
                }
                state_node_tree = filter_state_node_tree_by_stats(state_node_tree, osd_stats);
            }

            // Check that pg_size <= max_pg_size
            {
                auto failure_domain = cfg["failure_domain"].string_value() == ""
                    ? "host" : cfg["failure_domain"].string_value();
                uint64_t max_pg_size = get_max_pg_size(state_node_tree["nodes"].object_items(),
                    failure_domain, cfg["root_node"].string_value());

                if (cfg["pg_size"].uint64_value() > max_pg_size)
                {
                    result = (cli_result_t){
                        .err = EINVAL,
                        .text =
                            "There are "+std::to_string(max_pg_size)+" \""+failure_domain+"\" failure domains with OSDs matching tags and"
                            " block_size/bitmap_granularity/immediate_commit parameters, but you want to create a"
                            " pool with "+cfg["pg_size"].as_string()+" OSDs from different failure domains in a PG."
                            " Change parameters or add --force if you want to create a degraded pool and add OSDs later."
                    };
                    state = 100;
                    return;
                }
            }
        }
        // Create pool
        state = 5;
    resume_5:
        // Get pools from etcd
        parent->etcd_txn(json11::Json::object {
            { "success", json11::Json::array {
                json11::Json::object {
                    { "request_range", json11::Json::object {
                        { "key", base64_encode(parent->cli->st_cli.etcd_prefix+"/config/pools") },
                    } }
                },
            } },
        });
        state = 6;
    resume_6:
        if (parent->waiting > 0)
            return;
        if (parent->etcd_err.err)
        {
            result = parent->etcd_err;
            state = 100;
            return;
        }
        {
            // Add new pool
            auto kv = parent->cli->st_cli.parse_etcd_kv(parent->etcd_result["responses"][0]["response_range"]["kvs"][0]);
            new_pools = create_pool(kv);
            if (new_pools.is_string())
            {
                result = (cli_result_t){ .err = EEXIST, .text = new_pools.string_value() };
                state = 100;
                return;
            }
            new_pools_mod_rev = kv.mod_revision;
        }
        // Update pools in etcd
        parent->etcd_txn(json11::Json::object {
            { "compare", json11::Json::array {
                json11::Json::object {
                    { "target", "MOD" },
                    { "key", base64_encode(parent->cli->st_cli.etcd_prefix+"/config/pools") },
                    { "result", "LESS" },
                    { "mod_revision", new_pools_mod_rev+1 },
                }
            } },
            { "success", json11::Json::array {
                json11::Json::object {
                    { "request_put", json11::Json::object {
                        { "key", base64_encode(parent->cli->st_cli.etcd_prefix+"/config/pools") },
                        { "value", base64_encode(new_pools.dump()) },
                    } },
                },
            } },
        });
        state = 7;
    resume_7:
        if (parent->waiting > 0)
            return;
        if (parent->etcd_err.err)
        {
            result = parent->etcd_err;
            state = 100;
            return;
        }

        // Perform final create-check
        create_check.interval = parent->cli->config["mon_change_timeout"].uint64_value();
        if (!create_check.interval)
            create_check.interval = 1000;

        state = 8;
    resume_8:
        if (parent->waiting > 0)
            return;

        // Unless forced, check that pool was created and is active
        if (!wait)
        {
            create_check.passed = true;
        }
        else if (create_check.retries)
        {
            create_check.retries--;
            parent->waiting++;
            parent->epmgr->tfd->set_timer(create_check.interval, false, [this](int timer_id)
            {
                if (parent->cli->st_cli.pool_config.find(new_id) != parent->cli->st_cli.pool_config.end())
                {
                    auto & pool_cfg = parent->cli->st_cli.pool_config[new_id];
                    create_check.passed = pool_cfg.real_pg_count > 0;
                    for (auto pg_it = pool_cfg.pg_config.begin(); pg_it != pool_cfg.pg_config.end(); pg_it++)
                    {
                        if (!(pg_it->second.cur_state & PG_ACTIVE))
                        {
                            create_check.passed = false;
                            break;
                        }
                    }
                    if (create_check.passed)
                        create_check.retries = 0;
                }
                parent->waiting--;
                parent->ringloop->wakeup();
            });
            return;
        }

        if (!create_check.passed)
        {
            result = (cli_result_t) {
                .err = EAGAIN,
                .text = "Pool "+cfg["name"].string_value()+" was created, but failed to become active."
                    " This may indicate that cluster state has changed while the pool was being created."
                    " Please check the current state and adjust the pool configuration if necessary.",
            };
        }
        else
        {
            result = (cli_result_t){
                .err = 0,
                .text = "Pool "+cfg["name"].string_value()+" created",
                .data = new_pools[std::to_string(new_id)],
            };
        }
        state = 100;
    }

    // Returns a JSON object of form {"nodes": {...}, "osds": [...]} that
    // contains: all nodes (osds, hosts, ...) based on node_placement config
    // and current peer state, and a list of active peer osds.
    json11::Json get_state_node_tree(json11::Json::object node_placement)
    {
        // Erase non-peer osd nodes from node_placement
        for (auto np_it = node_placement.begin(); np_it != node_placement.end();)
        {
            // Numeric nodes are osds
            osd_num_t osd_num = stoull_full(np_it->first);

            // If node is osd and it is not in peer states, erase it
            if (osd_num > 0 &&
                parent->cli->st_cli.peer_states.find(osd_num) == parent->cli->st_cli.peer_states.end())
            {
                node_placement.erase(np_it++);
            }
            else
                np_it++;
        }

        // List of peer osds
        std::vector<std::string> peer_osds;

        // Record peer osds and add missing osds/hosts to np
        for (auto & ps: parent->cli->st_cli.peer_states)
        {
            std::string osd_num = std::to_string(ps.first);

            // Record peer osd
            peer_osds.push_back(osd_num);

            // Add osd, if necessary
            if (node_placement.find(osd_num) == node_placement.end())
            {
                std::string osd_host = ps.second["host"].as_string();

                // Add host, if necessary
                if (node_placement.find(osd_host) == node_placement.end())
                {
                    node_placement[osd_host] = json11::Json::object {
                        { "level", "host" }
                    };
                }

                node_placement[osd_num] = json11::Json::object {
                    { "parent", osd_host }
                };
            }
        }

        return json11::Json::object { { "osds", peer_osds }, { "nodes", node_placement } };
    }

    // Returns new state_node_tree based on given state_node_tree with osds
    // filtered out by tags in given osd_configs and current pool config.
    // Requires: state_node_tree["osds"] must match osd_configs 1-1
    json11::Json filter_state_node_tree_by_tags(const json11::Json & state_node_tree, std::vector<json11::Json> & osd_configs)
    {
        auto & osds = state_node_tree["osds"].array_items();

        // Accepted state_node_tree nodes
        auto accepted_nodes = state_node_tree["nodes"].object_items();

        // List of accepted osds
        std::vector<std::string> accepted_osds;

        for (size_t i = 0; i < osd_configs.size(); i++)
        {
            auto & oc = osd_configs[i].object_items();

            // Get osd number
            auto osd_num = osds[i].as_string();

            // We need tags in config to check against pool tags
            if (oc.find("tags") == oc.end())
            {
                // Exclude osd from state_node_tree nodes
                accepted_nodes.erase(osd_num);
                continue;
            }
            else
            {
                // If all pool tags are in osd tags, accept osd
                if (all_in_tags(osd_configs[i]["tags"], cfg["osd_tags"]))
                {
                    accepted_osds.push_back(osd_num);
                }
                // Otherwise, exclude osd
                else
                {
                    // Exclude osd from state_node_tree nodes
                    accepted_nodes.erase(osd_num);
                }
            }
        }

        return json11::Json::object { { "osds", accepted_osds }, { "nodes", accepted_nodes } };
    }

    // Returns new state_node_tree based on given state_node_tree with osds
    // filtered out by stats parameters (block_size, bitmap_granularity) in
    // given osd_stats and current pool config.
    // Requires: state_node_tree["osds"] must match osd_stats 1-1
    json11::Json filter_state_node_tree_by_stats(const json11::Json & state_node_tree, std::vector<json11::Json> & osd_stats)
    {
        auto & osds = state_node_tree["osds"].array_items();

        // Accepted state_node_tree nodes
        auto accepted_nodes = state_node_tree["nodes"].object_items();

        // List of accepted osds
        std::vector<std::string> accepted_osds;

        uint64_t p_block_size = cfg["block_size"].uint64_value()
            ? cfg["block_size"].uint64_value()
            : parent->cli->st_cli.global_block_size;
        uint64_t p_bitmap_granularity = cfg["bitmap_granularity"].uint64_value()
            ? cfg["bitmap_granularity"].uint64_value()
            : parent->cli->st_cli.global_bitmap_granularity;
        uint32_t p_immediate_commit = cfg["immediate_commit"].is_string()
            ? etcd_state_client_t::parse_immediate_commit(cfg["immediate_commit"].string_value())
            : parent->cli->st_cli.global_immediate_commit;

        for (size_t i = 0; i < osd_stats.size(); i++)
        {
            auto & os = osd_stats[i];
            // Get osd number
            auto osd_num = osds[i].as_string();
            if (!os["data_block_size"].is_null() && os["data_block_size"] != p_block_size ||
                !os["bitmap_granularity"].is_null() && os["bitmap_granularity"] != p_bitmap_granularity ||
                !os["immediate_commit"].is_null() &&
                etcd_state_client_t::parse_immediate_commit(os["immediate_commit"].string_value()) < p_immediate_commit)
            {
                accepted_nodes.erase(osd_num);
            }
            else
            {
                accepted_osds.push_back(osd_num);
            }
        }

        return json11::Json::object { { "osds", accepted_osds }, { "nodes", accepted_nodes } };
    }

    // Returns maximum pg_size possible for given node_tree and failure_domain, starting at parent_node
    uint64_t get_max_pg_size(json11::Json::object node_tree, const std::string & level, const std::string & parent_node)
    {
        uint64_t max_pg_sz = 0;

        std::vector<std::string> nodes;

        // Check if parent node is an osd (numeric)
        if (parent_node != "" && stoull_full(parent_node))
        {
            // Add it to node list if osd is in node tree
            if (node_tree.find(parent_node) != node_tree.end())
                nodes.push_back(parent_node);
        }
        // If parent node given, ...
        else if (parent_node != "")
        {
            // ... look for children nodes of this parent
            for (auto & sn: node_tree)
            {
                auto & props = sn.second.object_items();

                auto parent_prop = props.find("parent");
                if (parent_prop != props.end() && (parent_prop->second.as_string() == parent_node))
                {
                    nodes.push_back(sn.first);

                    // If we're not looking for all osds, we only need a single
                    // child osd node
                    if (level != "osd" && stoull_full(sn.first))
                        break;
                }
            }
        }
        // No parent node given, and we're not looking for all osds
        else if (level != "osd")
        {
            // ... look for all level nodes
            for (auto & sn: node_tree)
            {
                auto & props = sn.second.object_items();

                auto level_prop = props.find("level");
                if (level_prop != props.end() && (level_prop->second.as_string() == level))
                {
                    nodes.push_back(sn.first);
                }
            }
        }
        // Otherwise, ...
        else
        {
            // ... we're looking for osd nodes only
            for (auto & sn: node_tree)
            {
                if (stoull_full(sn.first))
                {
                    nodes.push_back(sn.first);
                }
            }
        }

        // Process gathered nodes
        for (auto & node: nodes)
        {
            // Check for osd node, return constant max size
            if (stoull_full(node))
            {
                max_pg_sz += 1;
            }
            // Otherwise, ...
            else
            {
                // ... exclude parent node from tree, and ...
                node_tree.erase(parent_node);

                // ... descend onto the resulting tree
                max_pg_sz += get_max_pg_size(node_tree, level, node);
            }
        }

        return max_pg_sz;
    }

    json11::Json create_pool(const etcd_kv_t & kv)
    {
        for (auto & p: kv.value.object_items())
        {
            // ID
            uint64_t pool_id = stoull_full(p.first);
            new_id = std::max(pool_id+1, new_id);
            // Name
            if (p.second["name"].string_value() == cfg["name"].string_value())
            {
                return "Pool with name \""+cfg["name"].string_value()+"\" already exists (ID "+std::to_string(pool_id)+")";
            }
        }
        auto res = kv.value.object_items();
        res[std::to_string(new_id)] = cfg;
        return res;
    }

    // Checks whether tags2 tags are all in tags1 tags
    bool all_in_tags(json11::Json tags1, json11::Json tags2)
    {
        if (!tags2.is_array())
        {
            tags2 = json11::Json::array{ tags2.string_value() };
        }
        if (!tags1.is_array())
        {
            tags1 = json11::Json::array{ tags1.string_value() };
        }
        for (auto & tag2: tags2.array_items())
        {
            bool found = false;
            for (auto & tag1: tags1.array_items())
            {
                if (tag1 == tag2)
                {
                    found = true;
                    break;
                }
            }
            if (!found)
            {
                return false;
            }
        }
        return true;
    }
};

std::function<bool(cli_result_t &)> cli_tool_t::start_pool_create(json11::Json cfg)
{
    auto pool_creator = new pool_creator_t();
    pool_creator->parent = this;
    pool_creator->cfg = cfg.object_items();
    pool_creator->force = cfg["force"].bool_value();
    pool_creator->wait = cfg["wait"].bool_value();
    return [pool_creator](cli_result_t & result)
    {
        pool_creator->loop();
        if (pool_creator->is_done())
        {
            result = pool_creator->result;
            delete pool_creator;
            return true;
        }
        return false;
    };
}
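A hedged illustration of the configuration object that start_pool_create() above consumes; the key names follow the validator and creator code, while the concrete values and the cli pointer name are made up for the sketch:

    // Illustrative only: create a replicated pool and wait for its PGs to become active
    json11::Json cfg = json11::Json::object {
        { "name", "testpool" },
        { "scheme", "replicated" },
        { "pg_size", 3 },
        { "pg_count", 128 },
        { "failure_domain", "host" },
        { "wait", true },
    };
    auto cb = cli->start_pool_create(cfg);
    // poll cb(result) from the event loop until it returns true, as with the other CLI actions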
@@ -0,0 +1,721 @@
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 (see README.md for details)

#include <algorithm>
#include "cli.h"
#include "cluster_client.h"
#include "str_util.h"
#include "pg_states.h"

// List pools with space statistics
// - df - minimal list with % used space
// - pool-ls - same but with PG state and recovery %
// - pool-ls -l - same but also include I/O statistics
// - pool-ls --detail - use list format, include PG states, I/O stats and all pool parameters
struct pool_lister_t
{
    cli_tool_t *parent;
    std::string sort_field;
    std::set<std::string> only_names;
    bool reverse = false;
    int max_count = 0;
    bool show_recovery = false;
    bool show_stats = false;
    bool detailed = false;

    int state = 0;
    cli_result_t result;
    std::map<pool_id_t, json11::Json::object> pool_stats;
    struct io_stats_t
    {
        uint64_t count = 0;
        uint64_t read_iops = 0;
        uint64_t read_bps = 0;
        uint64_t read_lat = 0;
        uint64_t write_iops = 0;
        uint64_t write_bps = 0;
        uint64_t write_lat = 0;
        uint64_t delete_iops = 0;
        uint64_t delete_bps = 0;
        uint64_t delete_lat = 0;
    };
    struct object_counts_t
    {
        uint64_t object_count = 0;
        uint64_t misplaced_count = 0;
        uint64_t degraded_count = 0;
        uint64_t incomplete_count = 0;
    };

    bool is_done()
    {
        return state == 100;
    }

    void get_pool_stats(int base_state)
    {
        if (state == base_state+1)
            goto resume_1;
        // Space statistics - pool/stats/<pool>
        parent->etcd_txn(json11::Json::object {
            { "success", json11::Json::array {
                json11::Json::object {
                    { "request_range", json11::Json::object {
                        { "key", base64_encode(
                            parent->cli->st_cli.etcd_prefix+"/pool/stats/"
                        ) },
                        { "range_end", base64_encode(
                            parent->cli->st_cli.etcd_prefix+"/pool/stats0"
                        ) },
                    } },
                },
                json11::Json::object {
                    { "request_range", json11::Json::object {
                        { "key", base64_encode(
                            parent->cli->st_cli.etcd_prefix+"/osd/stats/"
                        ) },
                        { "range_end", base64_encode(
                            parent->cli->st_cli.etcd_prefix+"/osd/stats0"
                        ) },
                    } },
                },
                json11::Json::object {
                    { "request_range", json11::Json::object {
                        { "key", base64_encode(
                            parent->cli->st_cli.etcd_prefix+"/config/pools"
                        ) },
                    } },
                },
            } },
        });
        state = base_state+1;
    resume_1:
        if (parent->waiting > 0)
            return;
        if (parent->etcd_err.err)
        {
            result = parent->etcd_err;
            state = 100;
            return;
        }
        auto space_info = parent->etcd_result;
        auto config_pools = space_info["responses"][2]["response_range"]["kvs"][0];
        if (!config_pools.is_null())
        {
            config_pools = parent->cli->st_cli.parse_etcd_kv(config_pools).value;
        }
        for (auto & kv_item: space_info["responses"][0]["response_range"]["kvs"].array_items())
        {
            auto kv = parent->cli->st_cli.parse_etcd_kv(kv_item);
            // pool ID
            pool_id_t pool_id;
            char null_byte = 0;
            int scanned = sscanf(kv.key.substr(parent->cli->st_cli.etcd_prefix.length()).c_str(), "/pool/stats/%u%c", &pool_id, &null_byte);
            if (scanned != 1 || !pool_id || pool_id >= POOL_ID_MAX)
            {
                fprintf(stderr, "Invalid key in etcd: %s\n", kv.key.c_str());
                continue;
            }
            // pool/stats/<N>
            pool_stats[pool_id] = kv.value.object_items();
        }
        std::map<pool_id_t, uint64_t> osd_free;
        for (auto & kv_item: space_info["responses"][1]["response_range"]["kvs"].array_items())
        {
            auto kv = parent->cli->st_cli.parse_etcd_kv(kv_item);
            // osd ID
            osd_num_t osd_num;
            char null_byte = 0;
            int scanned = sscanf(kv.key.substr(parent->cli->st_cli.etcd_prefix.length()).c_str(), "/osd/stats/%ju%c", &osd_num, &null_byte);
            if (scanned != 1 || !osd_num || osd_num >= POOL_ID_MAX)
            {
                fprintf(stderr, "Invalid key in etcd: %s\n", kv.key.c_str());
                continue;
            }
            // osd/stats/<N>::free
            osd_free[osd_num] = kv.value["free"].uint64_value();
        }
        // Calculate max_avail for each pool
        for (auto & pp: parent->cli->st_cli.pool_config)
        {
            auto & pool_cfg = pp.second;
            uint64_t pool_avail = UINT64_MAX;
            std::map<osd_num_t, uint64_t> pg_per_osd;
            bool active = pool_cfg.real_pg_count > 0;
            uint64_t pg_states = 0;
            for (auto & pgp: pool_cfg.pg_config)
            {
                if (!(pgp.second.cur_state & PG_ACTIVE))
                {
                    active = false;
                }
                pg_states |= pgp.second.cur_state;
                for (auto pg_osd: pgp.second.target_set)
                {
                    if (pg_osd != 0)
                    {
                        pg_per_osd[pg_osd]++;
                    }
                }
            }
            for (auto pg_per_pair: pg_per_osd)
            {
                uint64_t pg_free = osd_free[pg_per_pair.first] * pool_cfg.real_pg_count / pg_per_pair.second;
                if (pool_avail > pg_free)
                {
                    pool_avail = pg_free;
                }
            }
            if (pool_avail == UINT64_MAX)
            {
                pool_avail = 0;
            }
            if (pool_cfg.scheme != POOL_SCHEME_REPLICATED)
            {
                pool_avail *= (pool_cfg.pg_size - pool_cfg.parity_chunks);
            }
            // incomplete > has_incomplete > degraded > has_degraded > has_misplaced
            std::string status;
            if (!active)
                status = "inactive";
            else if (pg_states & PG_INCOMPLETE)
                status = "incomplete";
            else if (pg_states & PG_HAS_INCOMPLETE)
                status = "has_incomplete";
            else if (pg_states & PG_DEGRADED)
                status = "degraded";
            else if (pg_states & PG_HAS_DEGRADED)
                status = "has_degraded";
            else if (pg_states & PG_HAS_MISPLACED)
                status = "has_misplaced";
            else
                status = "active";
            pool_stats[pool_cfg.id] = json11::Json::object {
                { "id", (uint64_t)pool_cfg.id },
                { "name", pool_cfg.name },
                { "status", status },
                { "pg_count", pool_cfg.pg_count },
                { "real_pg_count", pool_cfg.real_pg_count },
                { "scheme_name", pool_cfg.scheme == POOL_SCHEME_REPLICATED
                    ? std::to_string(pool_cfg.pg_size)+"/"+std::to_string(pool_cfg.pg_minsize)
                    : "EC "+std::to_string(pool_cfg.pg_size-pool_cfg.parity_chunks)+"+"+std::to_string(pool_cfg.parity_chunks) },
                { "used_raw", (uint64_t)(pool_stats[pool_cfg.id]["used_raw_tb"].number_value() * ((uint64_t)1<<40)) },
                { "total_raw", (uint64_t)(pool_stats[pool_cfg.id]["total_raw_tb"].number_value() * ((uint64_t)1<<40)) },
                { "max_available", pool_avail },
                { "raw_to_usable", pool_stats[pool_cfg.id]["raw_to_usable"].number_value() },
                { "space_efficiency", pool_stats[pool_cfg.id]["space_efficiency"].number_value() },
                { "pg_real_size", pool_stats[pool_cfg.id]["pg_real_size"].uint64_value() },
                { "osd_count", pg_per_osd.size() },
            };
        }
        // Include full pool config
        for (auto & pp: config_pools.object_items())
        {
            if (!pp.second.is_object())
            {
                continue;
            }
            auto pool_id = stoull_full(pp.first);
            auto & st = pool_stats[pool_id];
            for (auto & kv: pp.second.object_items())
            {
                if (st.find(kv.first) == st.end())
                    st[kv.first] = kv.second;
            }
        }
    }

    void get_pg_stats(int base_state)
    {
        if (state == base_state+1)
            goto resume_1;
        // Space statistics - pool/stats/<pool>
        parent->etcd_txn(json11::Json::object {
            { "success", json11::Json::array {
                json11::Json::object {
                    { "request_range", json11::Json::object {
                        { "key", base64_encode(
                            parent->cli->st_cli.etcd_prefix+"/pg/stats/"
                        ) },
                        { "range_end", base64_encode(
                            parent->cli->st_cli.etcd_prefix+"/pg/stats0"
                        ) },
                    } },
                },
            } },
        });
        state = base_state+1;
    resume_1:
        if (parent->waiting > 0)
            return;
        if (parent->etcd_err.err)
        {
            result = parent->etcd_err;
            state = 100;
            return;
        }
        auto pg_stats = parent->etcd_result["responses"][0]["response_range"]["kvs"];
        // Calculate recovery percent
        std::map<pool_id_t, object_counts_t> counts;
        for (auto & kv_item: pg_stats.array_items())
        {
            auto kv = parent->cli->st_cli.parse_etcd_kv(kv_item);
            // pool ID & pg number
            pool_id_t pool_id;
            pg_num_t pg_num = 0;
            char null_byte = 0;
            int scanned = sscanf(kv.key.substr(parent->cli->st_cli.etcd_prefix.length()).c_str(),
                "/pg/stats/%u/%u%c", &pool_id, &pg_num, &null_byte);
            if (scanned != 2 || !pool_id || pool_id >= POOL_ID_MAX)
            {
                fprintf(stderr, "Invalid key in etcd: %s\n", kv.key.c_str());
                continue;
            }
            auto & cnt = counts[pool_id];
            cnt.object_count += kv.value["object_count"].uint64_value();
            cnt.misplaced_count += kv.value["misplaced_count"].uint64_value();
            cnt.degraded_count += kv.value["degraded_count"].uint64_value();
            cnt.incomplete_count += kv.value["incomplete_count"].uint64_value();
        }
        for (auto & pp: pool_stats)
        {
            auto & cnt = counts[pp.first];
            auto & st = pp.second;
            st["object_count"] = cnt.object_count;
            st["misplaced_count"] = cnt.misplaced_count;
            st["degraded_count"] = cnt.degraded_count;
            st["incomplete_count"] = cnt.incomplete_count;
        }
    }

    void get_inode_stats(int base_state)
    {
        if (state == base_state+1)
            goto resume_1;
        // Space statistics - pool/stats/<pool>
        parent->etcd_txn(json11::Json::object {
            { "success", json11::Json::array {
                json11::Json::object {
                    { "request_range", json11::Json::object {
                        { "key", base64_encode(
                            parent->cli->st_cli.etcd_prefix+"/inode/stats/"
                        ) },
                        { "range_end", base64_encode(
                            parent->cli->st_cli.etcd_prefix+"/inode/stats0"
                        ) },
                    } },
                },
            } },
        });
        state = base_state+1;
    resume_1:
        if (parent->waiting > 0)
            return;
        if (parent->etcd_err.err)
        {
            result = parent->etcd_err;
            state = 100;
            return;
        }
        auto inode_stats = parent->etcd_result["responses"][0]["response_range"]["kvs"];
        // Performance statistics
        std::map<pool_id_t, io_stats_t> pool_io;
        for (auto & kv_item: inode_stats.array_items())
        {
            auto kv = parent->cli->st_cli.parse_etcd_kv(kv_item);
            // pool ID & inode number
            pool_id_t pool_id;
            inode_t only_inode_num;
            char null_byte = 0;
            int scanned = sscanf(kv.key.substr(parent->cli->st_cli.etcd_prefix.length()).c_str(),
                "/inode/stats/%u/%ju%c", &pool_id, &only_inode_num, &null_byte);
            if (scanned != 2 || !pool_id || pool_id >= POOL_ID_MAX || INODE_POOL(only_inode_num) != 0)
            {
                fprintf(stderr, "Invalid key in etcd: %s\n", kv.key.c_str());
                continue;
            }
            auto & io = pool_io[pool_id];
            io.read_iops += kv.value["read"]["iops"].uint64_value();
            io.read_bps += kv.value["read"]["bps"].uint64_value();
            io.read_lat += kv.value["read"]["lat"].uint64_value();
            io.write_iops += kv.value["write"]["iops"].uint64_value();
            io.write_bps += kv.value["write"]["bps"].uint64_value();
            io.write_lat += kv.value["write"]["lat"].uint64_value();
            io.delete_iops += kv.value["delete"]["iops"].uint64_value();
            io.delete_bps += kv.value["delete"]["bps"].uint64_value();
            io.delete_lat += kv.value["delete"]["lat"].uint64_value();
            io.count++;
        }
        for (auto & pp: pool_stats)
        {
            auto & io = pool_io[pp.first];
            if (io.count > 0)
            {
                io.read_lat /= io.count;
                io.write_lat /= io.count;
                io.delete_lat /= io.count;
            }
            auto & st = pp.second;
            st["read_iops"] = io.read_iops;
            st["read_bps"] = io.read_bps;
            st["read_lat"] = io.read_lat;
            st["write_iops"] = io.write_iops;
            st["write_bps"] = io.write_bps;
            st["write_lat"] = io.write_lat;
            st["delete_iops"] = io.delete_iops;
            st["delete_bps"] = io.delete_bps;
            st["delete_lat"] = io.delete_lat;
        }
    }

    json11::Json::array to_list()
    {
        json11::Json::array list;
        for (auto & kv: pool_stats)
        {
            if (!only_names.size())
            {
                list.push_back(kv.second);
            }
            else
            {
                for (auto glob: only_names)
                {
                    if (stupid_glob(kv.second["name"].string_value(), glob))
                    {
                        list.push_back(kv.second);
                        break;
                    }
                }
            }
        }
        if (sort_field == "name" || sort_field == "scheme" ||
            sort_field == "scheme_name" || sort_field == "status")
        {
            std::sort(list.begin(), list.end(), [this](json11::Json a, json11::Json b)
            {
                auto av = a[sort_field].as_string();
                auto bv = b[sort_field].as_string();
                return reverse ? av > bv : av < bv;
            });
        }
        else
        {
            std::sort(list.begin(), list.end(), [this](json11::Json a, json11::Json b)
            {
                auto av = a[sort_field].number_value();
                auto bv = b[sort_field].number_value();
                return reverse ? av > bv : av < bv;
            });
        }
        if (max_count > 0 && list.size() > max_count)
        {
            list.resize(max_count);
        }
        return list;
    }

    void loop()
    {
        if (state == 1)
            goto resume_1;
        if (state == 2)
            goto resume_2;
        if (state == 3)
            goto resume_3;
        if (state == 100)
            return;
        show_stats = show_stats || detailed;
        show_recovery = show_recovery || detailed;
    resume_1:
        get_pool_stats(0);
        if (parent->waiting > 0)
            return;
        if (show_stats)
        {
    resume_2:
            get_inode_stats(1);
            if (parent->waiting > 0)
                return;
        }
        if (show_recovery)
        {
    resume_3:
            get_pg_stats(2);
            if (parent->waiting > 0)
                return;
        }
        if (parent->json_output)
        {
            // JSON output
            result.data = to_list();
            state = 100;
            return;
        }
        json11::Json::array list;
        for (auto & kv: pool_stats)
        {
            auto & st = kv.second;
            double raw_to = st["raw_to_usable"].number_value();
            if (raw_to < 0.000001 && raw_to > -0.000001)
                raw_to = 1;
            st["pg_count_fmt"] = st["real_pg_count"] == st["pg_count"]
                ? st["real_pg_count"].as_string()
                : st["real_pg_count"].as_string()+"->"+st["pg_count"].as_string();
            st["total_fmt"] = format_size(st["total_raw"].uint64_value() / raw_to);
            st["used_fmt"] = format_size(st["used_raw"].uint64_value() / raw_to);
            st["max_avail_fmt"] = format_size(st["max_available"].uint64_value());
            st["used_pct"] = format_q(st["total_raw"].uint64_value()
                ? (100 - 100*st["max_available"].uint64_value() *
                    st["raw_to_usable"].number_value() / st["total_raw"].uint64_value())
                : 100)+"%";
            st["eff_fmt"] = format_q(st["space_efficiency"].number_value()*100)+"%";
            if (show_stats)
            {
                st["read_bw"] = format_size(st["read_bps"].uint64_value())+"/s";
                st["write_bw"] = format_size(st["write_bps"].uint64_value())+"/s";
                st["delete_bw"] = format_size(st["delete_bps"].uint64_value())+"/s";
                st["read_iops"] = format_q(st["read_iops"].number_value());
                st["write_iops"] = format_q(st["write_iops"].number_value());
                st["delete_iops"] = format_q(st["delete_iops"].number_value());
                st["read_lat_f"] = format_lat(st["read_lat"].uint64_value());
                st["write_lat_f"] = format_lat(st["write_lat"].uint64_value());
                st["delete_lat_f"] = format_lat(st["delete_lat"].uint64_value());
            }
            if (show_recovery)
            {
                auto object_count = st["object_count"].uint64_value();
                auto recovery_pct = 100.0 * (object_count - (st["misplaced_count"].uint64_value() +
                    st["degraded_count"].uint64_value() + st["incomplete_count"].uint64_value())) /
                    (object_count ? object_count : 1);
                st["recovery_fmt"] = format_q(recovery_pct)+"%";
            }
        }
        if (detailed)
        {
            for (auto & kv: pool_stats)
            {
                auto & st = kv.second;
                auto total = st["object_count"].uint64_value();
                auto obj_size = st["block_size"].uint64_value();
                if (!obj_size)
                    obj_size = parent->cli->st_cli.global_block_size;
                if (st["scheme"] == "ec")
                    obj_size *= st["pg_size"].uint64_value() - st["parity_chunks"].uint64_value();
                else if (st["scheme"] == "xor")
                    obj_size *= st["pg_size"].uint64_value() - 1;
                auto n = st["misplaced_count"].uint64_value();
                if (n > 0)
                    st["misplaced_fmt"] = format_size(n * obj_size) + " / " + format_q(100.0 * n / total);
                n = st["degraded_count"].uint64_value();
                if (n > 0)
                    st["degraded_fmt"] = format_size(n * obj_size) + " / " + format_q(100.0 * n / total);
                n = st["incomplete_count"].uint64_value();
                if (n > 0)
                    st["incomplete_fmt"] = format_size(n * obj_size) + " / " + format_q(100.0 * n / total);
                st["read_fmt"] = st["read_bw"].string_value()+", "+st["read_iops"].string_value()+" op/s, "+
                    st["read_lat_f"].string_value()+" lat";
                st["write_fmt"] = st["write_bw"].string_value()+", "+st["write_iops"].string_value()+" op/s, "+
                    st["write_lat_f"].string_value()+" lat";
                st["delete_fmt"] = st["delete_bw"].string_value()+", "+st["delete_iops"].string_value()+" op/s, "+
                    st["delete_lat_f"].string_value()+" lat";
                if (st["scheme"] == "replicated")
                    st["scheme_name"] = "x"+st["pg_size"].as_string();
                if (st["failure_domain"].string_value() == "")
                    st["failure_domain"] = "host";
                st["osd_tags_fmt"] = implode(", ", st["osd_tags"]);
                st["primary_affinity_tags_fmt"] = implode(", ", st["primary_affinity_tags"]);
                if (st["block_size"].uint64_value())
                    st["block_size_fmt"] = format_size(st["block_size"].uint64_value());
                if (st["bitmap_granularity"].uint64_value())
                    st["bitmap_granularity_fmt"] = format_size(st["bitmap_granularity"].uint64_value());
            }
            // All pool parameters are only displayed in the "detailed" mode
            // because there's too many of them to show them in table
            auto cols = std::vector<std::pair<std::string, std::string>>{
                { "name", "Name" },
                { "id", "ID" },
                { "scheme_name", "Scheme" },
                { "status", "Status" },
                { "pg_count_fmt", "PGs" },
                { "pg_minsize", "PG minsize" },
                { "failure_domain", "Failure domain" },
                { "root_node", "Root node" },
                { "osd_tags_fmt", "OSD tags" },
                { "primary_affinity_tags_fmt", "Primary affinity" },
                { "block_size_fmt", "Block size" },
                { "bitmap_granularity_fmt", "Bitmap granularity" },
                { "immediate_commit", "Immediate commit" },
                { "scrub_interval", "Scrub interval" },
                { "pg_stripe_size", "PG stripe size" },
                { "max_osd_combinations", "Max OSD combinations" },
                { "total_fmt", "Total" },
                { "used_fmt", "Used" },
                { "max_avail_fmt", "Available" },
                { "used_pct", "Used%" },
                { "eff_fmt", "Efficiency" },
                { "osd_count", "OSD count" },
                { "misplaced_fmt", "Misplaced" },
                { "degraded_fmt", "Degraded" },
                { "incomplete_fmt", "Incomplete" },
                { "read_fmt", "Read" },
                { "write_fmt", "Write" },
                { "delete_fmt", "Delete" },
            };
            auto list = to_list();
            size_t title_len = 0;
            for (auto & item: list)
            {
                title_len = print_detail_title_len(item, cols, title_len);
            }
            for (auto & item: list)
            {
                if (result.text != "")
                    result.text += "\n";
                result.text += print_detail(item, cols, title_len, parent->color);
            }
            state = 100;
            return;
        }
        // Table output: name, scheme_name, pg_count, total, used, max_avail, used%, efficiency
        json11::Json::array cols;
        cols.push_back(json11::Json::object{
            { "key", "name" },
            { "title", "NAME" },
        });
        cols.push_back(json11::Json::object{
            { "key", "scheme_name" },
            { "title", "SCHEME" },
        });
|
||||||
|
cols.push_back(json11::Json::object{
|
||||||
|
{ "key", "status" },
|
||||||
|
{ "title", "STATUS" },
|
||||||
|
});
|
||||||
|
cols.push_back(json11::Json::object{
|
||||||
|
{ "key", "pg_count_fmt" },
|
||||||
|
{ "title", "PGS" },
|
||||||
|
});
|
||||||
|
cols.push_back(json11::Json::object{
|
||||||
|
{ "key", "total_fmt" },
|
||||||
|
{ "title", "TOTAL" },
|
||||||
|
});
|
||||||
|
cols.push_back(json11::Json::object{
|
||||||
|
{ "key", "used_fmt" },
|
||||||
|
{ "title", "USED" },
|
||||||
|
});
|
||||||
|
cols.push_back(json11::Json::object{
|
||||||
|
{ "key", "max_avail_fmt" },
|
||||||
|
{ "title", "AVAILABLE" },
|
||||||
|
});
|
||||||
|
cols.push_back(json11::Json::object{
|
||||||
|
{ "key", "used_pct" },
|
||||||
|
{ "title", "USED%" },
|
||||||
|
});
|
||||||
|
cols.push_back(json11::Json::object{
|
||||||
|
{ "key", "eff_fmt" },
|
||||||
|
{ "title", "EFFICIENCY" },
|
||||||
|
});
|
||||||
|
if (show_recovery)
|
||||||
|
{
|
||||||
|
cols.push_back(json11::Json::object{ { "key", "recovery_fmt" }, { "title", "RECOVERY" } });
|
||||||
|
}
|
||||||
|
if (show_stats)
|
||||||
|
{
|
||||||
|
cols.push_back(json11::Json::object{ { "key", "read_bw" }, { "title", "READ" } });
|
||||||
|
cols.push_back(json11::Json::object{ { "key", "read_iops" }, { "title", "IOPS" } });
|
||||||
|
cols.push_back(json11::Json::object{ { "key", "read_lat_f" }, { "title", "LAT" } });
|
||||||
|
cols.push_back(json11::Json::object{ { "key", "write_bw" }, { "title", "WRITE" } });
|
||||||
|
cols.push_back(json11::Json::object{ { "key", "write_iops" }, { "title", "IOPS" } });
|
||||||
|
cols.push_back(json11::Json::object{ { "key", "write_lat_f" }, { "title", "LAT" } });
|
||||||
|
cols.push_back(json11::Json::object{ { "key", "delete_bw" }, { "title", "DELETE" } });
|
||||||
|
cols.push_back(json11::Json::object{ { "key", "delete_iops" }, { "title", "IOPS" } });
|
||||||
|
cols.push_back(json11::Json::object{ { "key", "delete_lat_f" }, { "title", "LAT" } });
|
||||||
|
}
|
||||||
|
result.data = to_list();
|
||||||
|
result.text = print_table(result.data, cols, parent->color);
|
||||||
|
state = 100;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
size_t print_detail_title_len(json11::Json item, std::vector<std::pair<std::string, std::string>> names, size_t prev_len)
|
||||||
|
{
|
||||||
|
size_t title_len = prev_len;
|
||||||
|
for (auto & kv: names)
|
||||||
|
{
|
||||||
|
if (!item[kv.first].is_null() && (!item[kv.first].is_string() || item[kv.first].string_value() != ""))
|
||||||
|
{
|
||||||
|
size_t len = utf8_length(kv.second);
|
||||||
|
title_len = title_len < len ? len : title_len;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return title_len;
|
||||||
|
}
|
||||||
|
|
||||||
|
std::string print_detail(json11::Json item, std::vector<std::pair<std::string, std::string>> names, size_t title_len, bool use_esc)
|
||||||
|
{
|
||||||
|
std::string str;
|
||||||
|
for (auto & kv: names)
|
||||||
|
{
|
||||||
|
if (!item[kv.first].is_null() && (!item[kv.first].is_string() || item[kv.first].string_value() != ""))
|
||||||
|
{
|
||||||
|
str += kv.second;
|
||||||
|
str += ": ";
|
||||||
|
size_t len = utf8_length(kv.second);
|
||||||
|
for (int j = 0; j < title_len-len; j++)
|
||||||
|
str += ' ';
|
||||||
|
if (use_esc)
|
||||||
|
str += "\033[1m";
|
||||||
|
str += item[kv.first].as_string();
|
||||||
|
if (use_esc)
|
||||||
|
str += "\033[0m";
|
||||||
|
str += "\n";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return str;
|
||||||
|
}
|
||||||
|
|
||||||
|
std::function<bool(cli_result_t &)> cli_tool_t::start_pool_ls(json11::Json cfg)
|
||||||
|
{
|
||||||
|
auto lister = new pool_lister_t();
|
||||||
|
lister->parent = this;
|
||||||
|
lister->show_recovery = cfg["show_recovery"].bool_value();
|
||||||
|
lister->show_stats = cfg["long"].bool_value();
|
||||||
|
lister->detailed = cfg["detail"].bool_value();
|
||||||
|
lister->sort_field = cfg["sort"].string_value();
|
||||||
|
if ((lister->sort_field == "osd_tags") ||
|
||||||
|
(lister->sort_field == "primary_affinity_tags" ))
|
||||||
|
lister->sort_field = lister->sort_field + "_fmt";
|
||||||
|
lister->reverse = cfg["reverse"].bool_value();
|
||||||
|
lister->max_count = cfg["count"].uint64_value();
|
||||||
|
for (auto & item: cfg["names"].array_items())
|
||||||
|
{
|
||||||
|
lister->only_names.insert(item.string_value());
|
||||||
|
}
|
||||||
|
return [lister](cli_result_t & result)
|
||||||
|
{
|
||||||
|
lister->loop();
|
||||||
|
if (lister->is_done())
|
||||||
|
{
|
||||||
|
result = lister->result;
|
||||||
|
delete lister;
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
return false;
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
std::string implode(const std::string & sep, json11::Json array)
|
||||||
|
{
|
||||||
|
if (array.is_number() || array.is_bool() || array.is_string())
|
||||||
|
{
|
||||||
|
return array.as_string();
|
||||||
|
}
|
||||||
|
std::string res;
|
||||||
|
bool first = true;
|
||||||
|
for (auto & item: array.array_items())
|
||||||
|
{
|
||||||
|
res += (first ? item.as_string() : sep+item.as_string());
|
||||||
|
first = false;
|
||||||
|
}
|
||||||
|
return res;
|
||||||
|
}
|
|
@ -0,0 +1,185 @@
|
||||||
|
// Copyright (c) MIND Software LLC, 2023 (info@mindsw.io)
|
||||||
|
// I accept Vitastor CLA: see CLA-en.md for details
|
||||||
|
// Copyright (c) Vitaliy Filippov, 2024
|
||||||
|
// License: VNPL-1.1 (see README.md for details)
|
||||||
|
|
||||||
|
#include <ctype.h>
|
||||||
|
#include "cli.h"
|
||||||
|
#include "cli_pool_cfg.h"
|
||||||
|
#include "cluster_client.h"
|
||||||
|
#include "str_util.h"
|
||||||
|
|
||||||
|
struct pool_changer_t
|
||||||
|
{
|
||||||
|
cli_tool_t *parent;
|
||||||
|
|
||||||
|
// Required parameters (id/name)
|
||||||
|
pool_id_t pool_id = 0;
|
||||||
|
std::string pool_name;
|
||||||
|
json11::Json::object cfg;
|
||||||
|
json11::Json::object new_cfg;
|
||||||
|
bool force = false;
|
||||||
|
|
||||||
|
json11::Json old_cfg;
|
||||||
|
|
||||||
|
int state = 0;
|
||||||
|
cli_result_t result;
|
||||||
|
|
||||||
|
// Updated pools
|
||||||
|
json11::Json new_pools;
|
||||||
|
|
||||||
|
// Expected pools mod revision
|
||||||
|
uint64_t pools_mod_rev;
|
||||||
|
|
||||||
|
bool is_done() { return state == 100; }
|
||||||
|
|
||||||
|
void loop()
|
||||||
|
{
|
||||||
|
if (state == 1)
|
||||||
|
goto resume_1;
|
||||||
|
else if (state == 2)
|
||||||
|
goto resume_2;
|
||||||
|
pool_id = stoull_full(cfg["old_name"].string_value());
|
||||||
|
if (!pool_id)
|
||||||
|
{
|
||||||
|
pool_name = cfg["old_name"].string_value();
|
||||||
|
if (pool_name == "")
|
||||||
|
{
|
||||||
|
result = (cli_result_t){ .err = ENOENT, .text = "Pool ID or name is required to modify it" };
|
||||||
|
state = 100;
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
resume_0:
|
||||||
|
// Get pools from etcd
|
||||||
|
parent->etcd_txn(json11::Json::object {
|
||||||
|
{ "success", json11::Json::array {
|
||||||
|
json11::Json::object {
|
||||||
|
{ "request_range", json11::Json::object {
|
||||||
|
{ "key", base64_encode(parent->cli->st_cli.etcd_prefix+"/config/pools") },
|
||||||
|
} }
|
||||||
|
},
|
||||||
|
} },
|
||||||
|
});
|
||||||
|
state = 1;
|
||||||
|
resume_1:
|
||||||
|
if (parent->waiting > 0)
|
||||||
|
return;
|
||||||
|
if (parent->etcd_err.err)
|
||||||
|
{
|
||||||
|
result = parent->etcd_err;
|
||||||
|
state = 100;
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
{
|
||||||
|
// Parse received pools from etcd
|
||||||
|
auto kv = parent->cli->st_cli.parse_etcd_kv(parent->etcd_result["responses"][0]["response_range"]["kvs"][0]);
|
||||||
|
|
||||||
|
// Get pool by name or ID
|
||||||
|
old_cfg = json11::Json();
|
||||||
|
if (pool_name != "")
|
||||||
|
{
|
||||||
|
for (auto & pce: kv.value.object_items())
|
||||||
|
{
|
||||||
|
if (pce.second["name"] == pool_name)
|
||||||
|
{
|
||||||
|
pool_id = stoull_full(pce.first);
|
||||||
|
old_cfg = pce.second;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
pool_name = std::to_string(pool_id);
|
||||||
|
old_cfg = kv.value[pool_name];
|
||||||
|
}
|
||||||
|
if (!old_cfg.is_object())
|
||||||
|
{
|
||||||
|
result = (cli_result_t){ .err = ENOENT, .text = "Pool "+pool_name+" does not exist" };
|
||||||
|
state = 100;
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update pool
|
||||||
|
new_cfg = cfg;
|
||||||
|
result.text = validate_pool_config(new_cfg, old_cfg, parent->cli->st_cli.global_block_size,
|
||||||
|
parent->cli->st_cli.global_bitmap_granularity, force);
|
||||||
|
if (result.text != "")
|
||||||
|
{
|
||||||
|
result.err = EINVAL;
|
||||||
|
state = 100;
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update pool
|
||||||
|
auto pls = kv.value.object_items();
|
||||||
|
pls[std::to_string(pool_id)] = new_cfg;
|
||||||
|
new_pools = pls;
|
||||||
|
|
||||||
|
// Expected pools mod revision
|
||||||
|
pools_mod_rev = kv.mod_revision;
|
||||||
|
}
|
||||||
|
// Update pools in etcd
|
||||||
|
parent->etcd_txn(json11::Json::object {
|
||||||
|
{ "compare", json11::Json::array {
|
||||||
|
json11::Json::object {
|
||||||
|
{ "target", "MOD" },
|
||||||
|
{ "key", base64_encode(parent->cli->st_cli.etcd_prefix+"/config/pools") },
|
||||||
|
{ "result", "LESS" },
|
||||||
|
{ "mod_revision", pools_mod_rev+1 },
|
||||||
|
}
|
||||||
|
} },
|
||||||
|
{ "success", json11::Json::array {
|
||||||
|
json11::Json::object {
|
||||||
|
{ "request_put", json11::Json::object {
|
||||||
|
{ "key", base64_encode(parent->cli->st_cli.etcd_prefix+"/config/pools") },
|
||||||
|
{ "value", base64_encode(new_pools.dump()) },
|
||||||
|
} },
|
||||||
|
},
|
||||||
|
} },
|
||||||
|
});
|
||||||
|
state = 2;
|
||||||
|
resume_2:
|
||||||
|
if (parent->waiting > 0)
|
||||||
|
return;
|
||||||
|
if (parent->etcd_err.err)
|
||||||
|
{
|
||||||
|
result = parent->etcd_err;
|
||||||
|
state = 100;
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
if (!parent->etcd_result["succeeded"].bool_value())
|
||||||
|
{
|
||||||
|
// CAS failure - retry
|
||||||
|
fprintf(stderr, "Warning: pool configuration was modified in the meantime by someone else\n");
|
||||||
|
goto resume_0;
|
||||||
|
}
|
||||||
|
// Successfully updated pool
|
||||||
|
result = (cli_result_t){
|
||||||
|
.err = 0,
|
||||||
|
.text = "Pool "+pool_name+" updated",
|
||||||
|
.data = new_pools,
|
||||||
|
};
|
||||||
|
state = 100;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
std::function<bool(cli_result_t &)> cli_tool_t::start_pool_modify(json11::Json cfg)
|
||||||
|
{
|
||||||
|
auto pool_changer = new pool_changer_t();
|
||||||
|
pool_changer->parent = this;
|
||||||
|
pool_changer->cfg = cfg.object_items();
|
||||||
|
pool_changer->force = cfg["force"].bool_value();
|
||||||
|
return [pool_changer](cli_result_t & result)
|
||||||
|
{
|
||||||
|
pool_changer->loop();
|
||||||
|
if (pool_changer->is_done())
|
||||||
|
{
|
||||||
|
result = pool_changer->result;
|
||||||
|
delete pool_changer;
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
return false;
|
||||||
|
};
|
||||||
|
}
|
|
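For reference, a minimal sketch (not part of the patch) of the cfg object that start_pool_modify() above consumes; only "old_name" and "force" are read directly in this file, any other keys are the changed pool parameters and are checked by validate_pool_config(). The "pg_count" key below is an assumed example, and cli_tool/result are hypothetical names for the surrounding CLI objects.
// Hypothetical invocation sketch
json11::Json::object cfg = {
    { "old_name", "testpool" },    // pool name or numeric ID, required
    { "pg_count", (uint64_t)256 }, // assumed example of a parameter being changed
    { "force", false },            // relax validation checks when true
};
// The returned callback is polled from the CLI event loop until it returns true
// and fills cli_result_t with err/text/data.
std::function<bool(cli_result_t &)> cb = cli_tool->start_pool_modify(cfg);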
@ -0,0 +1,226 @@
|
||||||
|
// Copyright (c) MIND Software LLC, 2023 (info@mindsw.io)
|
||||||
|
// I accept Vitastor CLA: see CLA-en.md for details
|
||||||
|
// Copyright (c) Vitaliy Filippov, 2024
|
||||||
|
// License: VNPL-1.1 (see README.md for details)
|
||||||
|
|
||||||
|
#include <ctype.h>
|
||||||
|
#include "cli.h"
|
||||||
|
#include "cluster_client.h"
|
||||||
|
#include "str_util.h"
|
||||||
|
|
||||||
|
struct pool_remover_t
|
||||||
|
{
|
||||||
|
cli_tool_t *parent;
|
||||||
|
|
||||||
|
// Required parameters (id/name)
|
||||||
|
|
||||||
|
pool_id_t pool_id = 0;
|
||||||
|
std::string pool_name;
|
||||||
|
|
||||||
|
// Force removal
|
||||||
|
bool force;
|
||||||
|
|
||||||
|
int state = 0;
|
||||||
|
cli_result_t result;
|
||||||
|
|
||||||
|
// Is pool valid?
|
||||||
|
bool pool_valid = false;
|
||||||
|
|
||||||
|
// Updated pools
|
||||||
|
json11::Json new_pools;
|
||||||
|
|
||||||
|
// Expected pools mod revision
|
||||||
|
uint64_t pools_mod_rev;
|
||||||
|
|
||||||
|
bool is_done() { return state == 100; }
|
||||||
|
|
||||||
|
void loop()
|
||||||
|
{
|
||||||
|
if (state == 1)
|
||||||
|
goto resume_1;
|
||||||
|
else if (state == 2)
|
||||||
|
goto resume_2;
|
||||||
|
else if (state == 3)
|
||||||
|
goto resume_3;
|
||||||
|
|
||||||
|
// Pool name (or id) required
|
||||||
|
if (!pool_id && pool_name == "")
|
||||||
|
{
|
||||||
|
result = (cli_result_t){ .err = EINVAL, .text = "Pool name or id must be given" };
|
||||||
|
state = 100;
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate pool name/id
|
||||||
|
|
||||||
|
// Get pool id by name (if name given)
|
||||||
|
if (pool_name != "")
|
||||||
|
{
|
||||||
|
for (auto & ic: parent->cli->st_cli.pool_config)
|
||||||
|
{
|
||||||
|
if (ic.second.name == pool_name)
|
||||||
|
{
|
||||||
|
pool_id = ic.first;
|
||||||
|
pool_valid = 1;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Otherwise, check if given pool id is valid
|
||||||
|
else
|
||||||
|
{
|
||||||
|
// Set pool name from id (for easier logging)
|
||||||
|
pool_name = "id " + std::to_string(pool_id);
|
||||||
|
|
||||||
|
// Look-up pool id in pool_config
|
||||||
|
if (parent->cli->st_cli.pool_config.find(pool_id) != parent->cli->st_cli.pool_config.end())
|
||||||
|
{
|
||||||
|
pool_valid = 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Need a valid pool to proceed
|
||||||
|
if (!pool_valid)
|
||||||
|
{
|
||||||
|
result = (cli_result_t){ .err = ENOENT, .text = "Pool "+pool_name+" does not exist" };
|
||||||
|
state = 100;
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unless forced, check if pool has associated Images/Snapshots
|
||||||
|
if (!force)
|
||||||
|
{
|
||||||
|
std::string images;
|
||||||
|
|
||||||
|
for (auto & ic: parent->cli->st_cli.inode_config)
|
||||||
|
{
|
||||||
|
if (pool_id && INODE_POOL(ic.second.num) != pool_id)
|
||||||
|
{
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
images += ((images != "") ? ", " : "") + ic.second.name;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (images != "")
|
||||||
|
{
|
||||||
|
result = (cli_result_t){
|
||||||
|
.err = ENOTEMPTY,
|
||||||
|
.text =
|
||||||
|
"Pool "+pool_name+" cannot be removed as it still has the following "
|
||||||
|
"images/snapshots associated with it: "+images
|
||||||
|
};
|
||||||
|
state = 100;
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Proceed to deleting the pool
|
||||||
|
state = 1;
|
||||||
|
do
|
||||||
|
{
|
||||||
|
resume_1:
|
||||||
|
// Get pools from etcd
|
||||||
|
parent->etcd_txn(json11::Json::object {
|
||||||
|
{ "success", json11::Json::array {
|
||||||
|
json11::Json::object {
|
||||||
|
{ "request_range", json11::Json::object {
|
||||||
|
{ "key", base64_encode(parent->cli->st_cli.etcd_prefix+"/config/pools") },
|
||||||
|
} }
|
||||||
|
},
|
||||||
|
} },
|
||||||
|
});
|
||||||
|
state = 2;
|
||||||
|
resume_2:
|
||||||
|
if (parent->waiting > 0)
|
||||||
|
return;
|
||||||
|
if (parent->etcd_err.err)
|
||||||
|
{
|
||||||
|
result = parent->etcd_err;
|
||||||
|
state = 100;
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
{
|
||||||
|
// Parse received pools from etcd
|
||||||
|
auto kv = parent->cli->st_cli.parse_etcd_kv(parent->etcd_result["responses"][0]["response_range"]["kvs"][0]);
|
||||||
|
|
||||||
|
// Remove pool
|
||||||
|
auto p = kv.value.object_items();
|
||||||
|
if (p.erase(std::to_string(pool_id)) != 1)
|
||||||
|
{
|
||||||
|
result = (cli_result_t){
|
||||||
|
.err = ENOENT,
|
||||||
|
.text = "Failed to erase pool "+pool_name+" from: "+kv.value.string_value()
|
||||||
|
};
|
||||||
|
state = 100;
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Record updated pools
|
||||||
|
new_pools = p;
|
||||||
|
|
||||||
|
// Expected pools mod revision
|
||||||
|
pools_mod_rev = kv.mod_revision;
|
||||||
|
}
|
||||||
|
// Update pools in etcd
|
||||||
|
parent->etcd_txn(json11::Json::object {
|
||||||
|
{ "compare", json11::Json::array {
|
||||||
|
json11::Json::object {
|
||||||
|
{ "target", "MOD" },
|
||||||
|
{ "key", base64_encode(parent->cli->st_cli.etcd_prefix+"/config/pools") },
|
||||||
|
{ "result", "LESS" },
|
||||||
|
{ "mod_revision", pools_mod_rev+1 },
|
||||||
|
}
|
||||||
|
} },
|
||||||
|
{ "success", json11::Json::array {
|
||||||
|
json11::Json::object {
|
||||||
|
{ "request_put", json11::Json::object {
|
||||||
|
{ "key", base64_encode(parent->cli->st_cli.etcd_prefix+"/config/pools") },
|
||||||
|
{ "value", base64_encode(new_pools.dump()) },
|
||||||
|
} },
|
||||||
|
},
|
||||||
|
} },
|
||||||
|
});
|
||||||
|
state = 3;
|
||||||
|
resume_3:
|
||||||
|
if (parent->waiting > 0)
|
||||||
|
return;
|
||||||
|
if (parent->etcd_err.err)
|
||||||
|
{
|
||||||
|
result = parent->etcd_err;
|
||||||
|
state = 100;
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
} while (!parent->etcd_result["succeeded"].bool_value());
|
||||||
|
|
||||||
|
// Successfully deleted pool
|
||||||
|
result = (cli_result_t){
|
||||||
|
.err = 0,
|
||||||
|
.text = "Pool "+pool_name+" deleted",
|
||||||
|
.data = new_pools
|
||||||
|
};
|
||||||
|
state = 100;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
std::function<bool(cli_result_t &)> cli_tool_t::start_pool_rm(json11::Json cfg)
|
||||||
|
{
|
||||||
|
auto pool_remover = new pool_remover_t();
|
||||||
|
pool_remover->parent = this;
|
||||||
|
|
||||||
|
pool_remover->pool_id = cfg["pool"].uint64_value();
|
||||||
|
pool_remover->pool_name = pool_remover->pool_id ? "" : cfg["pool"].as_string();
|
||||||
|
|
||||||
|
pool_remover->force = !cfg["force"].is_null();
|
||||||
|
|
||||||
|
return [pool_remover](cli_result_t & result)
|
||||||
|
{
|
||||||
|
pool_remover->loop();
|
||||||
|
if (pool_remover->is_done())
|
||||||
|
{
|
||||||
|
result = pool_remover->result;
|
||||||
|
delete pool_remover;
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
return false;
|
||||||
|
};
|
||||||
|
}
|
|
@@ -53,6 +53,8 @@ struct snap_remover_t
     int use_cas = 1;
     // interval between fsyncs
     int fsync_interval = 128;
+    // ignore deletion errors
+    bool down_ok = false;
 
     std::map<inode_t,int> sources;
     std::map<inode_t,uint64_t> inode_used;
@@ -245,6 +247,7 @@ resume_8:
     }
     state = 100;
     result = (cli_result_t){
+        .err = 0,
         .text = "",
         .data = my_result(result.data),
     };
@@ -679,6 +682,7 @@ resume_100:
         { "inode", inode },
         { "pool", (uint64_t)INODE_POOL(inode) },
        { "fsync-interval", fsync_interval },
+        { "down-ok", down_ok },
     });
 }
 };
@@ -690,6 +694,7 @@ std::function<bool(cli_result_t &)> cli_tool_t::start_rm(json11::Json cfg)
     snap_remover->from_name = cfg["from"].string_value();
     snap_remover->to_name = cfg["to"].string_value();
     snap_remover->fsync_interval = cfg["fsync_interval"].uint64_value();
+    snap_remover->down_ok = cfg["down_ok"].bool_value();
     if (!snap_remover->fsync_interval)
         snap_remover->fsync_interval = 128;
     if (!cfg["cas"].is_null())

@@ -25,6 +25,7 @@ struct rm_inode_t
     uint64_t inode = 0;
     pool_id_t pool_id = 0;
     uint64_t min_offset = 0;
+    bool down_ok = false;
 
     cli_tool_t *parent = NULL;
     inode_list_t *lister = NULL;
@@ -212,7 +213,9 @@ struct rm_inode_t
         }
         if (parent->progress && total_count > 0 && total_done*1000/total_count != total_prev_pct)
         {
-            fprintf(stderr, "\rRemoved %ju/%ju objects, %ju more PGs to list...", total_done, total_count, pgs_to_list);
+            fprintf(stderr, parent->color
+                ? "\rRemoved %ju/%ju objects, %ju more PGs to list..."
+                : "Removed %ju/%ju objects, %ju more PGs to list...\n", total_done, total_count, pgs_to_list);
             total_prev_pct = total_done*1000/total_count;
         }
         if (lists_done && !lists.size())
@@ -221,17 +224,18 @@ struct rm_inode_t
         {
             fprintf(stderr, "\n");
         }
-        if (parent->progress && (total_done < total_count || inactive_osds.size() > 0 || error_count > 0))
+        bool is_error = (total_done < total_count || inactive_osds.size() > 0 || error_count > 0);
+        if (parent->progress && is_error)
         {
             fprintf(
                 stderr, "Warning: Pool:%u,ID:%ju inode data may not have been fully removed.\n"
-                " Use `vitastor-cli rm-data --pool %u --inode %ju` if you encounter it in listings.\n",
+                "Use `vitastor-cli rm-data --pool %u --inode %ju` if you encounter it in listings.\n",
                 pool_id, INODE_NO_POOL(inode), pool_id, INODE_NO_POOL(inode)
             );
         }
         result = (cli_result_t){
-            .err = error_count > 0 ? EIO : 0,
-            .text = error_count > 0 ? "Some blocks were not removed" : (
+            .err = is_error && !down_ok ? EIO : 0,
+            .text = is_error ? "Some blocks were not removed" : (
                 "Done, inode "+std::to_string(INODE_NO_POOL(inode))+" from pool "+
                 std::to_string(pool_id)+" removed"),
             .data = json11::Json::object {
@@ -280,6 +284,7 @@ std::function<bool(cli_result_t &)> cli_tool_t::start_rm_data(json11::Json cfg)
     {
         remover->inode = (remover->inode & (((uint64_t)1 << (64-POOL_ID_BITS)) - 1)) | (((uint64_t)remover->pool_id) << (64-POOL_ID_BITS));
     }
+    remover->down_ok = cfg["down_ok"].bool_value();
     remover->pool_id = INODE_POOL(remover->inode);
     remover->min_offset = cfg["min_offset"].uint64_value();
     return [remover](cli_result_t & result)
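The new down_ok flag only changes the exit code: removal problems are still reported in the text, but EIO is suppressed. A minimal sketch (assumption, not part of the patch) of the cfg accepted by start_rm_data() with the new key; cli_tool is a hypothetical name for the cli_tool_t instance.
// Hypothetical sketch: cfg keys consumed by start_rm_data() ("down_ok" is the new one).
json11::Json::object cfg = {
    { "pool", (uint64_t)2 },
    { "inode", (uint64_t)123 },
    { "down_ok", true }, // still prints "Some blocks were not removed", but returns err = 0
};
auto cb = cli_tool->start_rm_data(cfg);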
@@ -6,7 +6,7 @@
 #include "cluster_client_impl.h"
 #include "http_client.h" // json_is_true
 
-cluster_client_t::cluster_client_t(ring_loop_t *ringloop, timerfd_manager_t *tfd, json11::Json & config)
+cluster_client_t::cluster_client_t(ring_loop_t *ringloop, timerfd_manager_t *tfd, json11::Json config)
 {
     wb = new writeback_cache_t();
 
@@ -265,7 +265,7 @@ void cluster_client_t::erase_op(cluster_op_t *op)
     }
 }
 
-void cluster_client_t::continue_ops(bool up_retry)
+void cluster_client_t::continue_ops(int time_passed)
 {
     if (!pgs_loaded)
     {
@@ -277,22 +277,27 @@ void cluster_client_t::continue_ops(bool up_retry)
         // Attempt to reenter the function
         return;
     }
+    int reset_duration = 0;
 restart:
     continuing_ops = 1;
     for (auto op = op_queue_head; op; )
     {
         cluster_op_t *next_op = op->next;
-        if (!op->up_wait || up_retry)
+        if (op->retry_after && time_passed)
         {
-            op->up_wait = false;
-            if (!op->prev_wait)
+            op->retry_after = op->retry_after > time_passed ? op->retry_after-time_passed : 0;
+            if (op->retry_after && (!reset_duration || op->retry_after < reset_duration))
             {
-                if (op->opcode == OSD_OP_SYNC)
-                    continue_sync(op);
-                else
-                    continue_rw(op);
+                reset_duration = op->retry_after;
             }
         }
+        if (!op->retry_after && !op->prev_wait)
+        {
+            if (op->opcode == OSD_OP_SYNC)
+                continue_sync(op);
+            else
+                continue_rw(op);
+        }
         op = next_op;
         if (continuing_ops == 2)
         {
@@ -300,6 +305,27 @@ restart:
         }
     }
     continuing_ops = 0;
+    reset_retry_timer(reset_duration);
+}
+
+void cluster_client_t::reset_retry_timer(int new_duration)
+{
+    if (retry_timeout_duration && retry_timeout_duration <= new_duration || !new_duration)
+    {
+        return;
+    }
+    if (retry_timeout_id)
+    {
+        tfd->clear_timer(retry_timeout_id);
+    }
+    retry_timeout_duration = new_duration;
+    retry_timeout_id = tfd->set_timer(retry_timeout_duration, false, [this](int)
+    {
+        int time_passed = retry_timeout_duration;
+        retry_timeout_id = 0;
+        retry_timeout_duration = 0;
+        continue_ops(time_passed);
+    });
 }
 
 void cluster_client_t::on_load_config_hook(json11::Json::object & etcd_global_config)
@@ -349,15 +375,25 @@ void cluster_client_t::on_load_config_hook(json11::Json::object & etcd_global_config)
     {
         client_max_writeback_iodepth = DEFAULT_CLIENT_MAX_WRITEBACK_IODEPTH;
     }
-    // up_wait_retry_interval
-    up_wait_retry_interval = config["up_wait_retry_interval"].uint64_value();
-    if (!up_wait_retry_interval)
+    // client_retry_interval
+    client_retry_interval = config["client_retry_interval"].uint64_value();
+    if (!client_retry_interval)
     {
-        up_wait_retry_interval = 50;
+        client_retry_interval = 50;
     }
-    else if (up_wait_retry_interval < 10)
+    else if (client_retry_interval < 10)
     {
-        up_wait_retry_interval = 10;
+        client_retry_interval = 10;
+    }
+    // client_eio_retry_interval
+    client_eio_retry_interval = 1000;
+    if (!config["client_eio_retry_interval"].is_null())
+    {
+        client_eio_retry_interval = config["client_eio_retry_interval"].uint64_value();
+        if (client_eio_retry_interval && client_eio_retry_interval < 10)
+        {
+            client_eio_retry_interval = 10;
+        }
     }
     // log_level
     log_level = config["log_level"].uint64_value();
@@ -534,7 +570,7 @@ void cluster_client_t::execute_internal(cluster_op_t *op)
         return;
     }
     if (op->opcode == OSD_OP_WRITE && enable_writeback && !(op->flags & OP_FLUSH_BUFFER) &&
-        !op->version /* FIXME no CAS writeback */)
+        !op->version /* no CAS writeback */)
     {
         if (wb->writebacks_active >= client_max_writeback_iodepth)
         {
@@ -555,7 +591,7 @@ void cluster_client_t::execute_internal(cluster_op_t *op)
     }
     if (op->opcode == OSD_OP_WRITE && !(op->flags & OP_IMMEDIATE_COMMIT))
     {
-        if (!(op->flags & OP_FLUSH_BUFFER))
+        if (!(op->flags & OP_FLUSH_BUFFER) && !op->version /* no CAS write-repeat */)
         {
             wb->copy_write(op, CACHE_WRITTEN);
         }
@@ -716,15 +752,8 @@ resume_1:
             // We'll need to retry again
             if (op->parts[i].flags & PART_RETRY)
             {
-                op->up_wait = true;
-                if (!retry_timeout_id)
-                {
-                    retry_timeout_id = tfd->set_timer(up_wait_retry_interval, false, [this](int)
-                    {
-                        retry_timeout_id = 0;
-                        continue_ops(true);
-                    });
-                }
+                op->retry_after = client_retry_interval;
+                reset_retry_timer(client_retry_interval);
             }
             op->state = 1;
         }
@@ -780,10 +809,9 @@ resume_2:
         return 1;
     }
     else if (op->retval != 0 && !(op->flags & OP_FLUSH_BUFFER) &&
-        op->retval != -EPIPE && op->retval != -EIO && op->retval != -ENOSPC)
+        op->retval != -EPIPE && (op->retval != -EIO || !client_eio_retry_interval) && op->retval != -ENOSPC)
     {
         // Fatal error (neither -EPIPE, -EIO nor -ENOSPC)
-        // FIXME: Add a parameter to allow to not wait for EIOs (incomplete or corrupted objects) to heal
         erase_op(op);
         return 1;
     }
@@ -1161,7 +1189,7 @@ void cluster_client_t::handle_op_part(cluster_op_part_t *part)
             );
         }
     }
-    else
+    else if (log_level > 0)
    {
         fprintf(
             stderr, "%s operation failed on OSD %ju: retval=%jd (expected %d)\n",
@@ -1171,16 +1199,12 @@ void cluster_client_t::handle_op_part(cluster_op_part_t *part)
         // All next things like timer, continue_sync/rw and stop_client may affect the operation again
         // So do all these things after modifying operation state, otherwise we may hit reenterability bugs
         // FIXME postpone such things to set_immediate here to avoid bugs
-        // Mark op->up_wait = true to retry operation after a short pause (not immediately)
-        op->up_wait = true;
-        if (!retry_timeout_id)
+        // Set op->retry_after to retry operation after a short pause (not immediately)
+        if (!op->retry_after)
         {
-            retry_timeout_id = tfd->set_timer(up_wait_retry_interval, false, [this](int)
-            {
-                retry_timeout_id = 0;
-                continue_ops(true);
-            });
+            op->retry_after = op->retval == -EIO ? client_eio_retry_interval : client_retry_interval;
         }
+        reset_retry_timer(op->retry_after);
         if (op->inflight_count == 0)
         {
             if (op->opcode == OSD_OP_SYNC)
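Retry behaviour is now driven by two intervals instead of the single up_wait_retry_interval. A minimal configuration sketch, assuming the client is constructed the same way the tools in this patch do it (ringloop and epmgr are the usual surrounding objects, not defined here):
// Sketch only: per-error-class retry intervals read by on_load_config_hook() above.
json11::Json config = json11::Json::object{
    { "client_retry_interval", 100 },      // ms; ordinary retries, clamped to >= 10
    { "client_eio_retry_interval", 2000 }, // ms; EIO retries, 0 makes EIO a fatal error
};
cluster_client_t *cli = new cluster_client_t(ringloop, epmgr->tfd, config);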
@@ -59,7 +59,7 @@ protected:
     void *buf = NULL;
     cluster_op_t *orig_op = NULL;
     bool needs_reslice = false;
-    bool up_wait = false;
+    int retry_after = 0;
     int inflight_count = 0, done_count = 0;
     std::vector<cluster_op_part_t> parts;
     void *part_bitmaps = NULL;
@@ -92,9 +92,11 @@ class cluster_client_t
     uint64_t client_max_writeback_iodepth = 0;
 
     int log_level = 0;
-    int up_wait_retry_interval = 500; // ms
+    int client_retry_interval = 50; // ms
+    int client_eio_retry_interval = 1000; // ms
 
     int retry_timeout_id = 0;
+    int retry_timeout_duration = 0;
     std::vector<cluster_op_t*> offline_ops;
     cluster_op_t *op_queue_head = NULL, *op_queue_tail = NULL;
     writeback_cache_t *wb = NULL;
@@ -121,7 +123,7 @@ public:
     json11::Json::object cli_config, file_config, etcd_global_config;
     json11::Json::object config;
 
-    cluster_client_t(ring_loop_t *ringloop, timerfd_manager_t *tfd, json11::Json & config);
+    cluster_client_t(ring_loop_t *ringloop, timerfd_manager_t *tfd, json11::Json config);
     ~cluster_client_t();
     void execute(cluster_op_t *op);
     void execute_raw(osd_num_t osd_num, osd_op_t *op);
@@ -131,7 +133,7 @@ public:
 
     bool get_immediate_commit(uint64_t inode);
 
-    void continue_ops(bool up_retry = false);
+    void continue_ops(int time_passed = 0);
     inode_list_t *list_inode_start(inode_t inode,
         std::function<void(inode_list_t* lst, std::set<object_id>&& objects, pg_num_t pg_num, osd_num_t primary_osd, int status)> callback);
     int list_pg_count(inode_list_t *lst);
@@ -152,6 +154,7 @@ protected:
     int continue_rw(cluster_op_t *op);
     bool check_rw(cluster_op_t *op);
     void slice_rw(cluster_op_t *op);
+    void reset_retry_timer(int new_duration);
     bool try_send(cluster_op_t *op, int i);
     int continue_sync(cluster_op_t *op);
     void send_sync(cluster_op_t *op, cluster_op_part_t *part);
@@ -573,8 +573,7 @@ void etcd_state_client_t::load_global_config()
         {
             global_bitmap_granularity = DEFAULT_BITMAP_GRANULARITY;
         }
-        global_immediate_commit = global_config["immediate_commit"].string_value() == "all"
-            ? IMMEDIATE_ALL : (global_config["immediate_commit"].string_value() == "small" ? IMMEDIATE_SMALL : IMMEDIATE_NONE);
+        global_immediate_commit = parse_immediate_commit(global_config["immediate_commit"].string_value());
         on_load_config_hook(global_config);
     });
 }
@@ -782,13 +781,8 @@ void etcd_state_client_t::parse_state(const etcd_kv_t & kv)
             // Failure Domain
             pc.failure_domain = pool_item.second["failure_domain"].string_value();
             // Coding Scheme
-            if (pool_item.second["scheme"] == "replicated")
-                pc.scheme = POOL_SCHEME_REPLICATED;
-            else if (pool_item.second["scheme"] == "xor")
-                pc.scheme = POOL_SCHEME_XOR;
-            else if (pool_item.second["scheme"] == "ec" || pool_item.second["scheme"] == "jerasure")
-                pc.scheme = POOL_SCHEME_EC;
-            else
+            pc.scheme = parse_scheme(pool_item.second["scheme"].string_value());
+            if (!pc.scheme)
             {
                 fprintf(stderr, "Pool %u has invalid coding scheme (one of \"xor\", \"replicated\", \"ec\" or \"jerasure\" required), skipping pool\n", pool_id);
                 continue;
@@ -871,9 +865,7 @@ void etcd_state_client_t::parse_state(const etcd_kv_t & kv)
                 pc.scrub_interval = 0;
             // Immediate Commit Mode
             pc.immediate_commit = pool_item.second["immediate_commit"].is_string()
-                ? (pool_item.second["immediate_commit"].string_value() == "all"
-                    ? IMMEDIATE_ALL : (pool_item.second["immediate_commit"].string_value() == "small"
-                        ? IMMEDIATE_SMALL : IMMEDIATE_NONE))
+                ? parse_immediate_commit(pool_item.second["immediate_commit"].string_value())
                 : global_immediate_commit;
             // PG Stripe Size
             pc.pg_stripe_size = pool_item.second["pg_stripe_size"].uint64_value();
@@ -1167,6 +1159,23 @@ void etcd_state_client_t::parse_state(const etcd_kv_t & kv)
     }
 }
 
+uint32_t etcd_state_client_t::parse_immediate_commit(const std::string & immediate_commit_str)
+{
+    return immediate_commit_str == "all" ? IMMEDIATE_ALL :
+        (immediate_commit_str == "small" ? IMMEDIATE_SMALL : IMMEDIATE_NONE);
+}
+
+uint32_t etcd_state_client_t::parse_scheme(const std::string & scheme)
+{
+    if (scheme == "replicated")
+        return POOL_SCHEME_REPLICATED;
+    else if (scheme == "xor")
+        return POOL_SCHEME_XOR;
+    else if (scheme == "ec" || scheme == "jerasure")
+        return POOL_SCHEME_EC;
+    return 0;
+}
+
 void etcd_state_client_t::insert_inode_config(const inode_config_t & cfg)
 {
     this->inode_config[cfg.num] = cfg;

@@ -151,4 +151,7 @@ public:
     void close_watch(inode_watch_t* watch);
     int address_count();
     ~etcd_state_client_t();
+
+    static uint32_t parse_immediate_commit(const std::string & immediate_commit_str);
+    static uint32_t parse_scheme(const std::string & scheme_str);
 };
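The two new static helpers centralize scheme and immediate-commit string parsing that was previously inlined in two places. Usage follows directly from the declarations above:
// Usage sketch of the new static helpers:
uint32_t ic = etcd_state_client_t::parse_immediate_commit("small");  // IMMEDIATE_SMALL
uint32_t sc = etcd_state_client_t::parse_scheme("jerasure");         // POOL_SCHEME_EC ("jerasure" is an alias of "ec")
uint32_t bad = etcd_state_client_t::parse_scheme("raid5");           // 0 -> the pool is skipped as invalid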
@ -0,0 +1,401 @@
|
||||||
|
// Copyright (c) Vitaliy Filippov, 2019+
|
||||||
|
// License: VNPL-1.1 (see README.md for details)
|
||||||
|
//
|
||||||
|
// Vitastor shared key/value database test CLI
|
||||||
|
|
||||||
|
#define _XOPEN_SOURCE
|
||||||
|
#include <limits.h>
|
||||||
|
|
||||||
|
#include <netinet/tcp.h>
|
||||||
|
#include <sys/epoll.h>
|
||||||
|
#include <unistd.h>
|
||||||
|
#include <fcntl.h>
|
||||||
|
//#include <signal.h>
|
||||||
|
|
||||||
|
#include "epoll_manager.h"
|
||||||
|
#include "str_util.h"
|
||||||
|
#include "kv_db.h"
|
||||||
|
|
||||||
|
const char *exe_name = NULL;
|
||||||
|
|
||||||
|
class kv_cli_t
|
||||||
|
{
|
||||||
|
public:
|
||||||
|
kv_dbw_t *db = NULL;
|
||||||
|
ring_loop_t *ringloop = NULL;
|
||||||
|
epoll_manager_t *epmgr = NULL;
|
||||||
|
cluster_client_t *cli = NULL;
|
||||||
|
bool interactive = false;
|
||||||
|
int in_progress = 0;
|
||||||
|
char *cur_cmd = NULL;
|
||||||
|
int cur_cmd_size = 0, cur_cmd_alloc = 0;
|
||||||
|
bool finished = false, eof = false;
|
||||||
|
json11::Json::object cfg;
|
||||||
|
|
||||||
|
~kv_cli_t();
|
||||||
|
|
||||||
|
static json11::Json::object parse_args(int narg, const char *args[]);
|
||||||
|
void run(const json11::Json::object & cfg);
|
||||||
|
void read_cmd();
|
||||||
|
void next_cmd();
|
||||||
|
void handle_cmd(const std::string & cmd, std::function<void()> cb);
|
||||||
|
};
|
||||||
|
|
||||||
|
kv_cli_t::~kv_cli_t()
|
||||||
|
{
|
||||||
|
if (cur_cmd)
|
||||||
|
{
|
||||||
|
free(cur_cmd);
|
||||||
|
cur_cmd = NULL;
|
||||||
|
}
|
||||||
|
cur_cmd_alloc = 0;
|
||||||
|
if (db)
|
||||||
|
delete db;
|
||||||
|
if (cli)
|
||||||
|
{
|
||||||
|
cli->flush();
|
||||||
|
delete cli;
|
||||||
|
}
|
||||||
|
if (epmgr)
|
||||||
|
delete epmgr;
|
||||||
|
if (ringloop)
|
||||||
|
delete ringloop;
|
||||||
|
}
|
||||||
|
|
||||||
|
json11::Json::object kv_cli_t::parse_args(int narg, const char *args[])
|
||||||
|
{
|
||||||
|
json11::Json::object cfg;
|
||||||
|
for (int i = 1; i < narg; i++)
|
||||||
|
{
|
||||||
|
if (!strcmp(args[i], "-h") || !strcmp(args[i], "--help"))
|
||||||
|
{
|
||||||
|
printf(
|
||||||
|
"Vitastor Key/Value CLI\n"
|
||||||
|
"(c) Vitaliy Filippov, 2023+ (VNPL-1.1)\n"
|
||||||
|
"\n"
|
||||||
|
"USAGE: %s [--etcd_address ADDR] [OTHER OPTIONS]\n",
|
||||||
|
exe_name
|
||||||
|
);
|
||||||
|
exit(0);
|
||||||
|
}
|
||||||
|
else if (args[i][0] == '-' && args[i][1] == '-')
|
||||||
|
{
|
||||||
|
const char *opt = args[i]+2;
|
||||||
|
cfg[opt] = !strcmp(opt, "json") || i == narg-1 ? "1" : args[++i];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return cfg;
|
||||||
|
}
|
||||||
|
|
||||||
|
void kv_cli_t::run(const json11::Json::object & cfg)
|
||||||
|
{
|
||||||
|
// Create client
|
||||||
|
ringloop = new ring_loop_t(512);
|
||||||
|
epmgr = new epoll_manager_t(ringloop);
|
||||||
|
cli = new cluster_client_t(ringloop, epmgr->tfd, cfg);
|
||||||
|
db = new kv_dbw_t(cli);
|
||||||
|
// Load image metadata
|
||||||
|
while (!cli->is_ready())
|
||||||
|
{
|
||||||
|
ringloop->loop();
|
||||||
|
if (cli->is_ready())
|
||||||
|
break;
|
||||||
|
ringloop->wait();
|
||||||
|
}
|
||||||
|
// Run
|
||||||
|
fcntl(0, F_SETFL, fcntl(0, F_GETFL, 0) | O_NONBLOCK);
|
||||||
|
try
|
||||||
|
{
|
||||||
|
epmgr->tfd->set_fd_handler(0, false, [this](int fd, int events)
|
||||||
|
{
|
||||||
|
if (events & EPOLLIN)
|
||||||
|
{
|
||||||
|
read_cmd();
|
||||||
|
}
|
||||||
|
if (events & EPOLLRDHUP)
|
||||||
|
{
|
||||||
|
epmgr->tfd->set_fd_handler(0, false, NULL);
|
||||||
|
finished = true;
|
||||||
|
}
|
||||||
|
});
|
||||||
|
interactive = true;
|
||||||
|
printf("> ");
|
||||||
|
}
|
||||||
|
catch (std::exception & e)
|
||||||
|
{
|
||||||
|
// Can't add to epoll, STDIN is probably a file
|
||||||
|
read_cmd();
|
||||||
|
}
|
||||||
|
while (!finished)
|
||||||
|
{
|
||||||
|
ringloop->loop();
|
||||||
|
if (!finished)
|
||||||
|
ringloop->wait();
|
||||||
|
}
|
||||||
|
// Destroy the client
|
||||||
|
delete db;
|
||||||
|
db = NULL;
|
||||||
|
cli->flush();
|
||||||
|
delete cli;
|
||||||
|
delete epmgr;
|
||||||
|
delete ringloop;
|
||||||
|
cli = NULL;
|
||||||
|
epmgr = NULL;
|
||||||
|
ringloop = NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
void kv_cli_t::read_cmd()
|
||||||
|
{
|
||||||
|
if (!cur_cmd_alloc)
|
||||||
|
{
|
||||||
|
cur_cmd_alloc = 65536;
|
||||||
|
cur_cmd = (char*)malloc_or_die(cur_cmd_alloc);
|
||||||
|
}
|
||||||
|
while (cur_cmd_size < cur_cmd_alloc)
|
||||||
|
{
|
||||||
|
int r = read(0, cur_cmd+cur_cmd_size, cur_cmd_alloc-cur_cmd_size);
|
||||||
|
if (r < 0 && errno != EAGAIN)
|
||||||
|
fprintf(stderr, "Error reading from stdin: %s\n", strerror(errno));
|
||||||
|
if (r > 0)
|
||||||
|
cur_cmd_size += r;
|
||||||
|
if (r == 0)
|
||||||
|
eof = true;
|
||||||
|
if (r <= 0)
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
next_cmd();
|
||||||
|
}
|
||||||
|
|
||||||
|
void kv_cli_t::next_cmd()
|
||||||
|
{
|
||||||
|
if (in_progress > 0)
|
||||||
|
{
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
int pos = 0;
|
||||||
|
for (; pos < cur_cmd_size; pos++)
|
||||||
|
{
|
||||||
|
if (cur_cmd[pos] == '\n' || cur_cmd[pos] == '\r')
|
||||||
|
{
|
||||||
|
auto cmd = trim(std::string(cur_cmd, pos));
|
||||||
|
pos++;
|
||||||
|
memmove(cur_cmd, cur_cmd+pos, cur_cmd_size-pos);
|
||||||
|
cur_cmd_size -= pos;
|
||||||
|
in_progress++;
|
||||||
|
handle_cmd(cmd, [this]()
|
||||||
|
{
|
||||||
|
in_progress--;
|
||||||
|
if (interactive)
|
||||||
|
printf("> ");
|
||||||
|
next_cmd();
|
||||||
|
if (!in_progress)
|
||||||
|
read_cmd();
|
||||||
|
});
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (eof && !in_progress)
|
||||||
|
{
|
||||||
|
finished = true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void kv_cli_t::handle_cmd(const std::string & cmd, std::function<void()> cb)
|
||||||
|
{
|
||||||
|
if (cmd == "")
|
||||||
|
{
|
||||||
|
cb();
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
auto pos = cmd.find_first_of(" \t");
|
||||||
|
if (pos != std::string::npos)
|
||||||
|
{
|
||||||
|
while (pos < cmd.size()-1 && (cmd[pos+1] == ' ' || cmd[pos+1] == '\t'))
|
||||||
|
pos++;
|
||||||
|
}
|
||||||
|
auto opname = strtolower(pos == std::string::npos ? cmd : cmd.substr(0, pos));
|
||||||
|
if (opname == "open")
|
||||||
|
{
|
||||||
|
uint64_t pool_id = 0;
|
||||||
|
inode_t inode_id = 0;
|
||||||
|
uint32_t kv_block_size = 0;
|
||||||
|
int scanned = sscanf(cmd.c_str() + pos+1, "%lu %lu %u", &pool_id, &inode_id, &kv_block_size);
|
||||||
|
if (scanned == 2)
|
||||||
|
{
|
||||||
|
kv_block_size = 4096;
|
||||||
|
}
|
||||||
|
if (scanned < 2 || !pool_id || !inode_id || !kv_block_size || (kv_block_size & (kv_block_size-1)) != 0)
|
||||||
|
{
|
||||||
|
fprintf(stderr, "Usage: open <pool_id> <inode_id> [block_size]. Block size must be a power of 2. Default is 4096.\n");
|
||||||
|
cb();
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
cfg["kv_block_size"] = (uint64_t)kv_block_size;
|
||||||
|
db->open(INODE_WITH_POOL(pool_id, inode_id), cfg, [=](int res)
|
||||||
|
{
|
||||||
|
if (res < 0)
|
||||||
|
fprintf(stderr, "Error opening index: %s (code %d)\n", strerror(-res), res);
|
||||||
|
else
|
||||||
|
printf("Index opened. Current size: %lu bytes\n", db->get_size());
|
||||||
|
cb();
|
||||||
|
});
|
||||||
|
}
|
||||||
|
else if (opname == "config")
|
||||||
|
{
|
||||||
|
auto pos2 = cmd.find_first_of(" \t", pos+1);
|
||||||
|
if (pos2 == std::string::npos)
|
||||||
|
{
|
||||||
|
fprintf(stderr, "Usage: config <property> <value>\n");
|
||||||
|
cb();
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
auto key = trim(cmd.substr(pos+1, pos2-pos-1));
|
||||||
|
auto value = parse_size(trim(cmd.substr(pos2+1)));
|
||||||
|
if (key != "kv_memory_limit" &&
|
||||||
|
key != "kv_allocate_blocks" &&
|
||||||
|
key != "kv_evict_max_misses" &&
|
||||||
|
key != "kv_evict_attempts_per_level" &&
|
||||||
|
key != "kv_evict_unused_age" &&
|
||||||
|
key != "kv_log_level")
|
||||||
|
{
|
||||||
|
fprintf(
|
||||||
|
stderr, "Allowed properties: kv_memory_limit, kv_allocate_blocks,"
|
||||||
|
" kv_evict_max_misses, kv_evict_attempts_per_level, kv_evict_unused_age, kv_log_level\n"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
cfg[key] = value;
|
||||||
|
db->set_config(cfg);
|
||||||
|
}
|
||||||
|
cb();
|
||||||
|
}
|
||||||
|
else if (opname == "get" || opname == "set" || opname == "del")
|
||||||
|
{
|
||||||
|
if (opname == "get" || opname == "del")
|
||||||
|
{
|
||||||
|
if (pos == std::string::npos)
|
||||||
|
{
|
||||||
|
fprintf(stderr, "Usage: %s <key>\n", opname.c_str());
|
||||||
|
cb();
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
auto key = trim(cmd.substr(pos+1));
|
||||||
|
if (opname == "get")
|
||||||
|
{
|
||||||
|
db->get(key, [this, cb](int res, const std::string & value)
|
||||||
|
{
|
||||||
|
if (res < 0)
|
||||||
|
fprintf(stderr, "Error: %s (code %d)\n", strerror(-res), res);
|
||||||
|
else
|
||||||
|
{
|
||||||
|
write(1, value.c_str(), value.size());
|
||||||
|
write(1, "\n", 1);
|
||||||
|
}
|
||||||
|
cb();
|
||||||
|
});
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
db->del(key, [this, cb](int res)
|
||||||
|
{
|
||||||
|
if (res < 0)
|
||||||
|
fprintf(stderr, "Error: %s (code %d)\n", strerror(-res), res);
|
||||||
|
else
|
||||||
|
printf("OK\n");
|
||||||
|
cb();
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
auto pos2 = cmd.find_first_of(" \t", pos+1);
|
||||||
|
if (pos2 == std::string::npos)
|
||||||
|
{
|
||||||
|
fprintf(stderr, "Usage: set <key> <value>\n");
|
||||||
|
cb();
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
auto key = trim(cmd.substr(pos+1, pos2-pos-1));
|
||||||
|
auto value = trim(cmd.substr(pos2+1));
|
||||||
|
db->set(key, value, [this, cb](int res)
|
||||||
|
{
|
||||||
|
if (res < 0)
|
||||||
|
fprintf(stderr, "Error: %s (code %d)\n", strerror(-res), res);
|
||||||
|
else
|
||||||
|
printf("OK\n");
|
||||||
|
cb();
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
else if (opname == "list")
|
||||||
|
{
|
||||||
|
std::string start, end;
|
||||||
|
if (pos != std::string::npos)
|
||||||
|
{
|
||||||
|
auto pos2 = cmd.find_first_of(" \t", pos+1);
|
||||||
|
if (pos2 != std::string::npos)
|
||||||
|
{
|
||||||
|
start = trim(cmd.substr(pos+1, pos2-pos-1));
|
||||||
|
end = trim(cmd.substr(pos2+1));
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
start = trim(cmd.substr(pos+1));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
void *handle = db->list_start(start);
|
||||||
|
db->list_next(handle, [=](int res, const std::string & key, const std::string & value)
|
||||||
|
{
|
||||||
|
if (res < 0)
|
||||||
|
{
|
||||||
|
if (res != -ENOENT)
|
||||||
|
{
|
||||||
|
fprintf(stderr, "Error: %s (code %d)\n", strerror(-res), res);
|
||||||
|
}
|
||||||
|
db->list_close(handle);
|
||||||
|
cb();
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
printf("%s = %s\n", key.c_str(), value.c_str());
|
||||||
|
db->list_next(handle, NULL);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
else if (opname == "close")
|
||||||
|
{
|
||||||
|
db->close([=]()
|
||||||
|
{
|
||||||
|
printf("Index closed\n");
|
||||||
|
cb();
|
||||||
|
});
|
||||||
|
}
|
||||||
|
else if (opname == "quit" || opname == "q")
|
||||||
|
{
|
||||||
|
::close(0);
|
||||||
|
finished = true;
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
fprintf(
|
||||||
|
stderr, "Unknown operation: %s. Supported operations:\n"
|
||||||
|
"open <pool_id> <inode_id> [block_size]\n"
|
||||||
|
"config <property> <value>\n"
|
||||||
|
"get <key>\nset <key> <value>\ndel <key>\nlist [<start> [end]]\n"
|
||||||
|
"close\nquit\n", opname.c_str()
|
||||||
|
);
|
||||||
|
cb();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
int main(int narg, const char *args[])
|
||||||
|
{
|
||||||
|
setvbuf(stdout, NULL, _IONBF, 0);
|
||||||
|
setvbuf(stderr, NULL, _IONBF, 0);
|
||||||
|
exe_name = args[0];
|
||||||
|
kv_cli_t *p = new kv_cli_t();
|
||||||
|
p->run(kv_cli_t::parse_args(narg, args));
|
||||||
|
delete p;
|
||||||
|
return 0;
|
||||||
|
}
|
File diff suppressed because it is too large
@@ -0,0 +1,36 @@
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 (see README.md for details)
//
// Vitastor shared key/value database
// Parallel optimistic B-Tree O:-)

#pragma once

#include "cluster_client.h"

struct kv_db_t;

struct kv_dbw_t
{
    kv_dbw_t(cluster_client_t *cli);
    ~kv_dbw_t();

    void open(inode_t inode_id, json11::Json cfg, std::function<void(int)> cb);
    void set_config(json11::Json cfg);
    void close(std::function<void()> cb);

    uint64_t get_size();

    void get(const std::string & key, std::function<void(int res, const std::string & value)> cb,
        bool allow_old_cached = false);
    void set(const std::string & key, const std::string & value, std::function<void(int res)> cb,
        std::function<bool(int res, const std::string & value)> cas_compare = NULL);
    void del(const std::string & key, std::function<void(int res)> cb,
        std::function<bool(int res, const std::string & value)> cas_compare = NULL);

    void* list_start(const std::string & start);
    void list_next(void *handle, std::function<void(int res, const std::string & key, const std::string & value)> cb);
    void list_close(void *handle);

    kv_db_t *db;
};
|
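A minimal usage sketch of this wrapper API follows (illustrative only: it assumes an already initialised cluster_client_t named `cli`, borrows the INODE_WITH_POOL macro used by the stress tester below, and omits the ring loop plumbing and most error handling):

// Sketch, not part of the diff: open the index on inode (pool 1, inode 2),
// write one key without a CAS comparison, read it back, then close.
kv_dbw_t *db = new kv_dbw_t(cli);
db->open(INODE_WITH_POOL(1, 2), json11::Json::object{}, [&](int res)
{
    if (res < 0)
    {
        fprintf(stderr, "open failed: %d (%s)\n", res, strerror(-res));
        return;
    }
    db->set("example_key", "example_value", [&](int res)
    {
        db->get("example_key", [&](int res, const std::string & value)
        {
            printf("example_key = %s\n", value.c_str());
            db->close([&]() { printf("index closed\n"); });
        });
    }, NULL);
});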
@@ -0,0 +1,697 @@
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 (see README.md for details)
//
// Vitastor shared key/value database stress tester / benchmark

#define _XOPEN_SOURCE
#include <limits.h>

#include <netinet/tcp.h>
#include <sys/epoll.h>
#include <unistd.h>
#include <fcntl.h>
//#include <signal.h>

#include "epoll_manager.h"
#include "str_util.h"
#include "kv_db.h"

const char *exe_name = NULL;

struct kv_test_listing_t
{
    uint64_t count = 0, done = 0;
    void *handle = NULL;
    std::string next_after;
    std::set<std::string> inflights;
    timespec tv_begin;
    bool error = false;
};

struct kv_test_lat_t
{
    const char *name = NULL;
    uint64_t usec = 0, count = 0;
};

struct kv_test_stat_t
{
    kv_test_lat_t get, add, update, del, list;
    uint64_t list_keys = 0;
};

class kv_test_t
{
public:
    // Config
    json11::Json::object kv_cfg;
    std::string key_prefix, key_suffix;
    uint64_t inode_id = 0;
    uint64_t op_count = 1000000;
    uint64_t runtime_sec = 0;
    uint64_t parallelism = 4;
    uint64_t reopen_prob = 1;
    uint64_t get_prob = 30000;
    uint64_t add_prob = 20000;
    uint64_t update_prob = 20000;
    uint64_t del_prob = 5000;
    uint64_t list_prob = 300;
    uint64_t min_key_len = 10;
    uint64_t max_key_len = 70;
    uint64_t min_value_len = 50;
    uint64_t max_value_len = 300;
    uint64_t min_list_count = 10;
    uint64_t max_list_count = 1000;
    uint64_t print_stats_interval = 1;
    bool json_output = false;
    uint64_t log_level = 1;
    bool trace = false;
    bool stop_on_error = false;
    // FIXME: Multiple clients
    kv_test_stat_t stat, prev_stat;
    timespec prev_stat_time, start_stat_time;

    // State
    kv_dbw_t *db = NULL;
    ring_loop_t *ringloop = NULL;
    epoll_manager_t *epmgr = NULL;
    cluster_client_t *cli = NULL;
    ring_consumer_t consumer;
    bool finished = false;
    uint64_t total_prob = 0;
    uint64_t ops_sent = 0, ops_done = 0;
    int stat_timer_id = -1;
    int in_progress = 0;
    bool reopening = false;
    std::set<kv_test_listing_t*> listings;
    std::set<std::string> changing_keys;
    std::map<std::string, std::string> values;

    ~kv_test_t();

    static json11::Json::object parse_args(int narg, const char *args[]);
    void parse_config(json11::Json cfg);
    void run(json11::Json cfg);
    void loop();
    void print_stats(kv_test_stat_t & prev_stat, timespec & prev_stat_time);
    void print_total_stats();
    void start_change(const std::string & key);
    void stop_change(const std::string & key);
    void add_stat(kv_test_lat_t & stat, timespec tv_begin);
};

kv_test_t::~kv_test_t()
{
    if (db)
        delete db;
    if (cli)
    {
        cli->flush();
        delete cli;
    }
    if (epmgr)
        delete epmgr;
    if (ringloop)
        delete ringloop;
}

json11::Json::object kv_test_t::parse_args(int narg, const char *args[])
{
    json11::Json::object cfg;
    for (int i = 1; i < narg; i++)
    {
        if (!strcmp(args[i], "-h") || !strcmp(args[i], "--help"))
        {
            printf(
                "Vitastor Key/Value DB stress tester / benchmark\n"
                "(c) Vitaliy Filippov, 2023+ (VNPL-1.1)\n"
                "\n"
                "USAGE: %s --pool_id POOL_ID --inode_id INODE_ID [OPTIONS]\n"
                "  --op_count 1000000\n"
                "    Total operations to run during test. 0 means unlimited\n"
                "  --key_prefix \"\"\n"
                "    Prefix for all keys read or written (to avoid collisions)\n"
                "  --key_suffix \"\"\n"
                "    Suffix for all keys read or written (to avoid collisions, but scan all DB)\n"
                "  --runtime 0\n"
                "    Run for this number of seconds. 0 means unlimited\n"
                "  --parallelism 4\n"
                "    Run this number of operations in parallel\n"
                "  --get_prob 30000\n"
                "    Fraction of key retrieve operations\n"
                "  --add_prob 20000\n"
                "    Fraction of key addition operations\n"
                "  --update_prob 20000\n"
                "    Fraction of key update operations\n"
                "  --del_prob 30000\n"
                "    Fraction of key delete operations\n"
                "  --list_prob 300\n"
                "    Fraction of listing operations\n"
                "  --min_key_len 10\n"
                "    Minimum key size in bytes\n"
                "  --max_key_len 70\n"
                "    Maximum key size in bytes\n"
                "  --min_value_len 50\n"
                "    Minimum value size in bytes\n"
                "  --max_value_len 300\n"
                "    Maximum value size in bytes\n"
                "  --min_list_count 10\n"
                "    Minimum number of keys read in listing (0 = all keys)\n"
                "  --max_list_count 1000\n"
                "    Maximum number of keys read in listing\n"
                "  --print_stats 1\n"
                "    Print operation statistics every this number of seconds\n"
                "  --json\n"
                "    JSON output\n"
                "  --stop_on_error 0\n"
                "    Stop on first execution error, mismatch, lost key or extra key during listing\n"
                "  --kv_memory_limit 128M\n"
                "    Maximum memory to use for vitastor-kv index cache\n"
                "  --kv_allocate_blocks 4\n"
                "    Number of PG blocks used for new tree block allocation in parallel\n"
                "  --kv_evict_max_misses 10\n"
                "    Eviction algorithm parameter: retry eviction from another random spot\n"
                "    if this number of keys is used currently or was used recently\n"
                "  --kv_evict_attempts_per_level 3\n"
                "    Retry eviction at most this number of times per tree level, starting\n"
                "    with bottom-most levels\n"
                "  --kv_evict_unused_age 1000\n"
                "    Evict only keys unused during this number of last operations\n"
                "  --kv_log_level 1\n"
                "    Log level. 0 = errors, 1 = warnings, 10 = trace operations\n",
                exe_name
            );
            exit(0);
        }
        else if (args[i][0] == '-' && args[i][1] == '-')
        {
            const char *opt = args[i]+2;
            cfg[opt] = !strcmp(opt, "json") || i == narg-1 ? "1" : args[++i];
        }
    }
    return cfg;
}

void kv_test_t::parse_config(json11::Json cfg)
{
    inode_id = INODE_WITH_POOL(cfg["pool_id"].uint64_value(), cfg["inode_id"].uint64_value());
    if (cfg["op_count"].uint64_value() > 0)
        op_count = cfg["op_count"].uint64_value();
    key_prefix = cfg["key_prefix"].string_value();
    key_suffix = cfg["key_suffix"].string_value();
    if (cfg["runtime"].uint64_value() > 0)
        runtime_sec = cfg["runtime"].uint64_value();
    if (cfg["parallelism"].uint64_value() > 0)
        parallelism = cfg["parallelism"].uint64_value();
    if (!cfg["reopen_prob"].is_null())
        reopen_prob = cfg["reopen_prob"].uint64_value();
    if (!cfg["get_prob"].is_null())
        get_prob = cfg["get_prob"].uint64_value();
    if (!cfg["add_prob"].is_null())
        add_prob = cfg["add_prob"].uint64_value();
    if (!cfg["update_prob"].is_null())
        update_prob = cfg["update_prob"].uint64_value();
    if (!cfg["del_prob"].is_null())
        del_prob = cfg["del_prob"].uint64_value();
    if (!cfg["list_prob"].is_null())
        list_prob = cfg["list_prob"].uint64_value();
    if (!cfg["min_key_len"].is_null())
        min_key_len = cfg["min_key_len"].uint64_value();
    if (cfg["max_key_len"].uint64_value() > 0)
        max_key_len = cfg["max_key_len"].uint64_value();
    if (!cfg["min_value_len"].is_null())
        min_value_len = cfg["min_value_len"].uint64_value();
    if (cfg["max_value_len"].uint64_value() > 0)
        max_value_len = cfg["max_value_len"].uint64_value();
    if (!cfg["min_list_count"].is_null())
        min_list_count = cfg["min_list_count"].uint64_value();
    if (!cfg["max_list_count"].is_null())
        max_list_count = cfg["max_list_count"].uint64_value();
    if (!cfg["print_stats"].is_null())
        print_stats_interval = cfg["print_stats"].uint64_value();
    if (!cfg["json"].is_null())
        json_output = true;
    if (!cfg["stop_on_error"].is_null())
        stop_on_error = cfg["stop_on_error"].bool_value();
    if (!cfg["kv_memory_limit"].is_null())
        kv_cfg["kv_memory_limit"] = cfg["kv_memory_limit"];
    if (!cfg["kv_allocate_blocks"].is_null())
        kv_cfg["kv_allocate_blocks"] = cfg["kv_allocate_blocks"];
    if (!cfg["kv_evict_max_misses"].is_null())
        kv_cfg["kv_evict_max_misses"] = cfg["kv_evict_max_misses"];
    if (!cfg["kv_evict_attempts_per_level"].is_null())
        kv_cfg["kv_evict_attempts_per_level"] = cfg["kv_evict_attempts_per_level"];
    if (!cfg["kv_evict_unused_age"].is_null())
        kv_cfg["kv_evict_unused_age"] = cfg["kv_evict_unused_age"];
    if (!cfg["kv_log_level"].is_null())
    {
        log_level = cfg["kv_log_level"].uint64_value();
        trace = log_level >= 10;
        kv_cfg["kv_log_level"] = cfg["kv_log_level"];
    }
    total_prob = reopen_prob+get_prob+add_prob+update_prob+del_prob+list_prob;
    stat.get.name = "get";
    stat.add.name = "add";
    stat.update.name = "update";
    stat.del.name = "del";
    stat.list.name = "list";
}

void kv_test_t::run(json11::Json cfg)
{
    srand48(time(NULL));
    parse_config(cfg);
    // Create client
    ringloop = new ring_loop_t(512);
    epmgr = new epoll_manager_t(ringloop);
    cli = new cluster_client_t(ringloop, epmgr->tfd, cfg);
    db = new kv_dbw_t(cli);
    // Load image metadata
    while (!cli->is_ready())
    {
        ringloop->loop();
        if (cli->is_ready())
            break;
        ringloop->wait();
    }
    // Run
    reopening = true;
    db->open(inode_id, kv_cfg, [this](int res)
    {
        reopening = false;
        if (res < 0)
        {
            fprintf(stderr, "ERROR: Open index: %d (%s)\n", res, strerror(-res));
            exit(1);
        }
        if (trace)
            printf("Index opened\n");
        ringloop->wakeup();
    });
    consumer.loop = [this]() { loop(); };
    ringloop->register_consumer(&consumer);
    if (print_stats_interval)
        stat_timer_id = epmgr->tfd->set_timer(print_stats_interval*1000, true, [this](int) { print_stats(prev_stat, prev_stat_time); });
    clock_gettime(CLOCK_REALTIME, &start_stat_time);
    prev_stat_time = start_stat_time;
    while (!finished)
    {
        ringloop->loop();
        if (!finished)
            ringloop->wait();
    }
    if (stat_timer_id >= 0)
        epmgr->tfd->clear_timer(stat_timer_id);
    ringloop->unregister_consumer(&consumer);
    // Print total stats
    print_total_stats();
    // Destroy the client
    delete db;
    db = NULL;
    cli->flush();
    delete cli;
    delete epmgr;
    delete ringloop;
    cli = NULL;
    epmgr = NULL;
    ringloop = NULL;
}

static const char *base64_chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789@+/";

std::string random_str(int len)
{
    std::string str;
    str.resize(len);
    for (int i = 0; i < len; i++)
    {
        str[i] = base64_chars[lrand48() % 64];
    }
    return str;
}

void kv_test_t::loop()
{
    if (reopening)
    {
        return;
    }
    if (ops_done >= op_count)
    {
        finished = true;
    }
    while (!finished && ops_sent < op_count && in_progress < parallelism)
    {
        uint64_t dice = (lrand48() % total_prob);
        if (dice < reopen_prob)
        {
            reopening = true;
            db->close([this]()
            {
                if (trace)
                    printf("Index closed\n");
                db->open(inode_id, kv_cfg, [this](int res)
                {
                    reopening = false;
                    if (res < 0)
                    {
                        fprintf(stderr, "ERROR: Reopen index: %d (%s)\n", res, strerror(-res));
                        finished = true;
                        return;
                    }
                    if (trace)
                        printf("Index reopened\n");
                    ringloop->wakeup();
                });
            });
            return;
        }
        else if (dice < reopen_prob+get_prob)
        {
            // get existing
            auto key = random_str(max_key_len);
            auto k_it = values.lower_bound(key);
            if (k_it == values.end())
                continue;
            key = k_it->first;
            if (changing_keys.find(key) != changing_keys.end())
                continue;
            in_progress++;
            ops_sent++;
            if (trace)
                printf("get %s\n", key.c_str());
            timespec tv_begin;
            clock_gettime(CLOCK_REALTIME, &tv_begin);
            db->get(key, [this, key, tv_begin](int res, const std::string & value)
            {
                add_stat(stat.get, tv_begin);
                ops_done++;
                in_progress--;
                auto it = values.find(key);
                if (res != (it == values.end() ? -ENOENT : 0))
                {
                    fprintf(stderr, "ERROR: get %s: %d (%s)\n", key.c_str(), res, strerror(-res));
                    if (stop_on_error)
                        exit(1);
                }
                else if (it != values.end() && value != it->second)
                {
                    fprintf(stderr, "ERROR: get %s: mismatch: %s vs %s\n", key.c_str(), value.c_str(), it->second.c_str());
                    if (stop_on_error)
                        exit(1);
                }
                ringloop->wakeup();
            });
        }
        else if (dice < reopen_prob+get_prob+add_prob+update_prob)
        {
            bool is_add = false;
            std::string key;
            if (dice < reopen_prob+get_prob+add_prob)
            {
                // add
                is_add = true;
                uint64_t key_len = min_key_len + (max_key_len > min_key_len ? lrand48() % (max_key_len-min_key_len) : 0);
                key = key_prefix + random_str(key_len) + key_suffix;
            }
            else
            {
                // update
                key = random_str(max_key_len);
                auto k_it = values.lower_bound(key);
                if (k_it == values.end())
                    continue;
                key = k_it->first;
            }
            if (changing_keys.find(key) != changing_keys.end())
                continue;
            uint64_t value_len = min_value_len + (max_value_len > min_value_len ? lrand48() % (max_value_len-min_value_len) : 0);
            auto value = random_str(value_len);
            start_change(key);
            ops_sent++;
            in_progress++;
            if (trace)
                printf("set %s = %s\n", key.c_str(), value.c_str());
            timespec tv_begin;
            clock_gettime(CLOCK_REALTIME, &tv_begin);
            db->set(key, value, [this, key, value, tv_begin, is_add](int res)
            {
                add_stat(is_add ? stat.add : stat.update, tv_begin);
                stop_change(key);
                ops_done++;
                in_progress--;
                if (res != 0)
                {
                    fprintf(stderr, "ERROR: set %s = %s: %d (%s)\n", key.c_str(), value.c_str(), res, strerror(-res));
                    if (stop_on_error)
                        exit(1);
                }
                else
                {
                    values[key] = value;
                }
                ringloop->wakeup();
            }, NULL);
        }
        else if (dice < reopen_prob+get_prob+add_prob+update_prob+del_prob)
        {
            // delete
            auto key = random_str(max_key_len);
            auto k_it = values.lower_bound(key);
            if (k_it == values.end())
                continue;
            key = k_it->first;
            if (changing_keys.find(key) != changing_keys.end())
                continue;
            start_change(key);
            ops_sent++;
            in_progress++;
            if (trace)
                printf("del %s\n", key.c_str());
            timespec tv_begin;
            clock_gettime(CLOCK_REALTIME, &tv_begin);
            db->del(key, [this, key, tv_begin](int res)
            {
                add_stat(stat.del, tv_begin);
                stop_change(key);
                ops_done++;
                in_progress--;
                if (res != 0)
                {
                    fprintf(stderr, "ERROR: del %s: %d (%s)\n", key.c_str(), res, strerror(-res));
                    if (stop_on_error)
                        exit(1);
                }
                else
                {
                    values.erase(key);
                }
                ringloop->wakeup();
            }, NULL);
        }
        else if (dice < reopen_prob+get_prob+add_prob+update_prob+del_prob+list_prob)
        {
            // list
            ops_sent++;
            in_progress++;
            auto key = random_str(max_key_len);
            auto lst = new kv_test_listing_t;
            auto k_it = values.lower_bound(key);
            lst->count = min_list_count + (max_list_count > min_list_count ? lrand48() % (max_list_count-min_list_count) : 0);
            lst->handle = db->list_start(k_it == values.begin() ? key_prefix : key);
            lst->next_after = k_it == values.begin() ? key_prefix : key;
            lst->inflights = changing_keys;
            listings.insert(lst);
            if (trace)
                printf("list from %s\n", key.c_str());
            clock_gettime(CLOCK_REALTIME, &lst->tv_begin);
            db->list_next(lst->handle, [this, lst](int res, const std::string & key, const std::string & value)
            {
                if (log_level >= 11)
                    printf("list: %s = %s\n", key.c_str(), value.c_str());
                if (res >= 0 && key_prefix.size() && (key.size() < key_prefix.size() ||
                    key.substr(0, key_prefix.size()) != key_prefix))
                {
                    // stop at this key
                    res = -ENOENT;
                }
                if (res < 0 || (lst->count > 0 && lst->done >= lst->count))
                {
                    add_stat(stat.list, lst->tv_begin);
                    if (res == 0)
                    {
                        // ok (done >= count)
                    }
                    else if (res != -ENOENT)
                    {
                        fprintf(stderr, "ERROR: list: %d (%s)\n", res, strerror(-res));
                        lst->error = true;
                    }
                    else
                    {
                        auto k_it = lst->next_after == "" ? values.begin() : values.upper_bound(lst->next_after);
                        while (k_it != values.end())
                        {
                            while (k_it != values.end() && lst->inflights.find(k_it->first) != lst->inflights.end())
                                k_it++;
                            if (k_it != values.end())
                            {
                                fprintf(stderr, "ERROR: list: missing key %s\n", (k_it++)->first.c_str());
                                lst->error = true;
                            }
                        }
                    }
                    if (lst->error && stop_on_error)
                        exit(1);
                    ops_done++;
                    in_progress--;
                    db->list_close(lst->handle);
                    delete lst;
                    listings.erase(lst);
                    ringloop->wakeup();
                }
                else
                {
                    stat.list_keys++;
                    // Do not check modified keys in listing
                    // Listing may return their old or new state
                    if ((!key_suffix.size() || key.size() >= key_suffix.size() &&
                        key.substr(key.size()-key_suffix.size()) == key_suffix) &&
                        lst->inflights.find(key) == lst->inflights.end())
                    {
                        lst->done++;
                        auto k_it = lst->next_after == "" ? values.begin() : values.upper_bound(lst->next_after);
                        while (true)
                        {
                            while (k_it != values.end() && lst->inflights.find(k_it->first) != lst->inflights.end())
                            {
                                k_it++;
                            }
                            if (k_it == values.end() || k_it->first > key)
                            {
                                fprintf(stderr, "ERROR: list: extra key %s\n", key.c_str());
                                lst->error = true;
                                break;
                            }
                            else if (k_it->first < key)
                            {
                                fprintf(stderr, "ERROR: list: missing key %s\n", k_it->first.c_str());
                                lst->error = true;
                                lst->next_after = k_it->first;
                                k_it++;
                            }
                            else
                            {
                                if (k_it->second != value)
                                {
                                    fprintf(stderr, "ERROR: list: mismatch: %s = %s but should be %s\n",
                                        key.c_str(), value.c_str(), k_it->second.c_str());
                                    lst->error = true;
                                }
                                lst->next_after = k_it->first;
                                break;
                            }
                        }
                    }
                    db->list_next(lst->handle, NULL);
                }
            });
        }
    }
}

void kv_test_t::add_stat(kv_test_lat_t & stat, timespec tv_begin)
{
    timespec tv_end;
    clock_gettime(CLOCK_REALTIME, &tv_end);
    int64_t usec = (tv_end.tv_sec - tv_begin.tv_sec)*1000000 +
        (tv_end.tv_nsec - tv_begin.tv_nsec)/1000;
    if (usec > 0)
    {
        stat.usec += usec;
        stat.count++;
    }
}

void kv_test_t::print_stats(kv_test_stat_t & prev_stat, timespec & prev_stat_time)
{
    timespec cur_stat_time;
    clock_gettime(CLOCK_REALTIME, &cur_stat_time);
    int64_t usec = (cur_stat_time.tv_sec - prev_stat_time.tv_sec)*1000000 +
        (cur_stat_time.tv_nsec - prev_stat_time.tv_nsec)/1000;
    if (usec > 0)
    {
        kv_test_lat_t *lats[] = { &stat.get, &stat.add, &stat.update, &stat.del, &stat.list };
        kv_test_lat_t *prev[] = { &prev_stat.get, &prev_stat.add, &prev_stat.update, &prev_stat.del, &prev_stat.list };
        if (!json_output)
        {
            char buf[128] = { 0 };
            for (int i = 0; i < sizeof(lats)/sizeof(lats[0]); i++)
            {
                snprintf(buf, sizeof(buf)-1, "%.1f %s/s (%lu us)", (lats[i]->count-prev[i]->count)*1000000.0/usec,
                    lats[i]->name, (lats[i]->usec-prev[i]->usec)/(lats[i]->count-prev[i]->count > 0 ? lats[i]->count-prev[i]->count : 1));
                int k;
                for (k = strlen(buf); k < strlen(lats[i]->name)+21; k++)
                    buf[k] = ' ';
                buf[k] = 0;
                printf("%s", buf);
            }
            printf("\n");
        }
        else
        {
            int64_t runtime = (cur_stat_time.tv_sec - start_stat_time.tv_sec)*1000000 +
                (cur_stat_time.tv_nsec - start_stat_time.tv_nsec)/1000;
            printf("{\"runtime\":%.1f", (double)runtime/1000000.0);
            for (int i = 0; i < sizeof(lats)/sizeof(lats[0]); i++)
            {
                if (lats[i]->count > prev[i]->count)
                {
                    printf(
                        ",\"%s\":{\"avg\":{\"iops\":%.1f,\"usec\":%lu},\"total\":{\"count\":%lu,\"usec\":%lu}}",
                        lats[i]->name, (lats[i]->count-prev[i]->count)*1000000.0/usec,
                        (lats[i]->usec-prev[i]->usec)/(lats[i]->count-prev[i]->count),
                        lats[i]->count, lats[i]->usec
                    );
                }
            }
            printf("}\n");
        }
    }
    prev_stat = stat;
    prev_stat_time = cur_stat_time;
}

void kv_test_t::print_total_stats()
{
    if (!json_output)
        printf("Total:\n");
    kv_test_stat_t start_stats;
    timespec start_stat_time = this->start_stat_time;
    print_stats(start_stats, start_stat_time);
}

void kv_test_t::start_change(const std::string & key)
{
    changing_keys.insert(key);
    for (auto lst: listings)
    {
        lst->inflights.insert(key);
    }
}

void kv_test_t::stop_change(const std::string & key)
{
    changing_keys.erase(key);
}

int main(int narg, const char *args[])
{
    setvbuf(stdout, NULL, _IONBF, 0);
    setvbuf(stderr, NULL, _IONBF, 0);
    exe_name = args[0];
    kv_test_t *p = new kv_test_t();
    p->run(kv_test_t::parse_args(narg, args));
    delete p;
    return 0;
}
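For reference, an invocation of this stress tester would follow the USAGE string from its own help text. The binary name and the etcd address option below are assumptions; the remaining options are the ones parsed above:

vitastor-kv-stress --etcd_address 127.0.0.1:2379 --pool_id 1 --inode_id 1000 \
    --parallelism 16 --op_count 100000 --print_stats 1 --json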
@@ -48,6 +48,15 @@ osd_t::osd_t(const json11::Json & config, ring_loop_t *ringloop)
     {
         auto bs_cfg = json_to_bs(this->config);
         this->bs = new blockstore_t(bs_cfg, ringloop, tfd);
+        // Wait for blockstore initialisation before actually starting OSD logic
+        // to prevent peering timeouts during restart with filled databases
+        while (!bs->is_started())
+        {
+            ringloop->loop();
+            if (bs->is_started())
+                break;
+            ringloop->wait();
+        }
         // Autosync based on the number of unstable writes to prevent stalls due to insufficient journal space
         uint64_t max_autosync = bs->get_journal_size() / bs->get_block_size() / 2;
         if (autosync_writes > max_autosync)
@@ -3,6 +3,8 @@
 #pragma once
 
+#include "object_id.h"
+
 #define POOL_SCHEME_REPLICATED 1
 #define POOL_SCHEME_XOR 2
 #define POOL_SCHEME_EC 3
@@ -214,7 +214,10 @@ void print_help(const char *help_text, std::string exe_name, std::string cmd, bo
         else if (*next_line && isspace(*next_line))
             started = true;
         else if (cmd_start && matched)
+        {
             filtered_text += std::string(cmd_start, next_line-cmd_start);
+            matched = started = false;
+        }
     }
     while (filtered_text.size() > 1 &&
         filtered_text[filtered_text.size()-1] == '\n' &&
@@ -324,3 +327,24 @@ size_t utf8_length(const char *s)
         len += (*s & 0xC0) != 0x80;
     return len;
 }
+
+std::vector<std::string> explode(const std::string & sep, const std::string & value, bool trim)
+{
+    std::vector<std::string> res;
+    size_t prev = 0;
+    while (prev < value.size())
+    {
+        while (trim && prev < value.size() && isspace(value[prev]))
+            prev++;
+        size_t pos = value.find(sep, prev);
+        if (pos == std::string::npos)
+            pos = value.size();
+        size_t next = pos+sep.size();
+        while (trim && pos > prev && isspace(value[pos-1]))
+            pos--;
+        if (!trim || pos > prev)
+            res.push_back(value.substr(prev, pos-prev));
+        prev = next;
+    }
+    return res;
+}
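As a usage note worked out from the function above (not part of the diff): with trim enabled, whitespace around elements is stripped and empty elements are dropped, while without trim empty elements are preserved:

// explode(",", " a , ,b ", true)  returns {"a", "b"}
// explode(",", "a,,b", false)     returns {"a", "", "b"}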
@@ -4,6 +4,7 @@
 #pragma once
 #include <stdint.h>
 #include <string>
+#include <vector>
 
 std::string base64_encode(const std::string &in);
 std::string base64_decode(const std::string &in);
@@ -20,3 +21,4 @@ std::string read_all_fd(int fd);
 std::string str_repeat(const std::string & str, int times);
 size_t utf8_length(const std::string & s);
 size_t utf8_length(const char *s);
+std::vector<std::string> explode(const std::string & sep, const std::string & value, bool trim);
@@ -6,7 +6,7 @@ includedir=${prefix}/@CMAKE_INSTALL_INCLUDEDIR@
 
 Name: Vitastor
 Description: Vitastor client library
-Version: 1.4.7
+Version: 1.4.8
 Libs: -L${libdir} -lvitastor_client
 Cflags: -I${includedir}
 
@@ -13,7 +13,14 @@ $ETCDCTL put /vitastor/osd/stats/5 '{"host":"host3","size":1073741824,"time":"'$
 $ETCDCTL put /vitastor/osd/stats/6 '{"host":"host3","size":1073741824,"time":"'$TIME'"}'
 $ETCDCTL put /vitastor/osd/stats/7 '{"host":"host4","size":1073741824,"time":"'$TIME'"}'
 $ETCDCTL put /vitastor/osd/stats/8 '{"host":"host4","size":1073741824,"time":"'$TIME'"}'
-$ETCDCTL put /vitastor/config/pools '{"1":{"name":"testpool","scheme":"replicated","pg_size":2,"pg_minsize":1,"pg_count":4,"failure_domain":"rack"}}'
+build/src/vitastor-cli --etcd_address $ETCD_URL create-pool testpool --ec 3+2 -n 32 --failure_domain rack --force
+$ETCDCTL get --print-value-only /vitastor/config/pools | jq -s -e '. == [{"1": {"failure_domain": "rack", "name": "testpool", "parity_chunks": 2, "pg_count": 32, "pg_minsize": 4, "pg_size": 5, "scheme": "ec"}}]'
+build/src/vitastor-cli --etcd_address $ETCD_URL modify-pool testpool --ec 3+3 --failure_domain host
+$ETCDCTL get --print-value-only /vitastor/config/pools | jq -s -e '. == [{"1": {"failure_domain": "host", "name": "testpool", "parity_chunks": 3, "pg_count": 32, "pg_minsize": 4, "pg_size": 6, "scheme": "ec"}}]'
+build/src/vitastor-cli --etcd_address $ETCD_URL rm-pool testpool
+$ETCDCTL get --print-value-only /vitastor/config/pools | jq -s -e '. == [{}]'
+build/src/vitastor-cli --etcd_address $ETCD_URL create-pool testpool -s 2 -n 4 --failure_domain rack --force
+$ETCDCTL get --print-value-only /vitastor/config/pools | jq -s -e '. == [{"1":{"name":"testpool","scheme":"replicated","pg_size":2,"pg_minsize":1,"pg_count":4,"failure_domain":"rack"}}]'
 
 node mon/mon-main.js --etcd_address $ETCD_URL --etcd_prefix "/vitastor" >>./testdata/mon.log 2>&1 &
 MON_PID=$!