forked from vitalif/vitastor
Compare commits
34 Commits:
- a9ef9a86c0
- 25832cb7e4
- e6326c6539
- e32f382815
- fb23d94000
- ee462c2dad
- 16e4c767f1
- 9e2b677499
- fd57096d2d
- e5ae907256
- 64fd6f1c56
- 6e76e09d16
- 0964aeebd2
- facff20ca1
- 16e09745f0
- 442f44a64f
- c67e3d56cb
- de41e46335
- bf9a279ff9
- b7a41e6394
- 4175cb3720
- 3a4b71b0cd
- 34969c5919
- 02a8df6586
- 86c6482cf3
- 4dd68c543c
- 10ad96c56c
- 6e451117ce
- 85f35bdf30
- 09adaf62fd
- af93f8323c
- 3d29c76ff4
- 19275379c1
- 96ad3c7c50
@@ -395,7 +395,7 @@ jobs:
steps:
- name: Run test
id: test
timeout-minutes: 6
timeout-minutes: 3
run: SCHEME=ec /root/vitastor/tests/test_snapshot_chain.sh
- name: Print logs
if: always() && steps.test.outcome == 'failure'
@@ -532,24 +532,6 @@ jobs:
echo ""
done

test_switch_primary:
runs-on: ubuntu-latest
needs: build
container: ${{env.TEST_IMAGE}}:${{github.sha}}
steps:
- name: Run test
id: test
timeout-minutes: 3
run: /root/vitastor/tests/test_switch_primary.sh
- name: Print logs
if: always() && steps.test.outcome == 'failure'
run: |
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
echo "-------- $i --------"
cat $i
echo ""
done

test_write:
runs-on: ubuntu-latest
needs: build
@@ -39,10 +39,6 @@ for my $line (<>)
$test_name .= '_'.lc($1).'_'.$2;
}
}
if ($test_name eq 'test_snapshot_chain_ec')
{
$timeout = 6;
}
$line =~ s!\./test_!/root/vitastor/tests/test_!;
# Gitea CI doesn't support artifacts yet, lol
#- name: Upload results
@@ -38,7 +38,7 @@ in the offer.
on behalf of third parties, including on behalf of his employer.

2. Subject of the Agreement. \
2.1. Subject of the Agreement shall be the Contributions sent to the Author by Contributors. \
2.1. Subject of the Agreement shall be the Contributions sent to the Author by Contributors.
2.2. The Contributor grants to the Author the right to use Contributions at his own
discretion and without any necessity to get a prior approval from Contributor or
any other third party in any way, under a simple (non-exclusive), royalty-free,
@@ -86,7 +86,7 @@ in the offer.
of their provision to the Author. \
5.2. The Contributor represents and warrants that he legally owns exclusive
intellectual property rights to the Contributions. \
5.3. The Contributor represents and warrants that any further use of
5.3. The Contributor represents and warrants that any further use of \
Contributions by the Author as provided by Contributor under the terms
of the Agreement does not infringe on intellectual and other rights and
legitimate interests of third parties. \
@@ -2,6 +2,6 @@ cmake_minimum_required(VERSION 2.8.12)

project(vitastor)

set(VERSION "1.4.4")
set(VERSION "1.3.1")

add_subdirectory(src)
Submodule cpp-btree updated: 8de8b467ac...45e6d1f131
@@ -1,4 +1,4 @@
VERSION ?= v1.4.4
VERSION ?= v1.3.1

all: build push
@@ -49,7 +49,7 @@ spec:
|
||||
capabilities:
|
||||
add: ["SYS_ADMIN"]
|
||||
allowPrivilegeEscalation: true
|
||||
image: vitalif/vitastor-csi:v1.4.4
|
||||
image: vitalif/vitastor-csi:v1.3.1
|
||||
args:
|
||||
- "--node=$(NODE_ID)"
|
||||
- "--endpoint=$(CSI_ENDPOINT)"
|
||||
|
@@ -121,7 +121,7 @@ spec:
|
||||
privileged: true
|
||||
capabilities:
|
||||
add: ["SYS_ADMIN"]
|
||||
image: vitalif/vitastor-csi:v1.4.4
|
||||
image: vitalif/vitastor-csi:v1.3.1
|
||||
args:
|
||||
- "--node=$(NODE_ID)"
|
||||
- "--endpoint=$(CSI_ENDPOINT)"
|
||||
|
@@ -5,7 +5,7 @@ package vitastor
|
||||
|
||||
const (
|
||||
vitastorCSIDriverName = "csi.vitastor.io"
|
||||
vitastorCSIDriverVersion = "1.4.4"
|
||||
vitastorCSIDriverVersion = "1.3.1"
|
||||
)
|
||||
|
||||
// Config struct fills the parameters of request or user input
|
||||
|
@@ -188,6 +188,7 @@ func (ns *NodeServer) unmapNbd(devicePath string)
|
||||
|
||||
func findByPidFile(pidFile string) (*os.Process, error)
|
||||
{
|
||||
klog.Infof("killing process with PID from file %s", pidFile)
|
||||
pidBuf, err := os.ReadFile(pidFile)
|
||||
if (err != nil)
|
||||
{
|
||||
@@ -208,7 +209,6 @@ func findByPidFile(pidFile string) (*os.Process, error)
|
||||
|
||||
func killByPidFile(pidFile string) error
|
||||
{
|
||||
klog.Infof("killing process with PID from file %s", pidFile)
|
||||
proc, err := findByPidFile(pidFile)
|
||||
if (err != nil)
|
||||
{
|
||||
|
2
debian/changelog
vendored
@@ -1,4 +1,4 @@
|
||||
vitastor (1.4.4-1) unstable; urgency=medium
|
||||
vitastor (1.3.1-1) unstable; urgency=medium
|
||||
|
||||
* Bugfixes
|
||||
|
||||
|
8
debian/vitastor.Dockerfile
vendored
@@ -35,8 +35,8 @@ RUN set -e -x; \
|
||||
mkdir -p /root/packages/vitastor-$REL; \
|
||||
rm -rf /root/packages/vitastor-$REL/*; \
|
||||
cd /root/packages/vitastor-$REL; \
|
||||
cp -r /root/vitastor vitastor-1.4.4; \
|
||||
cd vitastor-1.4.4; \
|
||||
cp -r /root/vitastor vitastor-1.3.1; \
|
||||
cd vitastor-1.3.1; \
|
||||
ln -s /root/fio-build/fio-*/ ./fio; \
|
||||
FIO=$(head -n1 fio/debian/changelog | perl -pe 's/^.*\((.*?)\).*$/$1/'); \
|
||||
ls /usr/include/linux/raw.h || cp ./debian/raw.h /usr/include/linux/raw.h; \
|
||||
@@ -49,8 +49,8 @@ RUN set -e -x; \
|
||||
rm -rf a b; \
|
||||
echo "dep:fio=$FIO" > debian/fio_version; \
|
||||
cd /root/packages/vitastor-$REL; \
|
||||
tar --sort=name --mtime='2020-01-01' --owner=0 --group=0 --exclude=debian -cJf vitastor_1.4.4.orig.tar.xz vitastor-1.4.4; \
|
||||
cd vitastor-1.4.4; \
|
||||
tar --sort=name --mtime='2020-01-01' --owner=0 --group=0 --exclude=debian -cJf vitastor_1.3.1.orig.tar.xz vitastor-1.3.1; \
|
||||
cd vitastor-1.3.1; \
|
||||
V=$(head -n1 debian/changelog | perl -pe 's/^.*\((.*?)\).*$/$1/'); \
|
||||
DEBFULLNAME="Vitaliy Filippov <vitalif@yourcmc.ru>" dch -D $REL -v "$V""$REL" "Rebuild for $REL"; \
|
||||
DEB_BUILD_OPTIONS=nocheck dpkg-buildpackage --jobs=auto -sa; \
|
||||
|
@@ -19,8 +19,8 @@ These parameters only apply to Monitors.
## etcd_mon_ttl

- Type: seconds
- Default: 1
- Minimum: 5
- Default: 30
- Minimum: 10

Monitor etcd lease refresh interval in seconds
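This parameter pair is mirrored by the `check_config()` hunk in `mon/mon.js` further down in this compare, where one side normalizes the value to a default of 30 with a minimum of 10. A minimal sketch of that normalization (illustrative only, not the real monitor code):

```js
// Sketch: normalize etcd_mon_ttl the way one side of the mon/mon.js
// check_config() hunk below does (default 30 s, clamp to a 10 s minimum).
function normalizeEtcdMonTtl(config)
{
    let ttl = Number(config.etcd_mon_ttl) || 30;   // fall back to the default
    if (ttl < 10)
        ttl = 10;                                  // enforce the minimum
    return ttl;
}

console.log(normalizeEtcdMonTtl({}));                  // 30
console.log(normalizeEtcdMonTtl({ etcd_mon_ttl: 3 })); // 10
```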
@@ -19,8 +19,8 @@
## etcd_mon_ttl

- Тип: секунды
- Значение по умолчанию: 1
- Минимальное значение: 5
- Значение по умолчанию: 30
- Минимальное значение: 10

Интервал обновления etcd резервации (lease) монитором
@@ -215,8 +215,8 @@ is scheduled.
## up_wait_retry_interval

- Type: milliseconds
- Default: 50
- Minimum: 10
- Default: 500
- Minimum: 50
- Can be changed online: yes

OSDs respond to clients with a special error code when they receive I/O
@@ -224,8 +224,8 @@ OSD в любом случае согласовывают реальное зн
## up_wait_retry_interval

- Тип: миллисекунды
- Значение по умолчанию: 50
- Минимальное значение: 10
- Значение по умолчанию: 500
- Минимальное значение: 50
- Можно менять на лету: да

Когда OSD получают от клиентов запросы ввода-вывода, относящиеся к не
@@ -59,7 +59,6 @@ them, even without restarting by updating configuration in etcd.
- [recovery_tune_client_util_high](#recovery_tune_client_util_high)
- [recovery_tune_agg_interval](#recovery_tune_agg_interval)
- [recovery_tune_sleep_min_us](#recovery_tune_sleep_min_us)
- [recovery_tune_sleep_cutoff_us](#recovery_tune_sleep_cutoff_us)

## etcd_report_interval

@@ -605,14 +604,5 @@ is usually fine.
- Default: 10
- Can be changed online: yes

Minimum possible value for auto-tuned recovery_sleep_us. Lower values
are changed to 0.

## recovery_tune_sleep_cutoff_us

- Type: microseconds
- Default: 10000000
- Can be changed online: yes

Maximum possible value for auto-tuned recovery_sleep_us. Higher values
are treated as outliers and ignored in aggregation.
Minimum possible value for auto-tuned recovery_sleep_us. Values lower
than this value are changed to 0.
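The two limits described in this hunk interact in a simple way: auto-tuned sleep values below the minimum collapse to 0, values above the cutoff are dropped as outliers. An illustrative sketch of that semantics (this is not OSD code, just the documented rule applied to a single sample):

```js
// Sketch of recovery_tune_sleep_min_us / recovery_tune_sleep_cutoff_us semantics:
// below the minimum -> no sleep at all, above the cutoff -> ignored as an outlier.
function applyRecoveryTuneLimits(sleepUs, minUs = 10, cutoffUs = 10000000)
{
    if (sleepUs > cutoffUs)
        return null;        // outlier: ignored in aggregation
    if (sleepUs < minUs)
        return 0;           // too small to matter: changed to 0
    return sleepUs;
}

console.log(applyRecoveryTuneLimits(5));         // 0
console.log(applyRecoveryTuneLimits(500));       // 500
console.log(applyRecoveryTuneLimits(20000000));  // null (outlier)
```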
@@ -60,7 +60,6 @@
- [recovery_tune_client_util_high](#recovery_tune_client_util_high)
- [recovery_tune_agg_interval](#recovery_tune_agg_interval)
- [recovery_tune_sleep_min_us](#recovery_tune_sleep_min_us)
- [recovery_tune_sleep_cutoff_us](#recovery_tune_sleep_cutoff_us)

## etcd_report_interval

@@ -635,14 +634,4 @@ EC (кодов коррекции ошибок) с более, чем 1 диск
- Можно менять на лету: да

Минимальное возможное значение авто-подстроенного recovery_sleep_us.
Меньшие значения заменяются на 0.

## recovery_tune_sleep_cutoff_us

- Тип: микросекунды
- Значение по умолчанию: 10000000
- Можно менять на лету: да

Максимальное возможное значение авто-подстроенного recovery_sleep_us.
Большие значения считаются случайными выбросами и игнорируются в
усреднении.
Значения ниже данного заменяются на 0.
@@ -1,7 +1,7 @@
|
||||
- name: etcd_mon_ttl
|
||||
type: sec
|
||||
min: 5
|
||||
default: 1
|
||||
min: 10
|
||||
default: 30
|
||||
info: Monitor etcd lease refresh interval in seconds
|
||||
info_ru: Интервал обновления etcd резервации (lease) монитором
|
||||
- name: etcd_mon_timeout
|
||||
|
@@ -245,8 +245,8 @@
|
||||
повторная попытка соединения.
|
||||
- name: up_wait_retry_interval
|
||||
type: ms
|
||||
min: 10
|
||||
default: 50
|
||||
min: 50
|
||||
default: 500
|
||||
online: true
|
||||
info: |
|
||||
OSDs respond to clients with a special error code when they receive I/O
|
||||
|
@@ -731,19 +731,8 @@
|
||||
default: 10
|
||||
online: true
|
||||
info: |
|
||||
Minimum possible value for auto-tuned recovery_sleep_us. Lower values
|
||||
are changed to 0.
|
||||
Minimum possible value for auto-tuned recovery_sleep_us. Values lower
|
||||
than this value are changed to 0.
|
||||
info_ru: |
|
||||
Минимальное возможное значение авто-подстроенного recovery_sleep_us.
|
||||
Меньшие значения заменяются на 0.
|
||||
- name: recovery_tune_sleep_cutoff_us
|
||||
type: us
|
||||
default: 10000000
|
||||
online: true
|
||||
info: |
|
||||
Maximum possible value for auto-tuned recovery_sleep_us. Higher values
|
||||
are treated as outliers and ignored in aggregation.
|
||||
info_ru: |
|
||||
Максимальное возможное значение авто-подстроенного recovery_sleep_us.
|
||||
Большие значения считаются случайными выбросами и игнорируются в
|
||||
усреднении.
|
||||
Значения ниже данного заменяются на 0.
|
||||
|
@@ -37,7 +37,6 @@ Vitastor CSI supports:
- Volume snapshots. Example: [snapshot class](../../csi/deploy/example-snapshot-class.yaml), [snapshot](../../csi/deploy/example-snapshot.yaml), [clone](../../csi/deploy/example-snapshot-clone.yaml)
- [VDUSE](../usage/qemu.en.md#vduse) (preferred) and [NBD](../usage/nbd.en.md) device mapping methods
- Upgrades with VDUSE - new handler processes are restarted when CSI pods are restarted themselves
- VDUSE daemon auto-restart - handler processes are automatically restarted if they crash due to a bug in Vitastor client code
- Multiple clusters by using multiple configuration files in ConfigMap.

Remember that to use snapshots with CSI you also have to install [Snapshot Controller and CRDs](https://kubernetes-csi.github.io/docs/snapshot-controller.html#deployment).
@@ -37,7 +37,6 @@ CSI-плагин Vitastor поддерживает:
- Снимки томов. Пример: [класс снимков](../../csi/deploy/example-snapshot-class.yaml), [снимок](../../csi/deploy/example-snapshot.yaml), [клон снимка](../../csi/deploy/example-snapshot-clone.yaml)
- Способы подключения устройств [VDUSE](../usage/qemu.ru.md#vduse) (предпочитаемый) и [NBD](../usage/nbd.ru.md)
- Обновление при использовании VDUSE - новые процессы-обработчики устройств успешно перезапускаются вместе с самими подами CSI
- Автоперезапуск демонов VDUSE - процесс-обработчик автоматически перезапустится, если он внезапно упадёт из-за бага в коде клиента Vitastor
- Несколько кластеров через задание нескольких файлов конфигурации в ConfigMap.

Не забывайте, что для использования снимков нужно сначала установить [контроллер снимков и CRD](https://kubernetes-csi.github.io/docs/snapshot-controller.html#deployment).
@@ -11,26 +11,19 @@ Replicated setups:
- Single-threaded write+fsync latency:
  - With immediate commit: 2 network roundtrips + 1 disk write.
  - With lazy commit: 4 network roundtrips + 1 disk write + 1 disk flush.
- Linear read: `min(total network bandwidth, sum(disk read MB/s))`.
- Linear write: `min(total network bandwidth, sum(disk write MB/s / number of replicas))`.
- Saturated parallel read iops: `min(total network bandwidth, sum(disk read iops))`.
- Saturated parallel write iops: `min(total network bandwidth / number of replicas, sum(disk write iops / number of replicas / (write amplification = 4)))`.
- Saturated parallel read iops: min(network bandwidth, sum(disk read iops)).
- Saturated parallel write iops: min(network bandwidth, sum(disk write iops / number of replicas / write amplification)).

EC/XOR setups (EC N+K):
EC/XOR setups:
- Single-threaded (T1Q1) read latency: 1.5 network roundtrips + 1 disk read.
- Single-threaded write+fsync latency:
  - With immediate commit: 3.5 network roundtrips + 1 disk read + 2 disk writes.
  - With lazy commit: 5.5 network roundtrips + 1 disk read + 2 disk writes + 2 disk fsyncs.
  - 0.5 is actually `(N-1)/N`, which means that an additional roundtrip doesn't happen when
  - 0.5 is actually (k-1)/k, which means that an additional roundtrip doesn't happen when
    the read sub-operation can be served locally.
- Linear read: `min(total network bandwidth, sum(disk read MB/s))`.
- Linear write: `min(total network bandwidth, sum(disk write MB/s * N/(N+K)))`.
- Saturated parallel read iops: `min(total network bandwidth, sum(disk read iops))`.
- Saturated parallel write iops: roughly `total iops / (N+K) / WA`. More exactly,
  `min(total network bandwidth * N/(N+K), sum(disk randrw iops / (N*4 + K*5 + 1)))` with
  random read/write mix corresponding to `(N-1)/(N*4 + K*5 + 1)*100 % reads`.
  - For example, with EC 2+1 it is: `(7% randrw iops) / 14`.
  - With EC 6+3 it is: `(12.5% randrw iops) / 40`.
- Saturated parallel read iops: min(network bandwidth, sum(disk read iops)).
- Saturated parallel write iops: min(network bandwidth, sum(disk write iops * number of data drives / (number of data + parity drives) / write amplification)).
  In fact, you should put disk write iops under the condition of ~10% reads / ~90% writes in this formula.

Write amplification for 4 KB blocks is usually 3-5 in Vitastor:
1. Journal block write
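To make the EC write formula in the hunk above concrete, here is a small calculator that only restates its arithmetic; the input numbers are made up for illustration:

```js
// Sketch: estimate saturated parallel write iops for EC N+K per the formula above:
// min(network bandwidth * N/(N+K), sum(disk randrw iops / (N*4 + K*5 + 1))).
// The per-disk randrw iops should be measured at ~(N-1)/(N*4+K*5+1)*100 % reads.
function ecWriteIops(n, k, perDiskRandrwIops, diskCount, networkIopsLimit)
{
    const divisor = n * 4 + k * 5 + 1;            // 14 for EC 2+1, 40 for EC 6+3
    const diskLimited = diskCount * perDiskRandrwIops / divisor;
    const netLimited = networkIopsLimit * n / (n + k);
    return Math.min(diskLimited, netLimited);
}

// Hypothetical numbers: EC 2+1, 12 disks at 20000 randrw iops each,
// network assumed not to be the bottleneck.
console.log(Math.round(ecWriteIops(2, 1, 20000, 12, Infinity))); // ≈ 17143
```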
@@ -11,27 +11,20 @@
- Запись+fsync в 1 поток:
  - С мгновенным сбросом: 2 RTT + 1 запись.
  - С отложенным ("ленивым") сбросом: 4 RTT + 1 запись + 1 fsync.
- Линейное чтение: сумма МБ/с чтения всех дисков, либо общая производительность сети (сумма пропускной способности сети всех нод), если в сеть упрётся раньше.
- Линейная запись: сумма МБ/с записи всех дисков / число реплик, либо производительность сети / число реплик, если в сеть упрётся раньше.
- Параллельное случайное мелкое чтение: сумма IOPS чтения всех дисков, либо производительность сети, если в сеть упрётся раньше.
- Параллельная случайная мелкая запись: сумма IOPS записи всех дисков / число реплик / WA, либо производительность сети / число реплик, если в сеть упрётся раньше.
- Параллельное чтение: сумма IOPS всех дисков либо производительность сети, если в сеть упрётся раньше.
- Параллельная запись: сумма IOPS всех дисков / число реплик / WA либо производительность сети, если в сеть упрётся раньше.

При использовании кодов коррекции ошибок (EC N+K):
При использовании кодов коррекции ошибок (EC):
- Задержка чтения в 1 поток (T1Q1): 1.5 RTT + 1 чтение.
- Запись+fsync в 1 поток:
  - С мгновенным сбросом: 3.5 RTT + 1 чтение + 2 записи.
  - С отложенным ("ленивым") сбросом: 5.5 RTT + 1 чтение + 2 записи + 2 fsync.
  - Под 0.5 на самом деле подразумевается (N-1)/N, где N - число дисков данных,
  - Под 0.5 на самом деле подразумевается (k-1)/k, где k - число дисков данных,
    что означает, что дополнительное обращение по сети не нужно, когда операция
    чтения обслуживается локально.
- Линейное чтение: сумма МБ/с чтения всех дисков, либо общая производительность сети, если в сеть упрётся раньше.
- Линейная запись: сумма МБ/с записи всех дисков * N/(N+K), либо производительность сети * N / (N+K), если в сеть упрётся раньше.
- Параллельное случайное мелкое чтение: сумма IOPS чтения всех дисков либо производительность сети, если в сеть упрётся раньше.
- Параллельная случайная мелкая запись: грубо `(сумма IOPS / (N+K) / WA)`. Если точнее, то:
  сумма смешанного IOPS всех дисков при `(N-1)/(N*4 + K*5 + 1)*100 %` чтения, делённая на `(N*4 + K*5 + 1)`.
  Либо, производительность сети * N/(N+K), если в сеть упрётся раньше.
  - Например, при EC 2+1 это: `(сумма IOPS при 7% чтения) / 14`.
  - При EC 6+3 это: `(сумма IOPS при 12.5% чтения) / 40`.
- Параллельное чтение: сумма IOPS всех дисков либо производительность сети, если в сеть упрётся раньше.
- Параллельная запись: сумма IOPS всех дисков / общее число дисков данных и чётности / WA либо производительность сети, если в сеть упрётся раньше.
  Примечание: IOPS дисков в данном случае надо брать в смешанном режиме чтения/записи в пропорции, аналогичной формулам выше.

WA (мультипликатор записи) для 4 КБ блоков в Vitastor обычно составляет 3-5:
1. Запись метаданных в журнал
67
mon/mon.js
@@ -55,7 +55,7 @@ const etcd_tree = {
|
||||
// etcd connection - configurable online
|
||||
etcd_address: "10.0.115.10:2379/v3",
|
||||
// mon
|
||||
etcd_mon_ttl: 5, // min: 1
|
||||
etcd_mon_ttl: 30, // min: 10
|
||||
etcd_mon_timeout: 1000, // ms. min: 0
|
||||
etcd_mon_retries: 5, // min: 0
|
||||
mon_change_timeout: 1000, // ms. min: 100
|
||||
@@ -92,7 +92,7 @@ const etcd_tree = {
|
||||
peer_connect_timeout: 5, // seconds. min: 1
|
||||
osd_idle_timeout: 5, // seconds. min: 1
|
||||
osd_ping_timeout: 5, // seconds. min: 1
|
||||
up_wait_retry_interval: 50, // ms. min: 10
|
||||
up_wait_retry_interval: 500, // ms. min: 50
|
||||
max_etcd_attempts: 5,
|
||||
etcd_quick_timeout: 1000, // ms
|
||||
etcd_slow_timeout: 5000, // ms
|
||||
@@ -390,8 +390,7 @@ class Mon
|
||||
{
|
||||
constructor(config)
|
||||
{
|
||||
this.failconnect = (e) => this._die(e, 2);
|
||||
this.die = (e) => this._die(e, 1);
|
||||
this.die = (e) => this._die(e);
|
||||
if (fs.existsSync(config.config_path||'/etc/vitastor/vitastor.conf'))
|
||||
{
|
||||
config = {
|
||||
@@ -480,10 +479,10 @@ class Mon
|
||||
|
||||
check_config()
|
||||
{
|
||||
this.config.etcd_mon_ttl = Number(this.config.etcd_mon_ttl) || 5;
|
||||
if (this.config.etcd_mon_ttl < 1)
|
||||
this.config.etcd_mon_ttl = Number(this.config.etcd_mon_ttl) || 30;
|
||||
if (this.config.etcd_mon_ttl < 10)
|
||||
{
|
||||
this.config.etcd_mon_ttl = 1;
|
||||
this.config.etcd_mon_ttl = 10;
|
||||
}
|
||||
this.config.etcd_mon_timeout = Number(this.config.etcd_mon_timeout) || 0;
|
||||
if (this.config.etcd_mon_timeout <= 0)
|
||||
@@ -605,7 +604,7 @@ class Mon
|
||||
}
|
||||
if (!this.ws)
|
||||
{
|
||||
this.failconnect('Failed to open etcd watch websocket');
|
||||
this.die('Failed to open etcd watch websocket');
|
||||
}
|
||||
const cur_addr = this.selected_etcd_url;
|
||||
this.ws_alive = true;
|
||||
@@ -675,12 +674,7 @@ class Mon
|
||||
{
|
||||
this.parse_kv(e.kv);
|
||||
const key = e.kv.key.substr(this.etcd_prefix.length);
|
||||
if (key.substr(0, 11) == '/osd/state/')
|
||||
{
|
||||
stats_changed = true;
|
||||
changed = true;
|
||||
}
|
||||
else if (key.substr(0, 11) == '/osd/stats/' || key.substr(0, 10) == '/pg/stats/' || key.substr(0, 16) == '/osd/inodestats/')
|
||||
if (key.substr(0, 11) == '/osd/stats/' || key.substr(0, 10) == '/pg/stats/' || key.substr(0, 16) == '/osd/inodestats/')
|
||||
{
|
||||
stats_changed = true;
|
||||
}
|
||||
@@ -797,9 +791,9 @@ class Mon
|
||||
const res = await this.etcd_call('/lease/keepalive', { ID: this.etcd_lease_id }, this.config.etcd_mon_timeout, this.config.etcd_mon_retries);
|
||||
if (!res.result.TTL)
|
||||
{
|
||||
this.failconnect('Lease expired');
|
||||
this.die('Lease expired');
|
||||
}
|
||||
}, this.config.etcd_mon_ttl*1000);
|
||||
}, this.config.etcd_mon_timeout);
|
||||
if (!this.signals_set)
|
||||
{
|
||||
process.on('SIGINT', this.on_stop_cb);
|
||||
@@ -1420,14 +1414,7 @@ class Mon
|
||||
}
|
||||
if (changed)
|
||||
{
|
||||
const ok = await this.save_pg_config(new_config_pgs);
|
||||
if (ok)
|
||||
console.log('PG configuration successfully changed');
|
||||
else
|
||||
{
|
||||
console.log('Someone changed PG configuration while we also tried to change it. Retrying in '+this.config.mon_change_timeout+' ms');
|
||||
this.schedule_recheck();
|
||||
}
|
||||
await this.save_pg_config(new_config_pgs);
|
||||
}
|
||||
}
|
||||
this.recheck_pgs_active = false;
|
||||
@@ -1508,11 +1495,6 @@ class Mon
|
||||
this.save_new_pgs_txn(new_config_pgs, etcd_request, pool_id, up_osds, osd_tree, real_prev_pgs, pool_res.pgs, pg_history);
|
||||
}
|
||||
new_config_pgs.hash = tree_hash;
|
||||
return await this.save_pg_config(new_config_pgs, etcd_request);
|
||||
}
|
||||
|
||||
async save_pg_config(new_config_pgs, etcd_request = { compare: [], success: [] })
|
||||
{
|
||||
etcd_request.compare.push(
|
||||
{ key: b64(this.etcd_prefix+'/mon/master'), target: 'LEASE', lease: ''+this.etcd_lease_id },
|
||||
{ key: b64(this.etcd_prefix+'/config/pgs'), target: 'MOD', mod_revision: ''+this.etcd_watch_revision, result: 'LESS' },
|
||||
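The compare guards in save_pg_config() above make PG configuration writes optimistic: the transaction only succeeds while this monitor still holds the master lease and /config/pgs has not been modified past the last watched revision, which is why the earlier hunk has a "Someone changed PG configuration while we also tried to change it. Retrying" path. A hedged sketch of that compare-and-set pattern; b64() and etcd_call() are assumed helpers mirroring mon/mon.js naming, and the exact transaction body is an illustration, not the real method:

```js
// Sketch: write /config/pgs only if we still hold the master lease and nobody
// changed the key since the revision we last saw; a false result means "retry later".
async function savePgConfigSketch(mon, new_config_pgs)
{
    const request = {
        compare: [
            { key: b64(mon.etcd_prefix+'/mon/master'), target: 'LEASE', lease: ''+mon.etcd_lease_id },
            { key: b64(mon.etcd_prefix+'/config/pgs'), target: 'MOD', mod_revision: ''+mon.etcd_watch_revision, result: 'LESS' },
        ],
        success: [
            { requestPut: { key: b64(mon.etcd_prefix+'/config/pgs'), value: b64(JSON.stringify(new_config_pgs)) } },
        ],
    };
    const res = await mon.etcd_call('/kv/txn', request, mon.config.etcd_mon_timeout, 0);
    return !!(res.json && res.json.succeeded);
}
```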
@@ -1640,13 +1622,9 @@ class Mon
|
||||
}
|
||||
const sum_diff = { op_stats: {}, subop_stats: {}, recovery_stats: {} };
|
||||
// Sum derived values instead of deriving summed
|
||||
for (const osd in this.state.osd.state)
|
||||
for (const osd in this.state.osd.stats)
|
||||
{
|
||||
const derived = this.prev_stats.osd_diff[osd];
|
||||
if (!this.state.osd.state[osd] || !derived)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
for (const type in sum_diff)
|
||||
{
|
||||
for (const op in derived[type]||{})
|
||||
@@ -1747,13 +1725,9 @@ class Mon
|
||||
const used = this.state.pool.stats[pool_id].used_raw_tb;
|
||||
this.state.pool.stats[pool_id].used_raw_tb = Number(used)/1024/1024/1024/1024;
|
||||
}
|
||||
for (const osd_num in this.state.osd.state)
|
||||
for (const osd_num in this.state.osd.inodestats)
|
||||
{
|
||||
const ist = this.state.osd.inodestats[osd_num];
|
||||
if (!ist || !this.state.osd.state[osd_num])
|
||||
{
|
||||
continue;
|
||||
}
|
||||
for (const pool_id in ist)
|
||||
{
|
||||
inode_stats[pool_id] = inode_stats[pool_id] || {};
|
||||
@@ -1769,14 +1743,9 @@ class Mon
|
||||
}
|
||||
}
|
||||
}
|
||||
for (const osd in this.state.osd.state)
|
||||
for (const osd in this.prev_stats.osd_diff)
|
||||
{
|
||||
const osd_diff = this.prev_stats.osd_diff[osd];
|
||||
if (!osd_diff || !this.state.osd.state[osd])
|
||||
{
|
||||
continue;
|
||||
}
|
||||
for (const pool_id in osd_diff.inode_stats)
|
||||
for (const pool_id in this.prev_stats.osd_diff[osd].inode_stats)
|
||||
{
|
||||
for (const inode_num in this.prev_stats.osd_diff[osd].inode_stats[pool_id])
|
||||
{
|
||||
@@ -2016,14 +1985,14 @@ class Mon
|
||||
return res.json;
|
||||
}
|
||||
}
|
||||
this.failconnect();
|
||||
this.die();
|
||||
}
|
||||
|
||||
_die(err, code)
|
||||
_die(err)
|
||||
{
|
||||
// In fact we can just try to rejoin
|
||||
console.error(new Error(err || 'Cluster connection failed'));
|
||||
process.exit(code || 2);
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
local_ips(all)
|
||||
|
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "vitastor-mon",
|
||||
"version": "1.4.4",
|
||||
"version": "1.3.1",
|
||||
"description": "Vitastor SDS monitor service",
|
||||
"main": "mon-main.js",
|
||||
"scripts": {
|
||||
|
@@ -8,9 +8,7 @@ PartOf=vitastor.target
|
||||
LimitNOFILE=1048576
|
||||
LimitNPROC=1048576
|
||||
LimitMEMLOCK=infinity
|
||||
# Use the following for direct logs to files
|
||||
#ExecStart=bash -c 'exec vitastor-disk exec-osd /dev/vitastor/osd%i-data >>/var/log/vitastor/osd%i.log 2>&1'
|
||||
ExecStart=vitastor-disk exec-osd /dev/vitastor/osd%i-data
|
||||
ExecStart=bash -c 'exec vitastor-disk exec-osd /dev/vitastor/osd%i-data >>/var/log/vitastor/osd%i.log 2>&1'
|
||||
ExecStartPre=+vitastor-disk pre-exec /dev/vitastor/osd%i-data
|
||||
WorkingDirectory=/
|
||||
User=vitastor
|
||||
|
@@ -50,7 +50,7 @@ from cinder.volume import configuration
|
||||
from cinder.volume import driver
|
||||
from cinder.volume import volume_utils
|
||||
|
||||
VERSION = '1.4.4'
|
||||
VERSION = '1.3.1'
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
@@ -1,643 +0,0 @@
|
||||
commit c1cd026e211e94b120028e7c98a6e4ce5afe9846
|
||||
Author: Vitaliy Filippov <vitalif@yourcmc.ru>
|
||||
Date: Wed Jan 24 22:04:50 2024 +0300
|
||||
|
||||
Add Vitastor support
|
||||
|
||||
diff --git a/include/libvirt/libvirt-storage.h b/include/libvirt/libvirt-storage.h
|
||||
index aaad4a3da1..5f5daa8341 100644
|
||||
--- a/include/libvirt/libvirt-storage.h
|
||||
+++ b/include/libvirt/libvirt-storage.h
|
||||
@@ -326,6 +326,7 @@ typedef enum {
|
||||
VIR_CONNECT_LIST_STORAGE_POOLS_ZFS = 1 << 17, /* (Since: 1.2.8) */
|
||||
VIR_CONNECT_LIST_STORAGE_POOLS_VSTORAGE = 1 << 18, /* (Since: 3.1.0) */
|
||||
VIR_CONNECT_LIST_STORAGE_POOLS_ISCSI_DIRECT = 1 << 19, /* (Since: 5.6.0) */
|
||||
+ VIR_CONNECT_LIST_STORAGE_POOLS_VITASTOR = 1 << 20, /* (Since: 5.0.0) */
|
||||
} virConnectListAllStoragePoolsFlags;
|
||||
|
||||
int virConnectListAllStoragePools(virConnectPtr conn,
|
||||
diff --git a/src/conf/domain_conf.c b/src/conf/domain_conf.c
|
||||
index 22ad43e1d7..56c81d6852 100644
|
||||
--- a/src/conf/domain_conf.c
|
||||
+++ b/src/conf/domain_conf.c
|
||||
@@ -7185,7 +7185,8 @@ virDomainDiskSourceNetworkParse(xmlNodePtr node,
|
||||
src->configFile = virXPathString("string(./config/@file)", ctxt);
|
||||
|
||||
if (src->protocol == VIR_STORAGE_NET_PROTOCOL_HTTP ||
|
||||
- src->protocol == VIR_STORAGE_NET_PROTOCOL_HTTPS)
|
||||
+ src->protocol == VIR_STORAGE_NET_PROTOCOL_HTTPS ||
|
||||
+ src->protocol == VIR_STORAGE_NET_PROTOCOL_VITASTOR)
|
||||
src->query = virXMLPropString(node, "query");
|
||||
|
||||
if (virDomainStorageNetworkParseHosts(node, ctxt, &src->hosts, &src->nhosts) < 0)
|
||||
@@ -30618,6 +30619,7 @@ virDomainStorageSourceTranslateSourcePool(virStorageSource *src,
|
||||
|
||||
case VIR_STORAGE_POOL_MPATH:
|
||||
case VIR_STORAGE_POOL_RBD:
|
||||
+ case VIR_STORAGE_POOL_VITASTOR:
|
||||
case VIR_STORAGE_POOL_SHEEPDOG:
|
||||
case VIR_STORAGE_POOL_GLUSTER:
|
||||
case VIR_STORAGE_POOL_LAST:
|
||||
diff --git a/src/conf/domain_validate.c b/src/conf/domain_validate.c
|
||||
index c72108886e..c739ed6c43 100644
|
||||
--- a/src/conf/domain_validate.c
|
||||
+++ b/src/conf/domain_validate.c
|
||||
@@ -495,6 +495,7 @@ virDomainDiskDefValidateSourceChainOne(const virStorageSource *src)
|
||||
case VIR_STORAGE_NET_PROTOCOL_RBD:
|
||||
break;
|
||||
|
||||
+ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
|
||||
case VIR_STORAGE_NET_PROTOCOL_NBD:
|
||||
case VIR_STORAGE_NET_PROTOCOL_SHEEPDOG:
|
||||
case VIR_STORAGE_NET_PROTOCOL_GLUSTER:
|
||||
@@ -541,7 +542,7 @@ virDomainDiskDefValidateSourceChainOne(const virStorageSource *src)
|
||||
}
|
||||
}
|
||||
|
||||
- /* internal snapshots and config files are currently supported only with rbd: */
|
||||
+ /* internal snapshots are currently supported only with rbd: */
|
||||
if (virStorageSourceGetActualType(src) != VIR_STORAGE_TYPE_NETWORK &&
|
||||
src->protocol != VIR_STORAGE_NET_PROTOCOL_RBD) {
|
||||
if (src->snapshot) {
|
||||
@@ -549,10 +550,15 @@ virDomainDiskDefValidateSourceChainOne(const virStorageSource *src)
|
||||
_("<snapshot> element is currently supported only with 'rbd' disks"));
|
||||
return -1;
|
||||
}
|
||||
+ }
|
||||
|
||||
+ /* config files are currently supported only with rbd and vitastor: */
|
||||
+ if (virStorageSourceGetActualType(src) != VIR_STORAGE_TYPE_NETWORK &&
|
||||
+ src->protocol != VIR_STORAGE_NET_PROTOCOL_RBD &&
|
||||
+ src->protocol != VIR_STORAGE_NET_PROTOCOL_VITASTOR) {
|
||||
if (src->configFile) {
|
||||
virReportError(VIR_ERR_XML_ERROR, "%s",
|
||||
- _("<config> element is currently supported only with 'rbd' disks"));
|
||||
+ _("<config> element is currently supported only with 'rbd' and 'vitastor' disks"));
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
diff --git a/src/conf/schemas/domaincommon.rng b/src/conf/schemas/domaincommon.rng
|
||||
index b98a2ae602..7d7a872e01 100644
|
||||
--- a/src/conf/schemas/domaincommon.rng
|
||||
+++ b/src/conf/schemas/domaincommon.rng
|
||||
@@ -1997,6 +1997,35 @@
|
||||
</element>
|
||||
</define>
|
||||
|
||||
+ <define name="diskSourceNetworkProtocolVitastor">
|
||||
+ <element name="source">
|
||||
+ <interleave>
|
||||
+ <attribute name="protocol">
|
||||
+ <value>vitastor</value>
|
||||
+ </attribute>
|
||||
+ <ref name="diskSourceCommon"/>
|
||||
+ <optional>
|
||||
+ <attribute name="name"/>
|
||||
+ </optional>
|
||||
+ <optional>
|
||||
+ <attribute name="query"/>
|
||||
+ </optional>
|
||||
+ <zeroOrMore>
|
||||
+ <ref name="diskSourceNetworkHost"/>
|
||||
+ </zeroOrMore>
|
||||
+ <optional>
|
||||
+ <element name="config">
|
||||
+ <attribute name="file">
|
||||
+ <ref name="absFilePath"/>
|
||||
+ </attribute>
|
||||
+ <empty/>
|
||||
+ </element>
|
||||
+ </optional>
|
||||
+ <empty/>
|
||||
+ </interleave>
|
||||
+ </element>
|
||||
+ </define>
|
||||
+
|
||||
<define name="diskSourceNetworkProtocolISCSI">
|
||||
<element name="source">
|
||||
<attribute name="protocol">
|
||||
@@ -2347,6 +2376,7 @@
|
||||
<ref name="diskSourceNetworkProtocolSimple"/>
|
||||
<ref name="diskSourceNetworkProtocolVxHS"/>
|
||||
<ref name="diskSourceNetworkProtocolNFS"/>
|
||||
+ <ref name="diskSourceNetworkProtocolVitastor"/>
|
||||
</choice>
|
||||
</define>
|
||||
|
||||
diff --git a/src/conf/storage_conf.c b/src/conf/storage_conf.c
|
||||
index 68842004b7..1d69a788b6 100644
|
||||
--- a/src/conf/storage_conf.c
|
||||
+++ b/src/conf/storage_conf.c
|
||||
@@ -56,7 +56,7 @@ VIR_ENUM_IMPL(virStoragePool,
|
||||
"logical", "disk", "iscsi",
|
||||
"iscsi-direct", "scsi", "mpath",
|
||||
"rbd", "sheepdog", "gluster",
|
||||
- "zfs", "vstorage",
|
||||
+ "zfs", "vstorage", "vitastor",
|
||||
);
|
||||
|
||||
VIR_ENUM_IMPL(virStoragePoolFormatFileSystem,
|
||||
@@ -242,6 +242,18 @@ static virStoragePoolTypeInfo poolTypeInfo[] = {
|
||||
.formatToString = virStorageFileFormatTypeToString,
|
||||
}
|
||||
},
|
||||
+ {.poolType = VIR_STORAGE_POOL_VITASTOR,
|
||||
+ .poolOptions = {
|
||||
+ .flags = (VIR_STORAGE_POOL_SOURCE_HOST |
|
||||
+ VIR_STORAGE_POOL_SOURCE_NETWORK |
|
||||
+ VIR_STORAGE_POOL_SOURCE_NAME),
|
||||
+ },
|
||||
+ .volOptions = {
|
||||
+ .defaultFormat = VIR_STORAGE_FILE_RAW,
|
||||
+ .formatFromString = virStorageVolumeFormatFromString,
|
||||
+ .formatToString = virStorageFileFormatTypeToString,
|
||||
+ }
|
||||
+ },
|
||||
{.poolType = VIR_STORAGE_POOL_SHEEPDOG,
|
||||
.poolOptions = {
|
||||
.flags = (VIR_STORAGE_POOL_SOURCE_HOST |
|
||||
@@ -538,6 +550,11 @@ virStoragePoolDefParseSource(xmlXPathContextPtr ctxt,
|
||||
_("element 'name' is mandatory for RBD pool"));
|
||||
return -1;
|
||||
}
|
||||
+ if (pool_type == VIR_STORAGE_POOL_VITASTOR && source->name == NULL) {
|
||||
+ virReportError(VIR_ERR_XML_ERROR, "%s",
|
||||
+ _("element 'name' is mandatory for Vitastor pool"));
|
||||
+ return -1;
|
||||
+ }
|
||||
|
||||
if (options->formatFromString) {
|
||||
g_autofree char *format = NULL;
|
||||
@@ -1127,6 +1144,7 @@ virStoragePoolDefFormatBuf(virBuffer *buf,
|
||||
/* RBD, Sheepdog, Gluster and Iscsi-direct devices are not local block devs nor
|
||||
* files, so they don't have a target */
|
||||
if (def->type != VIR_STORAGE_POOL_RBD &&
|
||||
+ def->type != VIR_STORAGE_POOL_VITASTOR &&
|
||||
def->type != VIR_STORAGE_POOL_SHEEPDOG &&
|
||||
def->type != VIR_STORAGE_POOL_GLUSTER &&
|
||||
def->type != VIR_STORAGE_POOL_ISCSI_DIRECT) {
|
||||
diff --git a/src/conf/storage_conf.h b/src/conf/storage_conf.h
|
||||
index fc67957cfe..720c07ef74 100644
|
||||
--- a/src/conf/storage_conf.h
|
||||
+++ b/src/conf/storage_conf.h
|
||||
@@ -103,6 +103,7 @@ typedef enum {
|
||||
VIR_STORAGE_POOL_GLUSTER, /* Gluster device */
|
||||
VIR_STORAGE_POOL_ZFS, /* ZFS */
|
||||
VIR_STORAGE_POOL_VSTORAGE, /* Virtuozzo Storage */
|
||||
+ VIR_STORAGE_POOL_VITASTOR, /* Vitastor */
|
||||
|
||||
VIR_STORAGE_POOL_LAST,
|
||||
} virStoragePoolType;
|
||||
@@ -454,6 +455,7 @@ VIR_ENUM_DECL(virStoragePartedFs);
|
||||
VIR_CONNECT_LIST_STORAGE_POOLS_SCSI | \
|
||||
VIR_CONNECT_LIST_STORAGE_POOLS_MPATH | \
|
||||
VIR_CONNECT_LIST_STORAGE_POOLS_RBD | \
|
||||
+ VIR_CONNECT_LIST_STORAGE_POOLS_VITASTOR | \
|
||||
VIR_CONNECT_LIST_STORAGE_POOLS_SHEEPDOG | \
|
||||
VIR_CONNECT_LIST_STORAGE_POOLS_GLUSTER | \
|
||||
VIR_CONNECT_LIST_STORAGE_POOLS_ZFS | \
|
||||
diff --git a/src/conf/storage_source_conf.c b/src/conf/storage_source_conf.c
|
||||
index f974a521b1..cd394d0a9f 100644
|
||||
--- a/src/conf/storage_source_conf.c
|
||||
+++ b/src/conf/storage_source_conf.c
|
||||
@@ -88,6 +88,7 @@ VIR_ENUM_IMPL(virStorageNetProtocol,
|
||||
"ssh",
|
||||
"vxhs",
|
||||
"nfs",
|
||||
+ "vitastor",
|
||||
);
|
||||
|
||||
|
||||
@@ -1301,6 +1302,7 @@ virStorageSourceNetworkDefaultPort(virStorageNetProtocol protocol)
|
||||
case VIR_STORAGE_NET_PROTOCOL_GLUSTER:
|
||||
return 24007;
|
||||
|
||||
+ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
|
||||
case VIR_STORAGE_NET_PROTOCOL_RBD:
|
||||
/* we don't provide a default for RBD */
|
||||
return 0;
|
||||
diff --git a/src/conf/storage_source_conf.h b/src/conf/storage_source_conf.h
|
||||
index 5e7d127453..283709eeb3 100644
|
||||
--- a/src/conf/storage_source_conf.h
|
||||
+++ b/src/conf/storage_source_conf.h
|
||||
@@ -129,6 +129,7 @@ typedef enum {
|
||||
VIR_STORAGE_NET_PROTOCOL_SSH,
|
||||
VIR_STORAGE_NET_PROTOCOL_VXHS,
|
||||
VIR_STORAGE_NET_PROTOCOL_NFS,
|
||||
+ VIR_STORAGE_NET_PROTOCOL_VITASTOR,
|
||||
|
||||
VIR_STORAGE_NET_PROTOCOL_LAST
|
||||
} virStorageNetProtocol;
|
||||
diff --git a/src/conf/virstorageobj.c b/src/conf/virstorageobj.c
|
||||
index 59fa5da372..4739167f5f 100644
|
||||
--- a/src/conf/virstorageobj.c
|
||||
+++ b/src/conf/virstorageobj.c
|
||||
@@ -1438,6 +1438,7 @@ virStoragePoolObjSourceFindDuplicateCb(const void *payload,
|
||||
return 1;
|
||||
break;
|
||||
|
||||
+ case VIR_STORAGE_POOL_VITASTOR:
|
||||
case VIR_STORAGE_POOL_ISCSI_DIRECT:
|
||||
case VIR_STORAGE_POOL_RBD:
|
||||
case VIR_STORAGE_POOL_LAST:
|
||||
@@ -1921,6 +1922,8 @@ virStoragePoolObjMatch(virStoragePoolObj *obj,
|
||||
(obj->def->type == VIR_STORAGE_POOL_MPATH)) ||
|
||||
(MATCH(VIR_CONNECT_LIST_STORAGE_POOLS_RBD) &&
|
||||
(obj->def->type == VIR_STORAGE_POOL_RBD)) ||
|
||||
+ (MATCH(VIR_CONNECT_LIST_STORAGE_POOLS_VITASTOR) &&
|
||||
+ (obj->def->type == VIR_STORAGE_POOL_VITASTOR)) ||
|
||||
(MATCH(VIR_CONNECT_LIST_STORAGE_POOLS_SHEEPDOG) &&
|
||||
(obj->def->type == VIR_STORAGE_POOL_SHEEPDOG)) ||
|
||||
(MATCH(VIR_CONNECT_LIST_STORAGE_POOLS_GLUSTER) &&
|
||||
diff --git a/src/libvirt-storage.c b/src/libvirt-storage.c
|
||||
index db7660aac4..561df34709 100644
|
||||
--- a/src/libvirt-storage.c
|
||||
+++ b/src/libvirt-storage.c
|
||||
@@ -94,6 +94,7 @@ virStoragePoolGetConnect(virStoragePoolPtr pool)
|
||||
* VIR_CONNECT_LIST_STORAGE_POOLS_SCSI
|
||||
* VIR_CONNECT_LIST_STORAGE_POOLS_MPATH
|
||||
* VIR_CONNECT_LIST_STORAGE_POOLS_RBD
|
||||
+ * VIR_CONNECT_LIST_STORAGE_POOLS_VITASTOR
|
||||
* VIR_CONNECT_LIST_STORAGE_POOLS_SHEEPDOG
|
||||
* VIR_CONNECT_LIST_STORAGE_POOLS_GLUSTER
|
||||
* VIR_CONNECT_LIST_STORAGE_POOLS_ZFS
|
||||
diff --git a/src/libxl/libxl_conf.c b/src/libxl/libxl_conf.c
|
||||
index 62e1be6672..71a1d42896 100644
|
||||
--- a/src/libxl/libxl_conf.c
|
||||
+++ b/src/libxl/libxl_conf.c
|
||||
@@ -979,6 +979,7 @@ libxlMakeNetworkDiskSrcStr(virStorageSource *src,
|
||||
case VIR_STORAGE_NET_PROTOCOL_SSH:
|
||||
case VIR_STORAGE_NET_PROTOCOL_VXHS:
|
||||
case VIR_STORAGE_NET_PROTOCOL_NFS:
|
||||
+ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
|
||||
case VIR_STORAGE_NET_PROTOCOL_LAST:
|
||||
case VIR_STORAGE_NET_PROTOCOL_NONE:
|
||||
virReportError(VIR_ERR_NO_SUPPORT,
|
||||
diff --git a/src/libxl/xen_xl.c b/src/libxl/xen_xl.c
|
||||
index f175359307..8efcf4c329 100644
|
||||
--- a/src/libxl/xen_xl.c
|
||||
+++ b/src/libxl/xen_xl.c
|
||||
@@ -1456,6 +1456,7 @@ xenFormatXLDiskSrcNet(virStorageSource *src)
|
||||
case VIR_STORAGE_NET_PROTOCOL_SSH:
|
||||
case VIR_STORAGE_NET_PROTOCOL_VXHS:
|
||||
case VIR_STORAGE_NET_PROTOCOL_NFS:
|
||||
+ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
|
||||
case VIR_STORAGE_NET_PROTOCOL_LAST:
|
||||
case VIR_STORAGE_NET_PROTOCOL_NONE:
|
||||
virReportError(VIR_ERR_NO_SUPPORT,
|
||||
diff --git a/src/qemu/qemu_block.c b/src/qemu/qemu_block.c
|
||||
index 7e9daf0bdc..825b4a3006 100644
|
||||
--- a/src/qemu/qemu_block.c
|
||||
+++ b/src/qemu/qemu_block.c
|
||||
@@ -758,6 +758,38 @@ qemuBlockStorageSourceGetRBDProps(virStorageSource *src,
|
||||
}
|
||||
|
||||
|
||||
+static virJSONValue *
|
||||
+qemuBlockStorageSourceGetVitastorProps(virStorageSource *src)
|
||||
+{
|
||||
+ virJSONValue *ret = NULL;
|
||||
+ virStorageNetHostDef *host;
|
||||
+ size_t i;
|
||||
+ g_auto(virBuffer) buf = VIR_BUFFER_INITIALIZER;
|
||||
+ g_autofree char *etcd = NULL;
|
||||
+
|
||||
+ for (i = 0; i < src->nhosts; i++) {
|
||||
+ host = src->hosts + i;
|
||||
+ if ((virStorageNetHostTransport)host->transport != VIR_STORAGE_NET_HOST_TRANS_TCP) {
|
||||
+ return NULL;
|
||||
+ }
|
||||
+ virBufferAsprintf(&buf, i > 0 ? ",%s:%u" : "%s:%u", host->name, host->port);
|
||||
+ }
|
||||
+ if (src->nhosts > 0) {
|
||||
+ etcd = virBufferContentAndReset(&buf);
|
||||
+ }
|
||||
+
|
||||
+ if (virJSONValueObjectAdd(&ret,
|
||||
+ "S:etcd-host", etcd,
|
||||
+ "S:etcd-prefix", src->query,
|
||||
+ "S:config-path", src->configFile,
|
||||
+ "s:image", src->path,
|
||||
+ NULL) < 0)
|
||||
+ return NULL;
|
||||
+
|
||||
+ return ret;
|
||||
+}
|
||||
+
|
||||
+
|
||||
static virJSONValue *
|
||||
qemuBlockStorageSourceGetSheepdogProps(virStorageSource *src)
|
||||
{
|
||||
@@ -1140,6 +1172,12 @@ qemuBlockStorageSourceGetBackendProps(virStorageSource *src,
|
||||
return NULL;
|
||||
break;
|
||||
|
||||
+ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
|
||||
+ driver = "vitastor";
|
||||
+ if (!(fileprops = qemuBlockStorageSourceGetVitastorProps(src)))
|
||||
+ return NULL;
|
||||
+ break;
|
||||
+
|
||||
case VIR_STORAGE_NET_PROTOCOL_SHEEPDOG:
|
||||
driver = "sheepdog";
|
||||
if (!(fileprops = qemuBlockStorageSourceGetSheepdogProps(src)))
|
||||
@@ -2032,6 +2070,7 @@ qemuBlockGetBackingStoreString(virStorageSource *src,
|
||||
|
||||
case VIR_STORAGE_NET_PROTOCOL_SHEEPDOG:
|
||||
case VIR_STORAGE_NET_PROTOCOL_RBD:
|
||||
+ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
|
||||
case VIR_STORAGE_NET_PROTOCOL_VXHS:
|
||||
case VIR_STORAGE_NET_PROTOCOL_NFS:
|
||||
case VIR_STORAGE_NET_PROTOCOL_SSH:
|
||||
@@ -2415,6 +2454,12 @@ qemuBlockStorageSourceCreateGetStorageProps(virStorageSource *src,
|
||||
return -1;
|
||||
break;
|
||||
|
||||
+ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
|
||||
+ driver = "vitastor";
|
||||
+ if (!(location = qemuBlockStorageSourceGetVitastorProps(src)))
|
||||
+ return -1;
|
||||
+ break;
|
||||
+
|
||||
case VIR_STORAGE_NET_PROTOCOL_SHEEPDOG:
|
||||
driver = "sheepdog";
|
||||
if (!(location = qemuBlockStorageSourceGetSheepdogProps(src)))
|
||||
diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
|
||||
index 953808fcfe..62860283d8 100644
|
||||
--- a/src/qemu/qemu_domain.c
|
||||
+++ b/src/qemu/qemu_domain.c
|
||||
@@ -5215,7 +5215,8 @@ qemuDomainValidateStorageSource(virStorageSource *src,
|
||||
if (src->query &&
|
||||
(actualType != VIR_STORAGE_TYPE_NETWORK ||
|
||||
(src->protocol != VIR_STORAGE_NET_PROTOCOL_HTTPS &&
|
||||
- src->protocol != VIR_STORAGE_NET_PROTOCOL_HTTP))) {
|
||||
+ src->protocol != VIR_STORAGE_NET_PROTOCOL_HTTP &&
|
||||
+ src->protocol != VIR_STORAGE_NET_PROTOCOL_VITASTOR))) {
|
||||
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
|
||||
_("query is supported only with HTTP(S) protocols"));
|
||||
return -1;
|
||||
@@ -10340,6 +10341,7 @@ qemuDomainPrepareStorageSourceTLS(virStorageSource *src,
|
||||
break;
|
||||
|
||||
case VIR_STORAGE_NET_PROTOCOL_RBD:
|
||||
+ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
|
||||
case VIR_STORAGE_NET_PROTOCOL_SHEEPDOG:
|
||||
case VIR_STORAGE_NET_PROTOCOL_GLUSTER:
|
||||
case VIR_STORAGE_NET_PROTOCOL_ISCSI:
|
||||
diff --git a/src/qemu/qemu_snapshot.c b/src/qemu/qemu_snapshot.c
|
||||
index 73ff533827..e9c799ca8f 100644
|
||||
--- a/src/qemu/qemu_snapshot.c
|
||||
+++ b/src/qemu/qemu_snapshot.c
|
||||
@@ -423,6 +423,7 @@ qemuSnapshotPrepareDiskExternalInactive(virDomainSnapshotDiskDef *snapdisk,
|
||||
case VIR_STORAGE_NET_PROTOCOL_NONE:
|
||||
case VIR_STORAGE_NET_PROTOCOL_NBD:
|
||||
case VIR_STORAGE_NET_PROTOCOL_RBD:
|
||||
+ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
|
||||
case VIR_STORAGE_NET_PROTOCOL_SHEEPDOG:
|
||||
case VIR_STORAGE_NET_PROTOCOL_GLUSTER:
|
||||
case VIR_STORAGE_NET_PROTOCOL_ISCSI:
|
||||
@@ -648,6 +649,7 @@ qemuSnapshotPrepareDiskInternal(virDomainDiskDef *disk,
|
||||
case VIR_STORAGE_NET_PROTOCOL_NONE:
|
||||
case VIR_STORAGE_NET_PROTOCOL_NBD:
|
||||
case VIR_STORAGE_NET_PROTOCOL_RBD:
|
||||
+ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
|
||||
case VIR_STORAGE_NET_PROTOCOL_SHEEPDOG:
|
||||
case VIR_STORAGE_NET_PROTOCOL_GLUSTER:
|
||||
case VIR_STORAGE_NET_PROTOCOL_ISCSI:
|
||||
diff --git a/src/storage/storage_driver.c b/src/storage/storage_driver.c
|
||||
index 314fe930e0..fb615a8b4e 100644
|
||||
--- a/src/storage/storage_driver.c
|
||||
+++ b/src/storage/storage_driver.c
|
||||
@@ -1626,6 +1626,7 @@ storageVolLookupByPathCallback(virStoragePoolObj *obj,
|
||||
|
||||
case VIR_STORAGE_POOL_GLUSTER:
|
||||
case VIR_STORAGE_POOL_RBD:
|
||||
+ case VIR_STORAGE_POOL_VITASTOR:
|
||||
case VIR_STORAGE_POOL_SHEEPDOG:
|
||||
case VIR_STORAGE_POOL_ZFS:
|
||||
case VIR_STORAGE_POOL_LAST:
|
||||
diff --git a/src/storage_file/storage_source_backingstore.c b/src/storage_file/storage_source_backingstore.c
|
||||
index 80681924ea..8a3ade9ec0 100644
|
||||
--- a/src/storage_file/storage_source_backingstore.c
|
||||
+++ b/src/storage_file/storage_source_backingstore.c
|
||||
@@ -287,6 +287,75 @@ virStorageSourceParseRBDColonString(const char *rbdstr,
|
||||
}
|
||||
|
||||
|
||||
+static int
|
||||
+virStorageSourceParseVitastorColonString(const char *colonstr,
|
||||
+ virStorageSource *src)
|
||||
+{
|
||||
+ char *p, *e, *next;
|
||||
+ g_autofree char *options = NULL;
|
||||
+
|
||||
+ /* optionally skip the "vitastor:" prefix if provided */
|
||||
+ if (STRPREFIX(colonstr, "vitastor:"))
|
||||
+ colonstr += strlen("vitastor:");
|
||||
+
|
||||
+ options = g_strdup(colonstr);
|
||||
+
|
||||
+ p = options;
|
||||
+ while (*p) {
|
||||
+ /* find : delimiter or end of string */
|
||||
+ for (e = p; *e && *e != ':'; ++e) {
|
||||
+ if (*e == '\\') {
|
||||
+ e++;
|
||||
+ if (*e == '\0')
|
||||
+ break;
|
||||
+ }
|
||||
+ }
|
||||
+ if (*e == '\0') {
|
||||
+ next = e; /* last kv pair */
|
||||
+ } else {
|
||||
+ next = e + 1;
|
||||
+ *e = '\0';
|
||||
+ }
|
||||
+
|
||||
+ if (STRPREFIX(p, "image=")) {
|
||||
+ src->path = g_strdup(p + strlen("image="));
|
||||
+ } else if (STRPREFIX(p, "etcd-prefix=")) {
|
||||
+ src->query = g_strdup(p + strlen("etcd-prefix="));
|
||||
+ } else if (STRPREFIX(p, "config-path=")) {
|
||||
+ src->configFile = g_strdup(p + strlen("config-path="));
|
||||
+ } else if (STRPREFIX(p, "etcd-host=")) {
|
||||
+ char *h, *sep;
|
||||
+
|
||||
+ h = p + strlen("etcd-host=");
|
||||
+ while (h < e) {
|
||||
+ for (sep = h; sep < e; ++sep) {
|
||||
+ if (*sep == '\\' && (sep[1] == ',' ||
|
||||
+ sep[1] == ';' ||
|
||||
+ sep[1] == ' ')) {
|
||||
+ *sep = '\0';
|
||||
+ sep += 2;
|
||||
+ break;
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ if (virStorageSourceRBDAddHost(src, h) < 0)
|
||||
+ return -1;
|
||||
+
|
||||
+ h = sep;
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ p = next;
|
||||
+ }
|
||||
+
|
||||
+ if (!src->path) {
|
||||
+ return -1;
|
||||
+ }
|
||||
+
|
||||
+ return 0;
|
||||
+}
|
||||
+
|
||||
+
|
||||
static int
|
||||
virStorageSourceParseNBDColonString(const char *nbdstr,
|
||||
virStorageSource *src)
|
||||
@@ -399,6 +468,11 @@ virStorageSourceParseBackingColon(virStorageSource *src,
|
||||
return -1;
|
||||
break;
|
||||
|
||||
+ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
|
||||
+ if (virStorageSourceParseVitastorColonString(path, src) < 0)
|
||||
+ return -1;
|
||||
+ break;
|
||||
+
|
||||
case VIR_STORAGE_NET_PROTOCOL_SHEEPDOG:
|
||||
case VIR_STORAGE_NET_PROTOCOL_LAST:
|
||||
case VIR_STORAGE_NET_PROTOCOL_NONE:
|
||||
@@ -975,6 +1049,54 @@ virStorageSourceParseBackingJSONRBD(virStorageSource *src,
|
||||
return 0;
|
||||
}
|
||||
|
||||
+static int
|
||||
+virStorageSourceParseBackingJSONVitastor(virStorageSource *src,
|
||||
+ virJSONValue *json,
|
||||
+ const char *jsonstr G_GNUC_UNUSED,
|
||||
+ int opaque G_GNUC_UNUSED)
|
||||
+{
|
||||
+ const char *filename;
|
||||
+ const char *image = virJSONValueObjectGetString(json, "image");
|
||||
+ const char *conf = virJSONValueObjectGetString(json, "config-path");
|
||||
+ const char *etcd_prefix = virJSONValueObjectGetString(json, "etcd-prefix");
|
||||
+ virJSONValue *servers = virJSONValueObjectGetArray(json, "server");
|
||||
+ size_t nservers;
|
||||
+ size_t i;
|
||||
+
|
||||
+ src->type = VIR_STORAGE_TYPE_NETWORK;
|
||||
+ src->protocol = VIR_STORAGE_NET_PROTOCOL_VITASTOR;
|
||||
+
|
||||
+ /* legacy syntax passed via 'filename' option */
|
||||
+ if ((filename = virJSONValueObjectGetString(json, "filename")))
|
||||
+ return virStorageSourceParseVitastorColonString(filename, src);
|
||||
+
|
||||
+ if (!image) {
|
||||
+ virReportError(VIR_ERR_INVALID_ARG, "%s",
|
||||
+ _("missing image name in Vitastor backing volume "
|
||||
+ "JSON specification"));
|
||||
+ return -1;
|
||||
+ }
|
||||
+
|
||||
+ src->path = g_strdup(image);
|
||||
+ src->configFile = g_strdup(conf);
|
||||
+ src->query = g_strdup(etcd_prefix);
|
||||
+
|
||||
+ if (servers) {
|
||||
+ nservers = virJSONValueArraySize(servers);
|
||||
+
|
||||
+ src->hosts = g_new0(virStorageNetHostDef, nservers);
|
||||
+ src->nhosts = nservers;
|
||||
+
|
||||
+ for (i = 0; i < nservers; i++) {
|
||||
+ if (virStorageSourceParseBackingJSONInetSocketAddress(src->hosts + i,
|
||||
+ virJSONValueArrayGet(servers, i)) < 0)
|
||||
+ return -1;
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ return 0;
|
||||
+}
|
||||
+
|
||||
static int
|
||||
virStorageSourceParseBackingJSONRaw(virStorageSource *src,
|
||||
virJSONValue *json,
|
||||
@@ -1152,6 +1274,7 @@ static const struct virStorageSourceJSONDriverParser jsonParsers[] = {
|
||||
{"sheepdog", false, virStorageSourceParseBackingJSONSheepdog, 0},
|
||||
{"ssh", false, virStorageSourceParseBackingJSONSSH, 0},
|
||||
{"rbd", false, virStorageSourceParseBackingJSONRBD, 0},
|
||||
+ {"vitastor", false, virStorageSourceParseBackingJSONVitastor, 0},
|
||||
{"raw", true, virStorageSourceParseBackingJSONRaw, 0},
|
||||
{"nfs", false, virStorageSourceParseBackingJSONNFS, 0},
|
||||
{"vxhs", false, virStorageSourceParseBackingJSONVxHS, 0},
|
||||
diff --git a/src/test/test_driver.c b/src/test/test_driver.c
|
||||
index e87d7cfd44..ccc05d7aae 100644
|
||||
--- a/src/test/test_driver.c
|
||||
+++ b/src/test/test_driver.c
|
||||
@@ -7335,6 +7335,7 @@ testStorageVolumeTypeForPool(int pooltype)
|
||||
case VIR_STORAGE_POOL_ISCSI_DIRECT:
|
||||
case VIR_STORAGE_POOL_GLUSTER:
|
||||
case VIR_STORAGE_POOL_RBD:
|
||||
+ case VIR_STORAGE_POOL_VITASTOR:
|
||||
return VIR_STORAGE_VOL_NETWORK;
|
||||
case VIR_STORAGE_POOL_LOGICAL:
|
||||
case VIR_STORAGE_POOL_DISK:
|
||||
diff --git a/tests/storagepoolcapsschemadata/poolcaps-fs.xml b/tests/storagepoolcapsschemadata/poolcaps-fs.xml
|
||||
index eee75af746..8bd0a57bdd 100644
|
||||
--- a/tests/storagepoolcapsschemadata/poolcaps-fs.xml
|
||||
+++ b/tests/storagepoolcapsschemadata/poolcaps-fs.xml
|
||||
@@ -204,4 +204,11 @@
|
||||
</enum>
|
||||
</volOptions>
|
||||
</pool>
|
||||
+ <pool type='vitastor' supported='no'>
|
||||
+ <volOptions>
|
||||
+ <defaultFormat type='raw'/>
|
||||
+ <enum name='targetFormatType'>
|
||||
+ </enum>
|
||||
+ </volOptions>
|
||||
+ </pool>
|
||||
</storagepoolCapabilities>
|
||||
diff --git a/tests/storagepoolcapsschemadata/poolcaps-full.xml b/tests/storagepoolcapsschemadata/poolcaps-full.xml
|
||||
index 805950a937..852df0de16 100644
|
||||
--- a/tests/storagepoolcapsschemadata/poolcaps-full.xml
|
||||
+++ b/tests/storagepoolcapsschemadata/poolcaps-full.xml
|
||||
@@ -204,4 +204,11 @@
|
||||
</enum>
|
||||
</volOptions>
|
||||
</pool>
|
||||
+ <pool type='vitastor' supported='yes'>
|
||||
+ <volOptions>
|
||||
+ <defaultFormat type='raw'/>
|
||||
+ <enum name='targetFormatType'>
|
||||
+ </enum>
|
||||
+ </volOptions>
|
||||
+ </pool>
|
||||
</storagepoolCapabilities>
|
||||
diff --git a/tests/storagepoolxml2argvtest.c b/tests/storagepoolxml2argvtest.c
|
||||
index e8e40d695e..db55fe5f3a 100644
|
||||
--- a/tests/storagepoolxml2argvtest.c
|
||||
+++ b/tests/storagepoolxml2argvtest.c
|
||||
@@ -65,6 +65,7 @@ testCompareXMLToArgvFiles(bool shouldFail,
|
||||
case VIR_STORAGE_POOL_GLUSTER:
|
||||
case VIR_STORAGE_POOL_ZFS:
|
||||
case VIR_STORAGE_POOL_VSTORAGE:
|
||||
+ case VIR_STORAGE_POOL_VITASTOR:
|
||||
case VIR_STORAGE_POOL_LAST:
|
||||
default:
|
||||
VIR_TEST_DEBUG("pool type '%s' has no xml2argv test", defTypeStr);
|
||||
diff --git a/tools/virsh-pool.c b/tools/virsh-pool.c
|
||||
index 36f00cf643..5f5bd3464e 100644
|
||||
--- a/tools/virsh-pool.c
|
||||
+++ b/tools/virsh-pool.c
|
||||
@@ -1223,6 +1223,9 @@ cmdPoolList(vshControl *ctl, const vshCmd *cmd G_GNUC_UNUSED)
|
||||
case VIR_STORAGE_POOL_VSTORAGE:
|
||||
flags |= VIR_CONNECT_LIST_STORAGE_POOLS_VSTORAGE;
|
||||
break;
|
||||
+ case VIR_STORAGE_POOL_VITASTOR:
|
||||
+ flags |= VIR_CONNECT_LIST_STORAGE_POOLS_VITASTOR;
|
||||
+ break;
|
||||
case VIR_STORAGE_POOL_LAST:
|
||||
break;
|
||||
}
|
@@ -1,28 +0,0 @@
|
||||
name: Pull Request
|
||||
about: Submit a pull request
|
||||
body:
|
||||
- type: textarea
|
||||
id: description
|
||||
attributes:
|
||||
label: Description
|
||||
description: Describe your pull request
|
||||
placeholder: ""
|
||||
value: ""
|
||||
validations:
|
||||
required: true
|
||||
- type: input
|
||||
id: author
|
||||
attributes:
|
||||
label: Contributor Name
|
||||
description: Contributor Name or Company Details if the Contributor is a company
|
||||
placeholder: ""
|
||||
validations:
|
||||
required: false
|
||||
- type: checkboxes
|
||||
id: terms
|
||||
attributes:
|
||||
label: CLA
|
||||
description: By submitting this pull request, I accept [Vitastor CLA](https://git.yourcmc.ru/vitalif/vitastor/src/branch/master/CLA-en.md)
|
||||
options:
|
||||
- label: "I accept Vitastor CLA agreement: https://git.yourcmc.ru/vitalif/vitastor/src/branch/master/CLA-en.md"
|
||||
required: true
|
@@ -24,4 +24,4 @@ rm fio
|
||||
mv fio-copy fio
|
||||
FIO=`rpm -qi fio | perl -e 'while(<>) { /^Epoch[\s:]+(\S+)/ && print "$1:"; /^Version[\s:]+(\S+)/ && print $1; /^Release[\s:]+(\S+)/ && print "-$1"; }'`
|
||||
perl -i -pe 's/(Requires:\s*fio)([^\n]+)?/$1 = '$FIO'/' $VITASTOR/rpm/vitastor-el$EL.spec
|
||||
tar --transform 's#^#vitastor-1.4.4/#' --exclude 'rpm/*.rpm' -czf $VITASTOR/../vitastor-1.4.4$(rpm --eval '%dist').tar.gz *
|
||||
tar --transform 's#^#vitastor-1.3.1/#' --exclude 'rpm/*.rpm' -czf $VITASTOR/../vitastor-1.3.1$(rpm --eval '%dist').tar.gz *
|
||||
|
@@ -36,7 +36,7 @@ ADD . /root/vitastor
|
||||
RUN set -e; \
|
||||
cd /root/vitastor/rpm; \
|
||||
sh build-tarball.sh; \
|
||||
cp /root/vitastor-1.4.4.el7.tar.gz ~/rpmbuild/SOURCES; \
|
||||
cp /root/vitastor-1.3.1.el7.tar.gz ~/rpmbuild/SOURCES; \
|
||||
cp vitastor-el7.spec ~/rpmbuild/SPECS/vitastor.spec; \
|
||||
cd ~/rpmbuild/SPECS/; \
|
||||
rpmbuild -ba vitastor.spec; \
|
||||
|
@@ -1,11 +1,11 @@
|
||||
Name: vitastor
|
||||
Version: 1.4.4
|
||||
Version: 1.3.1
|
||||
Release: 1%{?dist}
|
||||
Summary: Vitastor, a fast software-defined clustered block storage
|
||||
|
||||
License: Vitastor Network Public License 1.1
|
||||
URL: https://vitastor.io/
|
||||
Source0: vitastor-1.4.4.el7.tar.gz
|
||||
Source0: vitastor-1.3.1.el7.tar.gz
|
||||
|
||||
BuildRequires: liburing-devel >= 0.6
|
||||
BuildRequires: gperftools-devel
|
||||
|
@@ -35,7 +35,7 @@ ADD . /root/vitastor
|
||||
RUN set -e; \
|
||||
cd /root/vitastor/rpm; \
|
||||
sh build-tarball.sh; \
|
||||
cp /root/vitastor-1.4.4.el8.tar.gz ~/rpmbuild/SOURCES; \
|
||||
cp /root/vitastor-1.3.1.el8.tar.gz ~/rpmbuild/SOURCES; \
|
||||
cp vitastor-el8.spec ~/rpmbuild/SPECS/vitastor.spec; \
|
||||
cd ~/rpmbuild/SPECS/; \
|
||||
rpmbuild -ba vitastor.spec; \
|
||||
|
@@ -1,11 +1,11 @@
|
||||
Name: vitastor
|
||||
Version: 1.4.4
|
||||
Version: 1.3.1
|
||||
Release: 1%{?dist}
|
||||
Summary: Vitastor, a fast software-defined clustered block storage
|
||||
|
||||
License: Vitastor Network Public License 1.1
|
||||
URL: https://vitastor.io/
|
||||
Source0: vitastor-1.4.4.el8.tar.gz
|
||||
Source0: vitastor-1.3.1.el8.tar.gz
|
||||
|
||||
BuildRequires: liburing-devel >= 0.6
|
||||
BuildRequires: gperftools-devel
|
||||
|
@@ -18,7 +18,7 @@ ADD . /root/vitastor
|
||||
RUN set -e; \
|
||||
cd /root/vitastor/rpm; \
|
||||
sh build-tarball.sh; \
|
||||
cp /root/vitastor-1.4.4.el9.tar.gz ~/rpmbuild/SOURCES; \
|
||||
cp /root/vitastor-1.3.1.el9.tar.gz ~/rpmbuild/SOURCES; \
|
||||
cp vitastor-el9.spec ~/rpmbuild/SPECS/vitastor.spec; \
|
||||
cd ~/rpmbuild/SPECS/; \
|
||||
rpmbuild -ba vitastor.spec; \
|
||||
|
@@ -1,11 +1,11 @@
|
||||
Name: vitastor
|
||||
Version: 1.4.4
|
||||
Version: 1.3.1
|
||||
Release: 1%{?dist}
|
||||
Summary: Vitastor, a fast software-defined clustered block storage
|
||||
|
||||
License: Vitastor Network Public License 1.1
|
||||
URL: https://vitastor.io/
|
||||
Source0: vitastor-1.4.4.el9.tar.gz
|
||||
Source0: vitastor-1.3.1.el9.tar.gz
|
||||
|
||||
BuildRequires: liburing-devel >= 0.6
|
||||
BuildRequires: gperftools-devel
|
||||
|
@@ -16,7 +16,7 @@ if("${CMAKE_INSTALL_PREFIX}" MATCHES "^/usr/local/?$")
|
||||
set(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}")
|
||||
endif()
|
||||
|
||||
add_definitions(-DVERSION="1.4.4")
|
||||
add_definitions(-DVERSION="1.3.1")
|
||||
add_definitions(-Wall -Wno-sign-compare -Wno-comment -Wno-parentheses -Wno-pointer-arith -fdiagnostics-color=always -fno-omit-frame-pointer -I ${CMAKE_SOURCE_DIR}/src)
|
||||
add_link_options(-fno-omit-frame-pointer)
|
||||
if (${WITH_ASAN})
|
||||
@@ -181,6 +181,25 @@ target_link_libraries(vitastor-nbd
|
||||
vitastor_client
|
||||
)
|
||||
|
||||
# vitastor-kv
|
||||
add_executable(vitastor-kv
|
||||
kv_cli.cpp
|
||||
kv_db.cpp
|
||||
kv_db.h
|
||||
)
|
||||
target_link_libraries(vitastor-kv
|
||||
vitastor_client
|
||||
)
|
||||
|
||||
add_executable(vitastor-kv-stress
|
||||
kv_stress.cpp
|
||||
kv_db.cpp
|
||||
kv_db.h
|
||||
)
|
||||
target_link_libraries(vitastor-kv-stress
|
||||
vitastor_client
|
||||
)
|
||||
|
||||
# vitastor-nfs
|
||||
add_executable(vitastor-nfs
|
||||
nfs_proxy.cpp
|
||||
|
@@ -184,7 +184,8 @@ void journal_flusher_t::mark_trim_possible()
if (trim_wanted > 0)
{
dequeuing = true;
journal_trim_counter = 0;
if (!journal_trim_counter)
journal_trim_counter = journal_trim_interval;
bs->ringloop->wakeup();
}
}
@@ -365,7 +366,7 @@ resume_0:
!flusher->flush_queue.size() || !flusher->dequeuing)
{
stop_flusher:
if (flusher->trim_wanted > 0 && !flusher->journal_trim_counter)
if (flusher->trim_wanted > 0 && flusher->journal_trim_counter > 0)
{
// Attempt forced trim
flusher->active_flushers++;
@@ -1345,6 +1346,7 @@ bool journal_flusher_co::trim_journal(int wait_base)
else if (wait_state == wait_base+2) goto resume_2;
else if (wait_state == wait_base+3) goto resume_3;
else if (wait_state == wait_base+4) goto resume_4;
flusher->journal_trim_counter = 0;
new_trim_pos = bs->journal.get_trim_pos();
if (new_trim_pos != bs->journal.used_start)
{
@@ -1417,7 +1419,6 @@ bool journal_flusher_co::trim_journal(int wait_base)
exit(0);
}
}
flusher->journal_trim_counter = 0;
flusher->trimming = false;
}
return true;
@@ -163,10 +163,20 @@ void blockstore_impl_t::loop()
}
else if (op->opcode == BS_OP_SYNC)
{
// sync only completed writes?
// wait for all small writes to be submitted
// wait for all big writes to complete, submit data device fsync
// wait for the data device fsync to complete, then submit journal writes for big writes
// then submit an fsync operation
if (has_writes)
{
// Can't submit SYNC before previous writes
continue;
}
wr_st = continue_sync(op);
if (wr_st != 2)
{
has_writes = wr_st > 0 ? 1 : 2;
}
}
else if (op->opcode == BS_OP_STABLE)
{
@@ -195,10 +205,6 @@ void blockstore_impl_t::loop()
// ring is full, stop submission
break;
}
else if (PRIV(op)->wait_for == WAIT_JOURNAL)
{
PRIV(op)->wait_detail2 = (unstable_writes.size()+unstable_unsynced);
}
}
}
if (op_idx != new_idx)
@@ -277,8 +283,7 @@ void blockstore_impl_t::check_wait(blockstore_op_t *op)
}
else if (PRIV(op)->wait_for == WAIT_JOURNAL)
{
if (journal.used_start == PRIV(op)->wait_detail &&
(unstable_writes.size()+unstable_unsynced) == PRIV(op)->wait_detail2)
if (journal.used_start == PRIV(op)->wait_detail)
{
// do not submit
#ifdef BLOCKSTORE_DEBUG
@@ -553,14 +558,13 @@ void blockstore_impl_t::process_list(blockstore_op_t *op)
if (stable_count >= stable_alloc)
{
stable_alloc *= 2;
obj_ver_id* nst = (obj_ver_id*)realloc(stable, sizeof(obj_ver_id) * stable_alloc);
if (!nst)
stable = (obj_ver_id*)realloc(stable, sizeof(obj_ver_id) * stable_alloc);
if (!stable)
{
op->retval = -ENOMEM;
FINISH_OP(op);
return;
}
stable = nst;
}
stable[stable_count++] = {
.oid = clean_it->first,
@@ -638,8 +642,8 @@ void blockstore_impl_t::process_list(blockstore_op_t *op)
if (stable_count >= stable_alloc)
{
stable_alloc += 32768;
obj_ver_id *nst = (obj_ver_id*)realloc(stable, sizeof(obj_ver_id) * stable_alloc);
if (!nst)
stable = (obj_ver_id*)realloc(stable, sizeof(obj_ver_id) * stable_alloc);
if (!stable)
{
if (unstable)
free(unstable);
@@ -647,7 +651,6 @@ void blockstore_impl_t::process_list(blockstore_op_t *op)
FINISH_OP(op);
return;
}
stable = nst;
}
stable[stable_count++] = dirty_it->first;
}
@@ -663,8 +666,8 @@ void blockstore_impl_t::process_list(blockstore_op_t *op)
if (unstable_count >= unstable_alloc)
{
unstable_alloc += 32768;
obj_ver_id *nst = (obj_ver_id*)realloc(unstable, sizeof(obj_ver_id) * unstable_alloc);
if (!nst)
unstable = (obj_ver_id*)realloc(unstable, sizeof(obj_ver_id) * unstable_alloc);
if (!unstable)
{
if (stable)
free(stable);
@@ -672,7 +675,6 @@ void blockstore_impl_t::process_list(blockstore_op_t *op)
FINISH_OP(op);
return;
}
unstable = nst;
}
unstable[unstable_count++] = dirty_it->first;
}
@@ -692,8 +694,8 @@ void blockstore_impl_t::process_list(blockstore_op_t *op)
if (stable_count+unstable_count > stable_alloc)
{
stable_alloc = stable_count+unstable_count;
obj_ver_id *nst = (obj_ver_id*)realloc(stable, sizeof(obj_ver_id) * stable_alloc);
if (!nst)
stable = (obj_ver_id*)realloc(stable, sizeof(obj_ver_id) * stable_alloc);
if (!stable)
{
if (unstable)
free(unstable);
@@ -701,7 +703,6 @@ void blockstore_impl_t::process_list(blockstore_op_t *op)
FINISH_OP(op);
return;
}
stable = nst;
}
// Copy unstable entries
for (int i = 0; i < unstable_count; i++)
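The repeated hunks above change process_list() to reallocate through a temporary pointer so the original buffer is not leaked when realloc() returns NULL. A minimal standalone sketch of that idiom, for illustration only (grow_array and its parameters are hypothetical and not part of this repository):

// Illustration only: grow a heap array without leaking it when realloc() fails.
#include <cstdlib>

template<typename T>
bool grow_array(T *&items, size_t new_count)
{
    T *tmp = (T*)realloc(items, sizeof(T) * new_count);
    if (!tmp)
        return false;   // the old block is still valid and still owned by <items>
    items = tmp;        // only overwrite the pointer after realloc() succeeds
    return true;
}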
@@ -55,7 +55,6 @@
#define IS_JOURNAL(st) (((st) & 0x0F) == BS_ST_SMALL_WRITE)
#define IS_BIG_WRITE(st) (((st) & 0x0F) == BS_ST_BIG_WRITE)
#define IS_DELETE(st) (((st) & 0x0F) == BS_ST_DELETE)
#define IS_INSTANT(st) (((st) & BS_ST_TYPE_MASK) == BS_ST_DELETE || ((st) & BS_ST_INSTANT))
#define BS_SUBMIT_CHECK_SQES(n) \
if (ringloop->sqes_left() < (n))\
@@ -202,7 +201,7 @@ struct blockstore_op_private_t
{
// Wait status
int wait_for;
uint64_t wait_detail, wait_detail2;
uint64_t wait_detail;
int pending_ops;
int op_state;
@@ -378,7 +377,7 @@ class blockstore_impl_t
// Stabilize
int dequeue_stable(blockstore_op_t *op);
int continue_stable(blockstore_op_t *op);
void mark_stable(obj_ver_id ov, bool forget_dirty = false);
void mark_stable(const obj_ver_id & ov, bool forget_dirty = false);
void stabilize_object(object_id oid, uint64_t max_ver);
blockstore_op_t* selective_sync(blockstore_op_t *op);
int split_stab_op(blockstore_op_t *op, std::function<int(obj_ver_id v)> decider);
@@ -19,7 +19,7 @@ void blockstore_impl_t::parse_config(blockstore_config_t & config, bool init)
throttle_target_mbs = strtoull(config["throttle_target_mbs"].c_str(), NULL, 10);
throttle_target_parallelism = strtoull(config["throttle_target_parallelism"].c_str(), NULL, 10);
throttle_threshold_us = strtoull(config["throttle_threshold_us"].c_str(), NULL, 10);
if (config["autosync_writes"] != "")
if (config.find("autosync_writes") != config.end())
{
autosync_writes = strtoull(config["autosync_writes"].c_str(), NULL, 10);
}
@@ -412,40 +412,11 @@ resume_4:
return 2;
}
void blockstore_impl_t::mark_stable(obj_ver_id v, bool forget_dirty)
void blockstore_impl_t::mark_stable(const obj_ver_id & v, bool forget_dirty)
{
auto dirty_it = dirty_db.find(v);
if (dirty_it != dirty_db.end())
{
if (IS_INSTANT(dirty_it->second.state))
{
// 'Instant' (non-EC) operations may complete and try to become stable out of order. Prevent it.
auto back_it = dirty_it;
while (back_it != dirty_db.begin())
{
back_it--;
if (back_it->first.oid != v.oid)
{
break;
}
if (!IS_STABLE(back_it->second.state))
{
// There are preceding unstable versions, can't flush <v>
return;
}
}
while (true)
{
dirty_it++;
if (dirty_it == dirty_db.end() || dirty_it->first.oid != v.oid ||
!IS_SYNCED(dirty_it->second.state))
{
dirty_it--;
break;
}
v.version = dirty_it->first.version;
}
}
while (1)
{
bool was_stable = IS_STABLE(dirty_it->second.state);
@@ -76,7 +76,6 @@ int blockstore_impl_t::continue_sync(blockstore_op_t *op)
// 2nd step: Data device is synced, prepare & write journal entries
// Check space in the journal and journal memory buffers
blockstore_journal_check_t space_check(this);
auto reservation = (unstable_writes.size()+unstable_unsynced+PRIV(op)->sync_big_writes.size())*journal.block_size;
if (dsk.csum_block_size)
{
// More complex check because all journal entries have different lengths
@@ -86,14 +85,16 @@ int blockstore_impl_t::continue_sync(blockstore_op_t *op)
left--;
auto & dirty_entry = dirty_db.at(sbw);
uint64_t dyn_size = dsk.dirty_dyn_size(dirty_entry.offset, dirty_entry.len);
if (!space_check.check_available(op, 1, sizeof(journal_entry_big_write) + dyn_size, left ? 0 : reservation))
if (!space_check.check_available(op, 1, sizeof(journal_entry_big_write) + dyn_size,
(unstable_writes.size()+unstable_unsynced)*journal.block_size))
{
return 0;
}
}
}
else if (!space_check.check_available(op, PRIV(op)->sync_big_writes.size(),
sizeof(journal_entry_big_write) + dsk.clean_entry_bitmap_size, reservation))
sizeof(journal_entry_big_write) + dsk.clean_entry_bitmap_size,
(unstable_writes.size()+unstable_unsynced)*journal.block_size))
{
return 0;
}
@@ -129,7 +129,7 @@ bool blockstore_impl_t::enqueue_write(blockstore_op_t *op)
}
bool imm = (op->len < dsk.data_block_size ? (immediate_commit != IMMEDIATE_NONE) : (immediate_commit == IMMEDIATE_ALL));
if (wait_big && !is_del && !deleted && op->len < dsk.data_block_size && !imm ||
!imm && autosync_writes && unsynced_queued_ops >= autosync_writes)
!imm && unsynced_queued_ops >= autosync_writes)
{
// Issue an additional sync so that the previous big write can reach the journal
blockstore_op_t *sync_op = new blockstore_op_t;
@@ -320,7 +320,7 @@ int blockstore_impl_t::dequeue_write(blockstore_op_t *op)
blockstore_journal_check_t space_check(this);
if (!space_check.check_available(op, unsynced_big_write_count + 1,
sizeof(journal_entry_big_write) + dsk.clean_dyn_size,
(unstable_writes.size()+unstable_unsynced+((dirty_it->second.state & BS_ST_INSTANT) ? 0 : 1))*journal.block_size))
(unstable_writes.size()+unstable_unsynced)*journal.block_size))
{
return 0;
}
@@ -412,7 +412,7 @@ int blockstore_impl_t::dequeue_write(blockstore_op_t *op)
sizeof(journal_entry_big_write) + dsk.clean_dyn_size, 0)
|| !space_check.check_available(op, 1,
sizeof(journal_entry_small_write) + dyn_size,
op->len + (unstable_writes.size()+unstable_unsynced+((dirty_it->second.state & BS_ST_INSTANT) ? 0 : 1))*journal.block_size))
op->len + (unstable_writes.size()+unstable_unsynced)*journal.block_size))
{
return 0;
}
@@ -549,7 +549,7 @@ resume_2:
uint64_t dyn_size = dsk.dirty_dyn_size(op->offset, op->len);
blockstore_journal_check_t space_check(this);
if (!space_check.check_available(op, 1, sizeof(journal_entry_big_write) + dyn_size,
(unstable_writes.size()+unstable_unsynced+((dirty_it->second.state & BS_ST_INSTANT) ? 0 : 1))*journal.block_size))
(unstable_writes.size()+unstable_unsynced)*journal.block_size))
{
return 0;
}
@@ -593,7 +593,7 @@ resume_4:
#endif
bool is_big = (dirty_it->second.state & BS_ST_TYPE_MASK) == BS_ST_BIG_WRITE;
bool imm = is_big ? (immediate_commit == IMMEDIATE_ALL) : (immediate_commit != IMMEDIATE_NONE);
bool is_instant = IS_INSTANT(dirty_it->second.state);
bool is_instant = ((dirty_it->second.state & BS_ST_TYPE_MASK) == BS_ST_DELETE || (dirty_it->second.state & BS_ST_INSTANT));
if (imm)
{
auto & unstab = unstable_writes[op->oid];
@@ -17,7 +17,6 @@ struct rm_pg_t
uint64_t obj_count = 0, obj_done = 0;
int state = 0;
int in_flight = 0;
bool synced = false;
};
struct rm_inode_t
@@ -49,7 +48,6 @@ struct rm_inode_t
.objects = objects,
.obj_count = objects.size(),
.obj_done = 0,
.synced = parent->cli->get_immediate_commit(inode),
});
if (min_offset == 0)
{
@@ -153,37 +151,6 @@ struct rm_inode_t
}
cur_list->obj_pos++;
}
if (cur_list->in_flight == 0 && cur_list->obj_pos == cur_list->objects.end() &&
!cur_list->synced)
{
osd_op_t *op = new osd_op_t();
op->op_type = OSD_OP_OUT;
op->peer_fd = parent->cli->msgr.osd_peer_fds.at(cur_list->rm_osd_num);
op->req = (osd_any_op_t){
.sync = {
.header = {
.magic = SECONDARY_OSD_OP_MAGIC,
.id = parent->cli->next_op_id(),
.opcode = OSD_OP_SYNC,
},
},
};
op->callback = [this, cur_list](osd_op_t *op)
{
cur_list->in_flight--;
cur_list->synced = true;
if (op->reply.hdr.retval < 0)
{
fprintf(stderr, "Failed to sync OSD %lu (retval=%ld)\n",
cur_list->rm_osd_num, op->reply.hdr.retval);
error_count++;
}
delete op;
continue_delete();
};
cur_list->in_flight++;
parent->cli->msgr.outbox_push(op);
}
}
void continue_delete()
@@ -194,8 +161,7 @@ struct rm_inode_t
}
for (int i = 0; i < lists.size(); i++)
{
if (!lists[i]->in_flight && lists[i]->obj_pos == lists[i]->objects.end() &&
lists[i]->synced)
if (!lists[i]->in_flight && lists[i]->obj_pos == lists[i]->objects.end())
{
delete lists[i];
lists.erase(lists.begin()+i, lists.begin()+i+1);
@@ -221,7 +187,7 @@ struct rm_inode_t
{
fprintf(stderr, "\n");
}
if (parent->progress && (total_done < total_count || inactive_osds.size() > 0 || error_count > 0))
if (parent->progress && (total_done < total_count || inactive_osds.size() > 0))
{
fprintf(
stderr, "Warning: Pool:%u,ID:%lu inode data may not have been fully removed.\n"
@@ -106,7 +106,7 @@ resume_2:
if (etcd_states[i]["error"].is_null())
{
etcd_alive++;
etcd_db_size = etcd_states[i]["dbSize"].uint64_value();
etcd_db_size = etcd_states[i]["dbSizeInUse"].uint64_value();
}
}
int mon_count = 0;
@@ -6,7 +6,7 @@
#include "cluster_client_impl.h"
#include "http_client.h" // json_is_true
cluster_client_t::cluster_client_t(ring_loop_t *ringloop, timerfd_manager_t *tfd, json11::Json & config)
cluster_client_t::cluster_client_t(ring_loop_t *ringloop, timerfd_manager_t *tfd, json11::Json config)
{
wb = new writeback_cache_t();
@@ -353,11 +353,11 @@ void cluster_client_t::on_load_config_hook(json11::Json::object & etcd_global_co
up_wait_retry_interval = config["up_wait_retry_interval"].uint64_value();
if (!up_wait_retry_interval)
{
up_wait_retry_interval = 50;
up_wait_retry_interval = 500;
}
else if (up_wait_retry_interval < 10)
else if (up_wait_retry_interval < 50)
{
up_wait_retry_interval = 10;
up_wait_retry_interval = 50;
}
// log_level
log_level = config["log_level"].uint64_value();
@@ -534,7 +534,7 @@ void cluster_client_t::execute_internal(cluster_op_t *op)
return;
}
if (op->opcode == OSD_OP_WRITE && enable_writeback && !(op->flags & OP_FLUSH_BUFFER) &&
!op->version /* FIXME no CAS writeback */)
!op->version /* no CAS writeback */)
{
if (wb->writebacks_active >= client_max_writeback_iodepth)
{
@@ -555,7 +555,7 @@ void cluster_client_t::execute_internal(cluster_op_t *op)
}
if (op->opcode == OSD_OP_WRITE && !(op->flags & OP_IMMEDIATE_COMMIT))
{
if (!(op->flags & OP_FLUSH_BUFFER))
if (!(op->flags & OP_FLUSH_BUFFER) && !op->version /* no CAS write-repeat */)
{
wb->copy_write(op, CACHE_WRITTEN);
}
@@ -1161,7 +1161,7 @@ void cluster_client_t::handle_op_part(cluster_op_part_t *part)
);
}
}
else
else if (log_level > 0)
{
fprintf(
stderr, "%s operation failed on OSD %lu: retval=%ld (expected %d)\n",
@@ -121,7 +121,7 @@ public:
json11::Json::object cli_config, file_config, etcd_global_config;
json11::Json::object config;
cluster_client_t(ring_loop_t *ringloop, timerfd_manager_t *tfd, json11::Json & config);
cluster_client_t(ring_loop_t *ringloop, timerfd_manager_t *tfd, json11::Json config);
~cluster_client_t();
void execute(cluster_op_t *op);
void execute_raw(osd_num_t osd_num, osd_op_t *op);
@@ -28,7 +28,7 @@ struct etcd_kv_t
{
std::string key;
json11::Json value;
uint64_t mod_revision = 0;
uint64_t mod_revision;
};
struct pg_config_t
src/kv_cli.cpp (new file, 401 lines)
@@ -0,0 +1,401 @@
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 (see README.md for details)
//
// Vitastor shared key/value database test CLI
#define _XOPEN_SOURCE
#include <limits.h>
#include <netinet/tcp.h>
#include <sys/epoll.h>
#include <unistd.h>
#include <fcntl.h>
//#include <signal.h>
#include "epoll_manager.h"
#include "str_util.h"
#include "kv_db.h"
const char *exe_name = NULL;
class kv_cli_t
{
public:
kv_dbw_t *db = NULL;
ring_loop_t *ringloop = NULL;
epoll_manager_t *epmgr = NULL;
cluster_client_t *cli = NULL;
bool interactive = false;
int in_progress = 0;
char *cur_cmd = NULL;
int cur_cmd_size = 0, cur_cmd_alloc = 0;
bool finished = false, eof = false;
json11::Json::object cfg;
~kv_cli_t();
static json11::Json::object parse_args(int narg, const char *args[]);
void run(const json11::Json::object & cfg);
void read_cmd();
void next_cmd();
void handle_cmd(const std::string & cmd, std::function<void()> cb);
};
kv_cli_t::~kv_cli_t()
{
if (cur_cmd)
{
free(cur_cmd);
cur_cmd = NULL;
}
cur_cmd_alloc = 0;
if (db)
delete db;
if (cli)
{
cli->flush();
delete cli;
}
if (epmgr)
delete epmgr;
if (ringloop)
delete ringloop;
}
json11::Json::object kv_cli_t::parse_args(int narg, const char *args[])
{
json11::Json::object cfg;
for (int i = 1; i < narg; i++)
{
if (!strcmp(args[i], "-h") || !strcmp(args[i], "--help"))
{
printf(
"Vitastor Key/Value CLI\n"
"(c) Vitaliy Filippov, 2023+ (VNPL-1.1)\n"
"\n"
"USAGE: %s [--etcd_address ADDR] [OTHER OPTIONS]\n",
exe_name
);
exit(0);
}
else if (args[i][0] == '-' && args[i][1] == '-')
{
const char *opt = args[i]+2;
cfg[opt] = !strcmp(opt, "json") || i == narg-1 ? "1" : args[++i];
}
}
return cfg;
}
void kv_cli_t::run(const json11::Json::object & cfg)
{
// Create client
ringloop = new ring_loop_t(512);
epmgr = new epoll_manager_t(ringloop);
cli = new cluster_client_t(ringloop, epmgr->tfd, cfg);
db = new kv_dbw_t(cli);
// Load image metadata
while (!cli->is_ready())
{
ringloop->loop();
if (cli->is_ready())
break;
ringloop->wait();
}
// Run
fcntl(0, F_SETFL, fcntl(0, F_GETFL, 0) | O_NONBLOCK);
try
{
epmgr->tfd->set_fd_handler(0, false, [this](int fd, int events)
{
if (events & EPOLLIN)
{
read_cmd();
}
if (events & EPOLLRDHUP)
{
epmgr->tfd->set_fd_handler(0, false, NULL);
finished = true;
}
});
interactive = true;
printf("> ");
}
catch (std::exception & e)
{
// Can't add to epoll, STDIN is probably a file
read_cmd();
}
while (!finished)
{
ringloop->loop();
if (!finished)
ringloop->wait();
}
// Destroy the client
delete db;
db = NULL;
cli->flush();
delete cli;
delete epmgr;
delete ringloop;
cli = NULL;
epmgr = NULL;
ringloop = NULL;
}
void kv_cli_t::read_cmd()
{
if (!cur_cmd_alloc)
{
cur_cmd_alloc = 65536;
cur_cmd = (char*)malloc_or_die(cur_cmd_alloc);
}
while (cur_cmd_size < cur_cmd_alloc)
{
int r = read(0, cur_cmd+cur_cmd_size, cur_cmd_alloc-cur_cmd_size);
if (r < 0 && errno != EAGAIN)
fprintf(stderr, "Error reading from stdin: %s\n", strerror(errno));
if (r > 0)
cur_cmd_size += r;
if (r == 0)
eof = true;
if (r <= 0)
break;
}
next_cmd();
}
void kv_cli_t::next_cmd()
{
if (in_progress > 0)
{
return;
}
int pos = 0;
for (; pos < cur_cmd_size; pos++)
{
if (cur_cmd[pos] == '\n' || cur_cmd[pos] == '\r')
{
auto cmd = trim(std::string(cur_cmd, pos));
pos++;
memmove(cur_cmd, cur_cmd+pos, cur_cmd_size-pos);
cur_cmd_size -= pos;
in_progress++;
handle_cmd(cmd, [this]()
{
in_progress--;
if (interactive)
printf("> ");
next_cmd();
if (!in_progress)
read_cmd();
});
break;
}
}
if (eof && !in_progress)
{
finished = true;
}
}
void kv_cli_t::handle_cmd(const std::string & cmd, std::function<void()> cb)
{
if (cmd == "")
{
cb();
return;
}
auto pos = cmd.find_first_of(" \t");
if (pos != std::string::npos)
{
while (pos < cmd.size()-1 && (cmd[pos+1] == ' ' || cmd[pos+1] == '\t'))
pos++;
}
auto opname = strtolower(pos == std::string::npos ? cmd : cmd.substr(0, pos));
if (opname == "open")
{
uint64_t pool_id = 0;
inode_t inode_id = 0;
uint32_t kv_block_size = 0;
int scanned = sscanf(cmd.c_str() + pos+1, "%lu %lu %u", &pool_id, &inode_id, &kv_block_size);
if (scanned == 2)
{
kv_block_size = 4096;
}
if (scanned < 2 || !pool_id || !inode_id || !kv_block_size || (kv_block_size & (kv_block_size-1)) != 0)
{
fprintf(stderr, "Usage: open <pool_id> <inode_id> [block_size]. Block size must be a power of 2. Default is 4096.\n");
cb();
return;
}
cfg["kv_block_size"] = (uint64_t)kv_block_size;
db->open(INODE_WITH_POOL(pool_id, inode_id), cfg, [=](int res)
{
if (res < 0)
fprintf(stderr, "Error opening index: %s (code %d)\n", strerror(-res), res);
else
printf("Index opened. Current size: %lu bytes\n", db->get_size());
cb();
});
}
else if (opname == "config")
{
auto pos2 = cmd.find_first_of(" \t", pos+1);
if (pos2 == std::string::npos)
{
fprintf(stderr, "Usage: config <property> <value>\n");
cb();
return;
}
auto key = trim(cmd.substr(pos+1, pos2-pos-1));
auto value = parse_size(trim(cmd.substr(pos2+1)));
if (key != "kv_memory_limit" &&
key != "kv_allocate_blocks" &&
key != "kv_evict_max_misses" &&
key != "kv_evict_attempts_per_level" &&
key != "kv_evict_unused_age" &&
key != "kv_log_level")
{
fprintf(
stderr, "Allowed properties: kv_memory_limit, kv_allocate_blocks,"
" kv_evict_max_misses, kv_evict_attempts_per_level, kv_evict_unused_age, kv_log_level\n"
);
}
else
{
cfg[key] = value;
db->set_config(cfg);
}
cb();
}
else if (opname == "get" || opname == "set" || opname == "del")
{
if (opname == "get" || opname == "del")
{
if (pos == std::string::npos)
{
fprintf(stderr, "Usage: %s <key>\n", opname.c_str());
cb();
return;
}
auto key = trim(cmd.substr(pos+1));
if (opname == "get")
{
db->get(key, [this, cb](int res, const std::string & value)
{
if (res < 0)
fprintf(stderr, "Error: %s (code %d)\n", strerror(-res), res);
else
{
write(1, value.c_str(), value.size());
write(1, "\n", 1);
}
cb();
});
}
else
{
db->del(key, [this, cb](int res)
{
if (res < 0)
fprintf(stderr, "Error: %s (code %d)\n", strerror(-res), res);
else
printf("OK\n");
cb();
});
}
}
else
{
auto pos2 = cmd.find_first_of(" \t", pos+1);
if (pos2 == std::string::npos)
{
fprintf(stderr, "Usage: set <key> <value>\n");
cb();
return;
}
auto key = trim(cmd.substr(pos+1, pos2-pos-1));
auto value = trim(cmd.substr(pos2+1));
db->set(key, value, [this, cb](int res)
{
if (res < 0)
fprintf(stderr, "Error: %s (code %d)\n", strerror(-res), res);
else
printf("OK\n");
cb();
});
}
}
else if (opname == "list")
{
std::string start, end;
if (pos != std::string::npos)
{
auto pos2 = cmd.find_first_of(" \t", pos+1);
if (pos2 != std::string::npos)
{
start = trim(cmd.substr(pos+1, pos2-pos-1));
end = trim(cmd.substr(pos2+1));
}
else
{
start = trim(cmd.substr(pos+1));
}
}
void *handle = db->list_start(start);
db->list_next(handle, [=](int res, const std::string & key, const std::string & value)
{
if (res < 0)
{
if (res != -ENOENT)
{
fprintf(stderr, "Error: %s (code %d)\n", strerror(-res), res);
}
db->list_close(handle);
cb();
}
else
{
printf("%s = %s\n", key.c_str(), value.c_str());
db->list_next(handle, NULL);
}
});
}
else if (opname == "close")
{
db->close([=]()
{
printf("Index closed\n");
cb();
});
}
else if (opname == "quit" || opname == "q")
{
::close(0);
finished = true;
}
else
{
fprintf(
stderr, "Unknown operation: %s. Supported operations:\n"
"open <pool_id> <inode_id> [block_size]\n"
"config <property> <value>\n"
"get <key>\nset <key> <value>\ndel <key>\nlist [<start> [end]]\n"
"close\nquit\n", opname.c_str()
);
cb();
}
}
int main(int narg, const char *args[])
{
setvbuf(stdout, NULL, _IONBF, 0);
setvbuf(stderr, NULL, _IONBF, 0);
exe_name = args[0];
kv_cli_t *p = new kv_cli_t();
p->run(kv_cli_t::parse_args(narg, args));
delete p;
return 0;
}
src/kv_db.cpp (new file, 2068 lines; diff suppressed because it is too large)
src/kv_db.h (new file, 39 lines)
@@ -0,0 +1,39 @@
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 (see README.md for details)
//
// Vitastor shared key/value database
// Parallel optimistic B-Tree O:-)
#pragma once
#include "cluster_client.h"
struct kv_db_t;
struct kv_dbw_t
{
kv_dbw_t(cluster_client_t *cli);
~kv_dbw_t();
void open(inode_t inode_id, json11::Json cfg, std::function<void(int)> cb);
void set_config(json11::Json cfg);
void close(std::function<void()> cb);
uint64_t get_size();
void get(const std::string & key, std::function<void(int res, const std::string & value)> cb,
bool allow_old_cached = false);
void set(const std::string & key, const std::string & value, std::function<void(int res)> cb,
std::function<bool(int res, const std::string & value)> cas_compare = NULL);
void del(const std::string & key, std::function<void(int res)> cb,
std::function<bool(int res, const std::string & value)> cas_compare = NULL);
void update(const std::string & key,
std::function<int(int res, const std::string & old_value, std::string & new_value)> cas_compare,
std::function<bool(int res)> cb);
void* list_start(const std::string & start);
void list_next(void *handle, std::function<void(int res, const std::string & key, const std::string & value)> cb);
void list_close(void *handle);
kv_db_t *db;
};
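kv_db.h above only declares the asynchronous kv_dbw_t interface, so a short usage sketch may help when reading kv_cli.cpp and kv_stress.cpp. This is an illustration only, assuming the declarations shown above; kv_demo and its arguments are hypothetical and not part of this commit:

// Illustration only: open an index, write one key, read it back.
#include <cstdio>
#include <cstring>
#include "kv_db.h"

void kv_demo(cluster_client_t *cli, inode_t inode_id)
{
    kv_dbw_t *db = new kv_dbw_t(cli);
    db->open(inode_id, json11::Json::object{}, [db](int res)
    {
        if (res < 0)
        {
            fprintf(stderr, "open: %s\n", strerror(-res));
            return;
        }
        db->set("hello", "world", [db](int res)
        {
            db->get("hello", [](int res, const std::string & value)
            {
                printf("hello = %s\n", value.c_str());
            });
        });
    });
    // callbacks are invoked from the cluster client's ring loop, which the
    // caller is expected to keep running (as kv_cli_t::run() does above)
}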
src/kv_stress.cpp (new file, 697 lines)
@@ -0,0 +1,697 @@
|
||||
// Copyright (c) Vitaliy Filippov, 2019+
|
||||
// License: VNPL-1.1 (see README.md for details)
|
||||
//
|
||||
// Vitastor shared key/value database stress tester / benchmark
|
||||
|
||||
#define _XOPEN_SOURCE
|
||||
#include <limits.h>
|
||||
|
||||
#include <netinet/tcp.h>
|
||||
#include <sys/epoll.h>
|
||||
#include <unistd.h>
|
||||
#include <fcntl.h>
|
||||
//#include <signal.h>
|
||||
|
||||
#include "epoll_manager.h"
|
||||
#include "str_util.h"
|
||||
#include "kv_db.h"
|
||||
|
||||
const char *exe_name = NULL;
|
||||
|
||||
struct kv_test_listing_t
|
||||
{
|
||||
uint64_t count = 0, done = 0;
|
||||
void *handle = NULL;
|
||||
std::string next_after;
|
||||
std::set<std::string> inflights;
|
||||
timespec tv_begin;
|
||||
bool error = false;
|
||||
};
|
||||
|
||||
struct kv_test_lat_t
|
||||
{
|
||||
const char *name = NULL;
|
||||
uint64_t usec = 0, count = 0;
|
||||
};
|
||||
|
||||
struct kv_test_stat_t
|
||||
{
|
||||
kv_test_lat_t get, add, update, del, list;
|
||||
uint64_t list_keys = 0;
|
||||
};
|
||||
|
||||
class kv_test_t
|
||||
{
|
||||
public:
|
||||
// Config
|
||||
json11::Json::object kv_cfg;
|
||||
std::string key_prefix, key_suffix;
|
||||
uint64_t inode_id = 0;
|
||||
uint64_t op_count = 1000000;
|
||||
uint64_t runtime_sec = 0;
|
||||
uint64_t parallelism = 4;
|
||||
uint64_t reopen_prob = 1;
|
||||
uint64_t get_prob = 30000;
|
||||
uint64_t add_prob = 20000;
|
||||
uint64_t update_prob = 20000;
|
||||
uint64_t del_prob = 5000;
|
||||
uint64_t list_prob = 300;
|
||||
uint64_t min_key_len = 10;
|
||||
uint64_t max_key_len = 70;
|
||||
uint64_t min_value_len = 50;
|
||||
uint64_t max_value_len = 300;
|
||||
uint64_t min_list_count = 10;
|
||||
uint64_t max_list_count = 1000;
|
||||
uint64_t print_stats_interval = 1;
|
||||
bool json_output = false;
|
||||
uint64_t log_level = 1;
|
||||
bool trace = false;
|
||||
bool stop_on_error = false;
|
||||
// FIXME: Multiple clients
|
||||
kv_test_stat_t stat, prev_stat;
|
||||
timespec prev_stat_time, start_stat_time;
|
||||
|
||||
// State
|
||||
kv_dbw_t *db = NULL;
|
||||
ring_loop_t *ringloop = NULL;
|
||||
epoll_manager_t *epmgr = NULL;
|
||||
cluster_client_t *cli = NULL;
|
||||
ring_consumer_t consumer;
|
||||
bool finished = false;
|
||||
uint64_t total_prob = 0;
|
||||
uint64_t ops_sent = 0, ops_done = 0;
|
||||
int stat_timer_id = -1;
|
||||
int in_progress = 0;
|
||||
bool reopening = false;
|
||||
std::set<kv_test_listing_t*> listings;
|
||||
std::set<std::string> changing_keys;
|
||||
std::map<std::string, std::string> values;
|
||||
|
||||
~kv_test_t();
|
||||
|
||||
static json11::Json::object parse_args(int narg, const char *args[]);
|
||||
void parse_config(json11::Json cfg);
|
||||
void run(json11::Json cfg);
|
||||
void loop();
|
||||
void print_stats(kv_test_stat_t & prev_stat, timespec & prev_stat_time);
|
||||
void print_total_stats();
|
||||
void start_change(const std::string & key);
|
||||
void stop_change(const std::string & key);
|
||||
void add_stat(kv_test_lat_t & stat, timespec tv_begin);
|
||||
};
|
||||
|
||||
kv_test_t::~kv_test_t()
|
||||
{
|
||||
if (db)
|
||||
delete db;
|
||||
if (cli)
|
||||
{
|
||||
cli->flush();
|
||||
delete cli;
|
||||
}
|
||||
if (epmgr)
|
||||
delete epmgr;
|
||||
if (ringloop)
|
||||
delete ringloop;
|
||||
}
|
||||
|
||||
json11::Json::object kv_test_t::parse_args(int narg, const char *args[])
|
||||
{
|
||||
json11::Json::object cfg;
|
||||
for (int i = 1; i < narg; i++)
|
||||
{
|
||||
if (!strcmp(args[i], "-h") || !strcmp(args[i], "--help"))
|
||||
{
|
||||
printf(
|
||||
"Vitastor Key/Value DB stress tester / benchmark\n"
|
||||
"(c) Vitaliy Filippov, 2023+ (VNPL-1.1)\n"
|
||||
"\n"
|
||||
"USAGE: %s --pool_id POOL_ID --inode_id INODE_ID [OPTIONS]\n"
|
||||
" --op_count 1000000\n"
|
||||
" Total operations to run during test. 0 means unlimited\n"
|
||||
" --key_prefix \"\"\n"
|
||||
" Prefix for all keys read or written (to avoid collisions)\n"
|
||||
" --key_suffix \"\"\n"
|
||||
" Suffix for all keys read or written (to avoid collisions, but scan all DB)\n"
|
||||
" --runtime 0\n"
|
||||
" Run for this number of seconds. 0 means unlimited\n"
|
||||
" --parallelism 4\n"
|
||||
" Run this number of operations in parallel\n"
|
||||
" --get_prob 30000\n"
|
||||
" Fraction of key retrieve operations\n"
|
||||
" --add_prob 20000\n"
|
||||
" Fraction of key addition operations\n"
|
||||
" --update_prob 20000\n"
|
||||
" Fraction of key update operations\n"
|
||||
" --del_prob 30000\n"
|
||||
" Fraction of key delete operations\n"
|
||||
" --list_prob 300\n"
|
||||
" Fraction of listing operations\n"
|
||||
" --min_key_len 10\n"
|
||||
" Minimum key size in bytes\n"
|
||||
" --max_key_len 70\n"
|
||||
" Maximum key size in bytes\n"
|
||||
" --min_value_len 50\n"
|
||||
" Minimum value size in bytes\n"
|
||||
" --max_value_len 300\n"
|
||||
" Maximum value size in bytes\n"
|
||||
" --min_list_count 10\n"
|
||||
" Minimum number of keys read in listing (0 = all keys)\n"
|
||||
" --max_list_count 1000\n"
|
||||
" Maximum number of keys read in listing\n"
|
||||
" --print_stats 1\n"
|
||||
" Print operation statistics every this number of seconds\n"
|
||||
" --json\n"
|
||||
" JSON output\n"
|
||||
" --stop_on_error 0\n"
|
||||
" Stop on first execution error, mismatch, lost key or extra key during listing\n"
|
||||
" --kv_memory_limit 128M\n"
|
||||
" Maximum memory to use for vitastor-kv index cache\n"
|
||||
" --kv_allocate_blocks 4\n"
|
||||
" Number of PG blocks used for new tree block allocation in parallel\n"
|
||||
" --kv_evict_max_misses 10\n"
|
||||
" Eviction algorithm parameter: retry eviction from another random spot\n"
|
||||
" if this number of keys is used currently or was used recently\n"
|
||||
" --kv_evict_attempts_per_level 3\n"
|
||||
" Retry eviction at most this number of times per tree level, starting\n"
|
||||
" with bottom-most levels\n"
|
||||
" --kv_evict_unused_age 1000\n"
|
||||
" Evict only keys unused during this number of last operations\n"
|
||||
" --kv_log_level 1\n"
|
||||
" Log level. 0 = errors, 1 = warnings, 10 = trace operations\n",
|
||||
exe_name
|
||||
);
|
||||
exit(0);
|
||||
}
|
||||
else if (args[i][0] == '-' && args[i][1] == '-')
|
||||
{
|
||||
const char *opt = args[i]+2;
|
||||
cfg[opt] = !strcmp(opt, "json") || i == narg-1 ? "1" : args[++i];
|
||||
}
|
||||
}
|
||||
return cfg;
|
||||
}
|
||||
|
||||
void kv_test_t::parse_config(json11::Json cfg)
|
||||
{
|
||||
inode_id = INODE_WITH_POOL(cfg["pool_id"].uint64_value(), cfg["inode_id"].uint64_value());
|
||||
if (cfg["op_count"].uint64_value() > 0)
|
||||
op_count = cfg["op_count"].uint64_value();
|
||||
key_prefix = cfg["key_prefix"].string_value();
|
||||
key_suffix = cfg["key_suffix"].string_value();
|
||||
if (cfg["runtime"].uint64_value() > 0)
|
||||
runtime_sec = cfg["runtime"].uint64_value();
|
||||
if (cfg["parallelism"].uint64_value() > 0)
|
||||
parallelism = cfg["parallelism"].uint64_value();
|
||||
if (!cfg["reopen_prob"].is_null())
|
||||
reopen_prob = cfg["reopen_prob"].uint64_value();
|
||||
if (!cfg["get_prob"].is_null())
|
||||
get_prob = cfg["get_prob"].uint64_value();
|
||||
if (!cfg["add_prob"].is_null())
|
||||
add_prob = cfg["add_prob"].uint64_value();
|
||||
if (!cfg["update_prob"].is_null())
|
||||
update_prob = cfg["update_prob"].uint64_value();
|
||||
if (!cfg["del_prob"].is_null())
|
||||
del_prob = cfg["del_prob"].uint64_value();
|
||||
if (!cfg["list_prob"].is_null())
|
||||
list_prob = cfg["list_prob"].uint64_value();
|
||||
if (!cfg["min_key_len"].is_null())
|
||||
min_key_len = cfg["min_key_len"].uint64_value();
|
||||
if (cfg["max_key_len"].uint64_value() > 0)
|
||||
max_key_len = cfg["max_key_len"].uint64_value();
|
||||
if (!cfg["min_value_len"].is_null())
|
||||
min_value_len = cfg["min_value_len"].uint64_value();
|
||||
if (cfg["max_value_len"].uint64_value() > 0)
|
||||
max_value_len = cfg["max_value_len"].uint64_value();
|
||||
if (!cfg["min_list_count"].is_null())
|
||||
min_list_count = cfg["min_list_count"].uint64_value();
|
||||
if (!cfg["max_list_count"].is_null())
|
||||
max_list_count = cfg["max_list_count"].uint64_value();
|
||||
if (!cfg["print_stats"].is_null())
|
||||
print_stats_interval = cfg["print_stats"].uint64_value();
|
||||
if (!cfg["json"].is_null())
|
||||
json_output = true;
|
||||
if (!cfg["stop_on_error"].is_null())
|
||||
stop_on_error = cfg["stop_on_error"].bool_value();
|
||||
if (!cfg["kv_memory_limit"].is_null())
|
||||
kv_cfg["kv_memory_limit"] = cfg["kv_memory_limit"];
|
||||
if (!cfg["kv_allocate_blocks"].is_null())
|
||||
kv_cfg["kv_allocate_blocks"] = cfg["kv_allocate_blocks"];
|
||||
if (!cfg["kv_evict_max_misses"].is_null())
|
||||
kv_cfg["kv_evict_max_misses"] = cfg["kv_evict_max_misses"];
|
||||
if (!cfg["kv_evict_attempts_per_level"].is_null())
|
||||
kv_cfg["kv_evict_attempts_per_level"] = cfg["kv_evict_attempts_per_level"];
|
||||
if (!cfg["kv_evict_unused_age"].is_null())
|
||||
kv_cfg["kv_evict_unused_age"] = cfg["kv_evict_unused_age"];
|
||||
if (!cfg["kv_log_level"].is_null())
|
||||
{
|
||||
log_level = cfg["kv_log_level"].uint64_value();
|
||||
trace = log_level >= 10;
|
||||
kv_cfg["kv_log_level"] = cfg["kv_log_level"];
|
||||
}
|
||||
total_prob = reopen_prob+get_prob+add_prob+update_prob+del_prob+list_prob;
|
||||
stat.get.name = "get";
|
||||
stat.add.name = "add";
|
||||
stat.update.name = "update";
|
||||
stat.del.name = "del";
|
||||
stat.list.name = "list";
|
||||
}
|
||||
|
||||
void kv_test_t::run(json11::Json cfg)
|
||||
{
|
||||
srand48(time(NULL));
|
||||
parse_config(cfg);
|
||||
// Create client
|
||||
ringloop = new ring_loop_t(512);
|
||||
epmgr = new epoll_manager_t(ringloop);
|
||||
cli = new cluster_client_t(ringloop, epmgr->tfd, cfg);
|
||||
db = new kv_dbw_t(cli);
|
||||
// Load image metadata
|
||||
while (!cli->is_ready())
|
||||
{
|
||||
ringloop->loop();
|
||||
if (cli->is_ready())
|
||||
break;
|
||||
ringloop->wait();
|
||||
}
|
||||
// Run
|
||||
reopening = true;
|
||||
db->open(inode_id, kv_cfg, [this](int res)
|
||||
{
|
||||
reopening = false;
|
||||
if (res < 0)
|
||||
{
|
||||
fprintf(stderr, "ERROR: Open index: %d (%s)\n", res, strerror(-res));
|
||||
exit(1);
|
||||
}
|
||||
if (trace)
|
||||
printf("Index opened\n");
|
||||
ringloop->wakeup();
|
||||
});
|
||||
consumer.loop = [this]() { loop(); };
|
||||
ringloop->register_consumer(&consumer);
|
||||
if (print_stats_interval)
|
||||
stat_timer_id = epmgr->tfd->set_timer(print_stats_interval*1000, true, [this](int) { print_stats(prev_stat, prev_stat_time); });
|
||||
clock_gettime(CLOCK_REALTIME, &start_stat_time);
|
||||
prev_stat_time = start_stat_time;
|
||||
while (!finished)
|
||||
{
|
||||
ringloop->loop();
|
||||
if (!finished)
|
||||
ringloop->wait();
|
||||
}
|
||||
if (stat_timer_id >= 0)
|
||||
epmgr->tfd->clear_timer(stat_timer_id);
|
||||
ringloop->unregister_consumer(&consumer);
|
||||
// Print total stats
|
||||
print_total_stats();
|
||||
// Destroy the client
|
||||
delete db;
|
||||
db = NULL;
|
||||
cli->flush();
|
||||
delete cli;
|
||||
delete epmgr;
|
||||
delete ringloop;
|
||||
cli = NULL;
|
||||
epmgr = NULL;
|
||||
ringloop = NULL;
|
||||
}
|
||||
|
||||
static const char *base64_chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789@+/";
|
||||
|
||||
std::string random_str(int len)
|
||||
{
|
||||
std::string str;
|
||||
str.resize(len);
|
||||
for (int i = 0; i < len; i++)
|
||||
{
|
||||
str[i] = base64_chars[lrand48() % 64];
|
||||
}
|
||||
return str;
|
||||
}
|
||||
|
||||
void kv_test_t::loop()
|
||||
{
|
||||
if (reopening)
|
||||
{
|
||||
return;
|
||||
}
|
||||
if (ops_done >= op_count)
|
||||
{
|
||||
finished = true;
|
||||
}
|
||||
while (!finished && ops_sent < op_count && in_progress < parallelism)
|
||||
{
|
||||
uint64_t dice = (lrand48() % total_prob);
|
||||
if (dice < reopen_prob)
|
||||
{
|
||||
reopening = true;
|
||||
db->close([this]()
|
||||
{
|
||||
if (trace)
|
||||
printf("Index closed\n");
|
||||
db->open(inode_id, kv_cfg, [this](int res)
|
||||
{
|
||||
reopening = false;
|
||||
if (res < 0)
|
||||
{
|
||||
fprintf(stderr, "ERROR: Reopen index: %d (%s)\n", res, strerror(-res));
|
||||
finished = true;
|
||||
return;
|
||||
}
|
||||
if (trace)
|
||||
printf("Index reopened\n");
|
||||
ringloop->wakeup();
|
||||
});
|
||||
});
|
||||
return;
|
||||
}
|
||||
else if (dice < reopen_prob+get_prob)
|
||||
{
|
||||
// get existing
|
||||
auto key = random_str(max_key_len);
|
||||
auto k_it = values.lower_bound(key);
|
||||
if (k_it == values.end())
|
||||
continue;
|
||||
key = k_it->first;
|
||||
if (changing_keys.find(key) != changing_keys.end())
|
||||
continue;
|
||||
in_progress++;
|
||||
ops_sent++;
|
||||
if (trace)
|
||||
printf("get %s\n", key.c_str());
|
||||
timespec tv_begin;
|
||||
clock_gettime(CLOCK_REALTIME, &tv_begin);
|
||||
db->get(key, [this, key, tv_begin](int res, const std::string & value)
|
||||
{
|
||||
add_stat(stat.get, tv_begin);
|
||||
ops_done++;
|
||||
in_progress--;
|
||||
auto it = values.find(key);
|
||||
if (res != (it == values.end() ? -ENOENT : 0))
|
||||
{
|
||||
fprintf(stderr, "ERROR: get %s: %d (%s)\n", key.c_str(), res, strerror(-res));
|
||||
if (stop_on_error)
|
||||
exit(1);
|
||||
}
|
||||
else if (it != values.end() && value != it->second)
|
||||
{
|
||||
fprintf(stderr, "ERROR: get %s: mismatch: %s vs %s\n", key.c_str(), value.c_str(), it->second.c_str());
|
||||
if (stop_on_error)
|
||||
exit(1);
|
||||
}
|
||||
ringloop->wakeup();
|
||||
});
|
||||
}
|
||||
else if (dice < reopen_prob+get_prob+add_prob+update_prob)
|
||||
{
|
||||
bool is_add = false;
|
||||
std::string key;
|
||||
if (dice < reopen_prob+get_prob+add_prob)
|
||||
{
|
||||
// add
|
||||
is_add = true;
|
||||
uint64_t key_len = min_key_len + (max_key_len > min_key_len ? lrand48() % (max_key_len-min_key_len) : 0);
|
||||
key = key_prefix + random_str(key_len) + key_suffix;
|
||||
}
|
||||
else
|
||||
{
|
||||
// update
|
||||
key = random_str(max_key_len);
|
||||
auto k_it = values.lower_bound(key);
|
||||
if (k_it == values.end())
|
||||
continue;
|
||||
key = k_it->first;
|
||||
}
|
||||
if (changing_keys.find(key) != changing_keys.end())
|
||||
continue;
|
||||
uint64_t value_len = min_value_len + (max_value_len > min_value_len ? lrand48() % (max_value_len-min_value_len) : 0);
|
||||
auto value = random_str(value_len);
|
||||
start_change(key);
|
||||
ops_sent++;
|
||||
in_progress++;
|
||||
if (trace)
|
||||
printf("set %s = %s\n", key.c_str(), value.c_str());
|
||||
timespec tv_begin;
|
||||
clock_gettime(CLOCK_REALTIME, &tv_begin);
|
||||
db->set(key, value, [this, key, value, tv_begin, is_add](int res)
|
||||
{
|
||||
add_stat(is_add ? stat.add : stat.update, tv_begin);
|
||||
stop_change(key);
|
||||
ops_done++;
|
||||
in_progress--;
|
||||
if (res != 0)
|
||||
{
|
||||
fprintf(stderr, "ERROR: set %s = %s: %d (%s)\n", key.c_str(), value.c_str(), res, strerror(-res));
|
||||
if (stop_on_error)
|
||||
exit(1);
|
||||
}
|
||||
else
|
||||
{
|
||||
values[key] = value;
|
||||
}
|
||||
ringloop->wakeup();
|
||||
}, NULL);
|
||||
}
|
||||
else if (dice < reopen_prob+get_prob+add_prob+update_prob+del_prob)
|
||||
{
|
||||
// delete
|
||||
auto key = random_str(max_key_len);
|
||||
auto k_it = values.lower_bound(key);
|
||||
if (k_it == values.end())
|
||||
continue;
|
||||
key = k_it->first;
|
||||
if (changing_keys.find(key) != changing_keys.end())
|
||||
continue;
|
||||
start_change(key);
|
||||
ops_sent++;
|
||||
in_progress++;
|
||||
if (trace)
|
||||
printf("del %s\n", key.c_str());
|
||||
timespec tv_begin;
|
||||
clock_gettime(CLOCK_REALTIME, &tv_begin);
|
||||
db->del(key, [this, key, tv_begin](int res)
|
||||
{
|
||||
add_stat(stat.del, tv_begin);
|
||||
stop_change(key);
|
||||
ops_done++;
|
||||
in_progress--;
|
||||
if (res != 0)
|
||||
{
|
||||
fprintf(stderr, "ERROR: del %s: %d (%s)\n", key.c_str(), res, strerror(-res));
|
||||
if (stop_on_error)
|
||||
exit(1);
|
||||
}
|
||||
else
|
||||
{
|
||||
values.erase(key);
|
||||
}
|
||||
ringloop->wakeup();
|
||||
}, NULL);
|
||||
}
|
||||
else if (dice < reopen_prob+get_prob+add_prob+update_prob+del_prob+list_prob)
|
||||
{
|
||||
// list
|
||||
ops_sent++;
|
||||
in_progress++;
|
||||
auto key = random_str(max_key_len);
|
||||
auto lst = new kv_test_listing_t;
|
||||
auto k_it = values.lower_bound(key);
|
||||
lst->count = min_list_count + (max_list_count > min_list_count ? lrand48() % (max_list_count-min_list_count) : 0);
|
||||
lst->handle = db->list_start(k_it == values.begin() ? key_prefix : key);
|
||||
lst->next_after = k_it == values.begin() ? key_prefix : key;
|
||||
lst->inflights = changing_keys;
|
||||
listings.insert(lst);
|
||||
if (trace)
|
||||
printf("list from %s\n", key.c_str());
|
||||
clock_gettime(CLOCK_REALTIME, &lst->tv_begin);
|
||||
db->list_next(lst->handle, [this, lst](int res, const std::string & key, const std::string & value)
|
||||
{
|
||||
if (log_level >= 11)
|
||||
printf("list: %s = %s\n", key.c_str(), value.c_str());
|
||||
if (res >= 0 && key_prefix.size() && (key.size() < key_prefix.size() ||
|
||||
key.substr(0, key_prefix.size()) != key_prefix))
|
||||
{
|
||||
// stop at this key
|
||||
res = -ENOENT;
|
||||
}
|
||||
if (res < 0 || (lst->count > 0 && lst->done >= lst->count))
|
||||
{
|
||||
add_stat(stat.list, lst->tv_begin);
|
||||
if (res == 0)
|
||||
{
|
||||
// ok (done >= count)
|
||||
}
|
||||
else if (res != -ENOENT)
|
||||
{
|
||||
fprintf(stderr, "ERROR: list: %d (%s)\n", res, strerror(-res));
|
||||
lst->error = true;
|
||||
}
|
||||
else
|
||||
{
|
||||
auto k_it = lst->next_after == "" ? values.begin() : values.upper_bound(lst->next_after);
|
||||
while (k_it != values.end())
|
||||
{
|
||||
while (k_it != values.end() && lst->inflights.find(k_it->first) != lst->inflights.end())
|
||||
k_it++;
|
||||
if (k_it != values.end())
|
||||
{
|
||||
fprintf(stderr, "ERROR: list: missing key %s\n", (k_it++)->first.c_str());
|
||||
lst->error = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (lst->error && stop_on_error)
|
||||
exit(1);
|
||||
ops_done++;
|
||||
in_progress--;
|
||||
db->list_close(lst->handle);
|
||||
delete lst;
|
||||
listings.erase(lst);
|
||||
ringloop->wakeup();
|
||||
}
|
||||
else
|
||||
{
|
||||
stat.list_keys++;
|
||||
// Do not check modified keys in listing
|
||||
// Listing may return their old or new state
|
||||
if ((!key_suffix.size() || key.size() >= key_suffix.size() &&
|
||||
key.substr(key.size()-key_suffix.size()) == key_suffix) &&
|
||||
lst->inflights.find(key) == lst->inflights.end())
|
||||
{
|
||||
lst->done++;
|
||||
auto k_it = lst->next_after == "" ? values.begin() : values.upper_bound(lst->next_after);
|
||||
while (true)
|
||||
{
|
||||
while (k_it != values.end() && lst->inflights.find(k_it->first) != lst->inflights.end())
|
||||
{
|
||||
k_it++;
|
||||
}
|
||||
if (k_it == values.end() || k_it->first > key)
|
||||
{
|
||||
fprintf(stderr, "ERROR: list: extra key %s\n", key.c_str());
|
||||
lst->error = true;
|
||||
break;
|
||||
}
|
||||
else if (k_it->first < key)
|
||||
{
|
||||
fprintf(stderr, "ERROR: list: missing key %s\n", k_it->first.c_str());
|
||||
lst->error = true;
|
||||
lst->next_after = k_it->first;
|
||||
k_it++;
|
||||
}
|
||||
else
|
||||
{
|
||||
if (k_it->second != value)
|
||||
{
|
||||
fprintf(stderr, "ERROR: list: mismatch: %s = %s but should be %s\n",
|
||||
key.c_str(), value.c_str(), k_it->second.c_str());
|
||||
lst->error = true;
|
||||
}
|
||||
lst->next_after = k_it->first;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
db->list_next(lst->handle, NULL);
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void kv_test_t::add_stat(kv_test_lat_t & stat, timespec tv_begin)
|
||||
{
|
||||
timespec tv_end;
|
||||
clock_gettime(CLOCK_REALTIME, &tv_end);
|
||||
int64_t usec = (tv_end.tv_sec - tv_begin.tv_sec)*1000000 +
|
||||
(tv_end.tv_nsec - tv_begin.tv_nsec)/1000;
|
||||
if (usec > 0)
|
||||
{
|
||||
stat.usec += usec;
|
||||
stat.count++;
|
||||
}
|
||||
}
|
||||
|
||||
void kv_test_t::print_stats(kv_test_stat_t & prev_stat, timespec & prev_stat_time)
|
||||
{
|
||||
timespec cur_stat_time;
|
||||
clock_gettime(CLOCK_REALTIME, &cur_stat_time);
|
||||
int64_t usec = (cur_stat_time.tv_sec - prev_stat_time.tv_sec)*1000000 +
|
||||
(cur_stat_time.tv_nsec - prev_stat_time.tv_nsec)/1000;
|
||||
if (usec > 0)
|
||||
{
|
||||
kv_test_lat_t *lats[] = { &stat.get, &stat.add, &stat.update, &stat.del, &stat.list };
|
||||
kv_test_lat_t *prev[] = { &prev_stat.get, &prev_stat.add, &prev_stat.update, &prev_stat.del, &prev_stat.list };
|
||||
if (!json_output)
|
||||
{
|
||||
char buf[128] = { 0 };
|
||||
for (int i = 0; i < sizeof(lats)/sizeof(lats[0]); i++)
|
||||
{
|
||||
snprintf(buf, sizeof(buf)-1, "%.1f %s/s (%lu us)", (lats[i]->count-prev[i]->count)*1000000.0/usec,
|
||||
lats[i]->name, (lats[i]->usec-prev[i]->usec)/(lats[i]->count-prev[i]->count > 0 ? lats[i]->count-prev[i]->count : 1));
|
||||
int k;
|
||||
for (k = strlen(buf); k < strlen(lats[i]->name)+21; k++)
|
||||
buf[k] = ' ';
|
||||
buf[k] = 0;
|
||||
printf("%s", buf);
|
||||
}
|
||||
printf("\n");
|
||||
}
|
||||
else
|
||||
{
|
||||
int64_t runtime = (cur_stat_time.tv_sec - start_stat_time.tv_sec)*1000000 +
|
||||
(cur_stat_time.tv_nsec - start_stat_time.tv_nsec)/1000;
|
||||
printf("{\"runtime\":%.1f", (double)runtime/1000000.0);
|
||||
for (int i = 0; i < sizeof(lats)/sizeof(lats[0]); i++)
|
||||
{
|
||||
if (lats[i]->count > prev[i]->count)
|
||||
{
|
||||
printf(
|
||||
",\"%s\":{\"avg\":{\"iops\":%.1f,\"usec\":%lu},\"total\":{\"count\":%lu,\"usec\":%lu}}",
|
||||
lats[i]->name, (lats[i]->count-prev[i]->count)*1000000.0/usec,
|
||||
(lats[i]->usec-prev[i]->usec)/(lats[i]->count-prev[i]->count),
|
||||
lats[i]->count, lats[i]->usec
|
||||
);
|
||||
}
|
||||
}
|
||||
printf("}\n");
|
||||
}
|
||||
}
|
||||
prev_stat = stat;
|
||||
prev_stat_time = cur_stat_time;
|
||||
}
|
||||
|
||||
void kv_test_t::print_total_stats()
|
||||
{
|
||||
if (!json_output)
|
||||
printf("Total:\n");
|
||||
kv_test_stat_t start_stats;
|
||||
timespec start_stat_time = this->start_stat_time;
|
||||
print_stats(start_stats, start_stat_time);
|
||||
}
|
||||
|
||||
void kv_test_t::start_change(const std::string & key)
|
||||
{
|
||||
changing_keys.insert(key);
|
||||
for (auto lst: listings)
|
||||
{
|
||||
lst->inflights.insert(key);
|
||||
}
|
||||
}
|
||||
|
||||
void kv_test_t::stop_change(const std::string & key)
|
||||
{
|
||||
changing_keys.erase(key);
|
||||
}
|
||||
|
||||
int main(int narg, const char *args[])
|
||||
{
|
||||
setvbuf(stdout, NULL, _IONBF, 0);
|
||||
setvbuf(stderr, NULL, _IONBF, 0);
|
||||
exe_name = args[0];
|
||||
kv_test_t *p = new kv_test_t();
|
||||
p->run(kv_test_t::parse_args(narg, args));
|
||||
delete p;
|
||||
return 0;
|
||||
}
|
@@ -22,7 +22,7 @@ static blockstore_config_t json_to_bs(const json11::Json::object & config)
|
||||
{
|
||||
if (kv.second.is_string())
|
||||
bs[kv.first] = kv.second.string_value();
|
||||
else if (!kv.second.is_null())
|
||||
else
|
||||
bs[kv.first] = kv.second.dump();
|
||||
}
|
||||
return bs;
|
||||
@@ -194,8 +194,7 @@ void osd_t::parse_config(bool init)
|
||||
if (autosync_interval > MAX_AUTOSYNC_INTERVAL)
|
||||
autosync_interval = DEFAULT_AUTOSYNC_INTERVAL;
|
||||
}
|
||||
if (config["autosync_writes"].is_number() ||
|
||||
config["autosync_writes"].string_value() != "")
|
||||
if (!config["autosync_writes"].is_null())
|
||||
{
|
||||
// Allow to set it to 0
|
||||
autosync_writes = config["autosync_writes"].uint64_value();
|
||||
@@ -233,8 +232,6 @@ void osd_t::parse_config(bool init)
|
||||
? 10 : config["recovery_tune_agg_interval"].uint64_value();
|
||||
recovery_tune_sleep_min_us = config["recovery_tune_sleep_min_us"].is_null()
|
||||
? 10 : config["recovery_tune_sleep_min_us"].uint64_value();
|
||||
recovery_tune_sleep_cutoff_us = config["recovery_tune_sleep_cutoff_us"].is_null()
|
||||
? 10000000 : config["recovery_tune_sleep_cutoff_us"].uint64_value();
|
||||
recovery_pg_switch = config["recovery_pg_switch"].uint64_value();
|
||||
if (recovery_pg_switch < 1)
|
||||
recovery_pg_switch = DEFAULT_RECOVERY_PG_SWITCH;
|
||||
|
@@ -125,7 +125,6 @@ class osd_t
|
||||
int recovery_tune_interval = 1;
|
||||
int recovery_tune_agg_interval = 10;
|
||||
int recovery_tune_sleep_min_us = 10;
|
||||
int recovery_tune_sleep_cutoff_us = 10000000;
|
||||
int recovery_pg_switch = DEFAULT_RECOVERY_PG_SWITCH;
|
||||
int recovery_sync_batch = DEFAULT_RECOVERY_BATCH;
|
||||
int inode_vanish_time = 60;
|
||||
@@ -283,7 +282,6 @@ class osd_t
|
||||
void exec_sync_stab_all(osd_op_t *cur_op);
|
||||
void exec_show_config(osd_op_t *cur_op);
|
||||
void exec_secondary(osd_op_t *cur_op);
|
||||
void exec_secondary_real(osd_op_t *cur_op);
|
||||
void secondary_op_callback(osd_op_t *cur_op);
|
||||
|
||||
// primary ops
|
||||
|
@@ -262,8 +262,7 @@ void osd_t::report_statistics()
|
||||
for (auto st_it = inode_stats.begin(); st_it != inode_stats.end(); )
|
||||
{
|
||||
auto & kv = *st_it;
|
||||
auto spc_it = bs_inode_space.find(kv.first);
|
||||
if (spc_it == bs_inode_space.end() || !spc_it->second) // prevent autovivification
|
||||
if (!bs_inode_space[kv.first])
|
||||
{
|
||||
// Is it an empty inode?
|
||||
if (!tv_now.tv_sec)
|
||||
|
@@ -422,10 +422,6 @@ void osd_t::tune_recovery()
|
||||
rtune_avg_lat = total_recovery_usec/recovery_count;
|
||||
uint64_t target_lat = rtune_avg_lat * rtune_avg_lat/1000000.0 * recovery_count/recovery_tune_interval / rtune_target_util;
|
||||
auto sleep_us = target_lat > rtune_avg_lat+recovery_tune_sleep_min_us ? target_lat-rtune_avg_lat : 0;
|
||||
if (sleep_us > recovery_tune_sleep_cutoff_us)
|
||||
{
|
||||
return;
|
||||
}
|
||||
if (recovery_target_sleep_items.size() != recovery_tune_agg_interval)
|
||||
{
|
||||
recovery_target_sleep_items.resize(recovery_tune_agg_interval);
|
||||
@@ -442,7 +438,7 @@ void osd_t::tune_recovery()
|
||||
if (recovery_target_sleep_count < recovery_tune_agg_interval)
|
||||
recovery_target_sleep_count++;
|
||||
recovery_target_sleep_us = recovery_target_sleep_total / recovery_target_sleep_count;
|
||||
if (log_level > 1)
|
||||
if (log_level > 4)
|
||||
{
|
||||
printf(
|
||||
"[OSD %lu] auto-tune: client util: %.2f, recovery util: %.2f, lat: %lu us -> target util %.2f, delay %lu us\n",
|
||||
|
@@ -222,9 +222,6 @@ void osd_t::start_pg_peering(pg_t & pg)
}
if (pg.pg_cursize < pg.pg_minsize)
{
// FIXME: Incomplete EC PGs may currently easily lead to write hangs ("slow ops" in OSD logs)
// because such PGs don't flush unstable entries on secondary OSDs so they can't remove these
// entries from their journals...
pg.state = PG_INCOMPLETE;
report_pg_state(pg);
return;

@@ -706,26 +706,6 @@ resume_5:
remove_object_from_state(op_data->oid, &op_data->object_state, pg);
deref_object_state(pg, &op_data->object_state, true);
}
// Mark PG and OSDs as dirty
for (auto & chunk: (op_data->object_state ? op_data->object_state->osd_set : pg.cur_loc_set))
{
this->dirty_osds.insert(chunk.osd_num);
}
for (auto cl_it = msgr.clients.find(cur_op->peer_fd); cl_it != msgr.clients.end(); )
{
cl_it->second->dirty_pgs.insert({ .pool_id = pg.pool_id, .pg_num = pg.pg_num });
break;
}
dirty_pgs.insert({ .pool_id = pg.pool_id, .pg_num = pg.pg_num });
if (immediate_commit == IMMEDIATE_NONE)
{
unstable_write_count++;
if (unstable_write_count >= autosync_writes)
{
unstable_write_count = 0;
autosync();
}
}
pg.total_count--;
cur_op->reply.hdr.retval = 0;
continue_others:
@@ -861,15 +861,15 @@ static void calc_rmw_parity_copy_mod(osd_rmw_stripe_t *stripes, int pg_size, int
static void calc_rmw_parity_copy_parity(osd_rmw_stripe_t *stripes, int pg_size, int pg_minsize,
uint64_t *read_osd_set, uint64_t *write_osd_set, uint32_t chunk_size, uint32_t start, uint32_t end)
{
if (write_osd_set != read_osd_set && end != 0)
if (write_osd_set != read_osd_set)
{
for (int role = pg_minsize; role < pg_size; role++)
{
if (write_osd_set[role] != read_osd_set[role] && write_osd_set[role] != 0 && (start != 0 || end != chunk_size))
if (write_osd_set[role] != read_osd_set[role] && (start != 0 || end != chunk_size))
{
// Copy new parity into the read buffer to write it back
memcpy(
(uint8_t*)stripes[role].read_buf + start - stripes[role].read_start,
(uint8_t*)stripes[role].read_buf + start,
stripes[role].write_buf,
end - start
);
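
Context for the `memcpy` above: when only bytes `[start, end)` of a parity chunk were recalculated but the chunk is headed to a different OSD than it was read from, the fresh sub-range has to be folded back into the full-chunk read buffer before the whole chunk is written out. An illustrative standalone sketch with toy buffers (not the vitastor structures):

#include <cstdint>
#include <cstring>
#include <cstdio>

int main()
{
    const uint32_t chunk_size = 8, start = 2, end = 5;
    uint8_t read_buf[chunk_size] = { 1, 1, 1, 1, 1, 1, 1, 1 }; // old parity chunk, read in full
    uint8_t write_buf[end - start] = { 9, 9, 9 };              // freshly computed parity for [2, 5)
    // Fold the new sub-range into the full chunk so the whole chunk can be written out.
    memcpy(read_buf + start, write_buf, end - start);
    for (uint32_t i = 0; i < chunk_size; i++)
        printf("%u ", (unsigned)read_buf[i]);                  // 1 1 9 9 9 1 1 1
    printf("\n");
    return 0;
}
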
@@ -30,7 +30,6 @@ void test16();
void test_recover_22_d2();
void test_ec43_error_bruteforce();
void test_recover_53_d5();
void test_recover_22();

int main(int narg, char *args[])
{

@@ -71,8 +70,6 @@ int main(int narg, char *args[])
test_ec43_error_bruteforce();
// Test 19
test_recover_53_d5();
// Test 20
test_recover_22();
// End
printf("all ok\n");
return 0;
@@ -1247,99 +1244,3 @@ void test_recover_53_d5()
// Done
use_ec(8, 5, false);
}

void test_recover_22()
{
const int bmp = 128*1024 / 4096 / 8;
use_ec(4, 2, true);
osd_num_t osd_set[4] = { 1, 2, 3, 4 };
osd_num_t write_osd_set[4] = { 5, 0, 3, 0 };
osd_rmw_stripe_t stripes[4] = {};
unsigned bitmaps[4] = { 0 };
// split
void *write_buf = (uint8_t*)malloc_or_die(4096);
set_pattern(write_buf, 4096, PATTERN0);
split_stripes(2, 128*1024, 120*1024, 4096, stripes);
assert(stripes[0].req_start == 120*1024 && stripes[0].req_end == 124*1024);
assert(stripes[1].req_start == 0 && stripes[1].req_end == 0);
assert(stripes[2].req_start == 0 && stripes[2].req_end == 0);
assert(stripes[3].req_start == 0 && stripes[3].req_end == 0);
// calc_rmw
void *rmw_buf = calc_rmw(write_buf, stripes, osd_set, 4, 2, 2, write_osd_set, 128*1024, bmp);
for (int i = 0; i < 4; i++)
stripes[i].bmp_buf = bitmaps+i;
assert(rmw_buf);
assert(stripes[0].read_start == 0 && stripes[0].read_end == 128*1024);
assert(stripes[1].read_start == 120*1024 && stripes[1].read_end == 124*1024);
assert(stripes[2].read_start == 0 && stripes[2].read_end == 0);
assert(stripes[3].read_start == 0 && stripes[3].read_end == 0);
assert(stripes[0].write_start == 120*1024 && stripes[0].write_end == 124*1024);
assert(stripes[1].write_start == 0 && stripes[1].write_end == 0);
assert(stripes[2].write_start == 120*1024 && stripes[2].write_end == 124*1024);
assert(stripes[3].write_start == 0 && stripes[3].write_end == 0);
assert(stripes[0].read_buf == (uint8_t*)rmw_buf+4*1024);
assert(stripes[1].read_buf == (uint8_t*)rmw_buf+132*1024);
assert(stripes[2].read_buf == NULL);
assert(stripes[3].read_buf == NULL);
assert(stripes[0].write_buf == write_buf);
assert(stripes[1].write_buf == NULL);
assert(stripes[2].write_buf == (uint8_t*)rmw_buf);
assert(stripes[3].write_buf == NULL);
// encode
set_pattern(stripes[0].read_buf, 128*1024, PATTERN1);
set_pattern(stripes[1].read_buf, 4*1024, PATTERN2);
memset(stripes[0].bmp_buf, 0xff, bmp);
memset(stripes[1].bmp_buf, 0xff, bmp);
calc_rmw_parity_ec(stripes, 4, 2, osd_set, write_osd_set, 128*1024, bmp);
assert(*(uint32_t*)stripes[2].bmp_buf == 0);
assert(stripes[0].write_start == 0 && stripes[0].write_end == 128*1024);
assert(stripes[1].write_start == 0 && stripes[1].write_end == 0);
assert(stripes[2].write_start == 120*1024 && stripes[2].write_end == 124*1024);
assert(stripes[3].write_start == 0 && stripes[3].write_end == 0);
assert(stripes[0].write_buf == stripes[0].read_buf);
assert(stripes[1].write_buf == NULL);
assert(stripes[2].write_buf == (uint8_t*)rmw_buf);
assert(stripes[3].write_buf == NULL);
check_pattern(stripes[2].write_buf, 4*1024, PATTERN0^PATTERN2);
// decode and verify
memset(stripes, 0, sizeof(stripes));
split_stripes(2, 128*1024, 0, 256*1024, stripes);
assert(stripes[0].req_start == 0 && stripes[0].req_end == 128*1024);
assert(stripes[1].req_start == 0 && stripes[1].req_end == 128*1024);
assert(stripes[2].req_start == 0 && stripes[2].req_end == 0);
assert(stripes[3].req_start == 0 && stripes[3].req_end == 0);
for (int role = 0; role < 4; role++)
{
stripes[role].read_start = stripes[role].req_start;
stripes[role].read_end = stripes[role].req_end;
}
assert(extend_missing_stripes(stripes, write_osd_set, 2, 4) == 0);
assert(stripes[0].read_start == 0 && stripes[0].read_end == 128*1024);
assert(stripes[1].read_start == 0 && stripes[1].read_end == 128*1024);
assert(stripes[2].read_start == 0 && stripes[2].read_end == 128*1024);
assert(stripes[3].read_start == 0 && stripes[3].read_end == 0);
void *read_buf = alloc_read_buffer(stripes, 4, 0);
for (int i = 0; i < 4; i++)
stripes[i].bmp_buf = bitmaps+i;
assert(read_buf);
assert(stripes[0].read_buf == read_buf);
assert(stripes[1].read_buf == (uint8_t*)read_buf+128*1024);
assert(stripes[2].read_buf == (uint8_t*)read_buf+2*128*1024);
set_pattern(stripes[0].read_buf, 128*1024, PATTERN1);
set_pattern(stripes[0].read_buf+120*1024, 4*1024, PATTERN0);
set_pattern(stripes[2].read_buf, 128*1024, PATTERN1^PATTERN2);
set_pattern(stripes[2].read_buf+120*1024, 4*1024, PATTERN0^PATTERN2);
memset(stripes[0].bmp_buf, 0xff, bmp);
memset(stripes[2].bmp_buf, 0, bmp);
bitmaps[1] = 0;
bitmaps[3] = 0;
reconstruct_stripes_ec(stripes, 4, 2, bmp);
assert(bitmaps[0] == 0xFFFFFFFF);
assert(*(uint32_t*)stripes[1].bmp_buf == 0xFFFFFFFF);
check_pattern(stripes[1].read_buf, 128*1024, PATTERN2);
free(read_buf);
// Done
free(rmw_buf);
free(write_buf);
use_ec(4, 2, false);
}
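
The `check_pattern(stripes[2].write_buf, 4*1024, PATTERN0^PATTERN2)` assertion above encodes that, in the rewritten 4 KB range of this 2+2 stripe, the first parity chunk equals the byte-wise XOR of the two data chunks: chunk 0 was overwritten with PATTERN0 while chunk 1 holds PATTERN2. A quick standalone sanity check of that relation (the 64-bit fill values here are arbitrary stand-ins, not the test's actual patterns):

#include <cstdint>
#include <cstdio>

int main()
{
    uint64_t PATTERN0 = 0x8c4641acc762840eULL; // stand-in for the data written to chunk 0
    uint64_t PATTERN2 = 0x2bd94ae41ba3fa8bULL; // stand-in for the data already in chunk 1
    uint64_t parity = PATTERN0 ^ PATTERN2;     // XOR parity over the rewritten range
    // Losing either data chunk, it can be rebuilt from the other one plus parity:
    printf("%d %d\n", (parity ^ PATTERN0) == PATTERN2, (parity ^ PATTERN2) == PATTERN0); // 1 1
    return 0;
}
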
@@ -42,10 +42,8 @@ void osd_t::secondary_op_callback(osd_op_t *op)
int retval = op->bs_op->retval;
delete op->bs_op;
op->bs_op = NULL;
if (op->is_recovery_related() && recovery_target_sleep_us &&
op->req.hdr.opcode == OSD_OP_SEC_STABILIZE)
if (op->is_recovery_related() && recovery_target_sleep_us)
{
// Apply pause AFTER commit. Do not apply pause to SYNC at all
if (!op->tv_end.tv_sec)
{
clock_gettime(CLOCK_REALTIME, &op->tv_end);
@@ -61,25 +59,7 @@ void osd_t::secondary_op_callback(osd_op_t *op)
}
}

void osd_t::exec_secondary(osd_op_t *op)
{
if (op->is_recovery_related() && recovery_target_sleep_us &&
op->req.hdr.opcode != OSD_OP_SEC_STABILIZE && op->req.hdr.opcode != OSD_OP_SEC_SYNC)
{
// Apply pause BEFORE write/delete
tfd->set_timer_us(recovery_target_sleep_us, false, [this, op](int timer_id)
{
clock_gettime(CLOCK_REALTIME, &op->tv_begin);
exec_secondary_real(op);
});
}
else
{
exec_secondary_real(op);
}
}

void osd_t::exec_secondary_real(osd_op_t *cur_op)
void osd_t::exec_secondary(osd_op_t *cur_op)
{
if (cur_op->req.hdr.opcode == OSD_OP_SEC_READ_BMP)
{
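
To summarise the two hunks above: in the variant that splits throttling between the two functions, recovery-related writes and deletes are delayed before execution in exec_secondary(), stabilize (commit) ops are delayed afterwards in secondary_op_callback(), and syncs are never delayed. A compact sketch of that decision, using an illustrative enum rather than the real opcode constants:

#include <cstdio>

enum sec_opcode { SEC_WRITE, SEC_DELETE, SEC_SYNC, SEC_STABILIZE }; // illustrative only

static const char *delay_point(sec_opcode op)
{
    if (op == SEC_SYNC)
        return "never delayed";
    if (op == SEC_STABILIZE)
        return "delayed after commit, in secondary_op_callback()";
    return "delayed before execution, in exec_secondary()"; // writes and deletes
}

int main()
{
    printf("write: %s\n", delay_point(SEC_WRITE));
    printf("stabilize: %s\n", delay_point(SEC_STABILIZE));
    printf("sync: %s\n", delay_point(SEC_SYNC));
    return 0;
}
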
@@ -6,7 +6,7 @@ includedir=${prefix}/@CMAKE_INSTALL_INCLUDEDIR@

Name: Vitastor
Description: Vitastor client library
Version: 1.4.4
Version: 1.3.1
Libs: -L${libdir} -lvitastor_client
Cflags: -I${includedir}
@@ -10,7 +10,6 @@ SCHEME=${SCHEME:-replicated}
# OFFSET_ARGS
# PG_SIZE
# PG_MINSIZE
# GLOBAL_CONFIG

if [ "$SCHEME" = "ec" ]; then
OSD_COUNT=${OSD_COUNT:-5}

@@ -20,10 +19,10 @@ fi

if [ "$IMMEDIATE_COMMIT" != "" ]; then
NO_SAME="--journal_no_same_sector_overwrites true --journal_sector_buffer_count 1024 --disable_data_fsync 1 --immediate_commit all --log_level 10 --etcd_stats_interval 5"
$ETCDCTL put /vitastor/config/global '{"recovery_queue_depth":1,"recovery_tune_util_low":1,"immediate_commit":"all","client_enable_writeback":true,"client_max_writeback_iodepth":32'$GLOBAL_CONFIG'}'
$ETCDCTL put /vitastor/config/global '{"recovery_queue_depth":1,"recovery_tune_util_low":1,"osd_out_time":1,"immediate_commit":"all","client_enable_writeback":true}'
else
NO_SAME="--journal_sector_buffer_count 1024 --log_level 10 --etcd_stats_interval 5"
$ETCDCTL put /vitastor/config/global '{"recovery_queue_depth":1,"recovery_tune_util_low":1,"client_enable_writeback":true,"client_max_writeback_iodepth":32'$GLOBAL_CONFIG'}'
$ETCDCTL put /vitastor/config/global '{"recovery_queue_depth":1,"recovery_tune_util_low":1,"osd_out_time":1,"client_enable_writeback":true}'
fi

start_osd_on()
@@ -54,7 +53,7 @@ for i in $(seq 1 $OSD_COUNT); do
start_osd $i
done

(while true; do set +e; node mon/mon-main.js --etcd_address $ETCD_URL --etcd_prefix "/vitastor" --verbose 1; if [[ $? -ne 2 ]]; then break; fi; done) >>./testdata/mon.log 2>&1 &
(while true; do node mon/mon-main.js --etcd_address $ETCD_URL --etcd_prefix "/vitastor" --verbose 1 || true; done) >>./testdata/mon.log 2>&1 &
MON_PID=$!

if [ "$SCHEME" = "ec" ]; then
@@ -45,8 +45,6 @@ IMMEDIATE_COMMIT=1 ./test_rebalance_verify.sh
SCHEME=ec ./test_rebalance_verify.sh
SCHEME=ec IMMEDIATE_COMMIT=1 ./test_rebalance_verify.sh

./test_switch_primary.sh

./test_write.sh
SCHEME=xor ./test_write.sh
@@ -1,7 +1,7 @@
#!/bin/bash -ex

PG_COUNT=2048
GLOBAL_CONFIG=',"osd_out_time":1'

. `dirname $0`/run_3osds.sh

LD_PRELOAD="build/src/libfio_vitastor.so" \
@@ -9,7 +9,6 @@ if [[ "$SCHEME" = "ec" ]]; then
fi
OSD_COUNT=${OSD_COUNT:-7}
PG_COUNT=32
GLOBAL_CONFIG=',"osd_out_time":1'
. `dirname $0`/run_3osds.sh
check_qemu
@@ -2,7 +2,6 @@

PG_MINSIZE=1
SCHEME=replicated
GLOBAL_CONFIG=',"osd_out_time":1'

. `dirname $0`/run_3osds.sh
@@ -20,9 +20,6 @@ LD_PRELOAD="build/src/libfio_vitastor.so" \
fio -thread -name=test -ioengine=build/src/libfio_vitastor.so -bs=1M -direct=1 -iodepth=4 \
-mirror_file=./testdata/mirror.bin -end_fsync=1 -rw=write -etcd=$ETCD_URL -image=testimg

# Save PG primary
primary=$($ETCDCTL get --print-value-only /vitastor/config/pgs | jq -r '.items["1"]["1"].primary')

# Intentionally corrupt OSD data and restart it
zero_osd_pid=OSD${ZERO_OSD}_PID
kill ${!zero_osd_pid}

@@ -37,9 +34,6 @@ start_osd $ZERO_OSD
# Wait until start
wait_up 10

# Wait until PG is back on the same primary
wait_condition 10 "$ETCDCTL"$' get --print-value-only /vitastor/config/pgs | jq -s -e \'.[0].items["1"]["1"].primary == "'$primary'"'"'"

# Trigger scrub
$ETCDCTL put /vitastor/pg/history/1/1 `$ETCDCTL get --print-value-only /vitastor/pg/history/1/1 | jq -s -c '(.[0] // {}) + {"next_scrub":1}'`
@@ -4,7 +4,6 @@ OSD_COUNT=2
PG_SIZE=2
PG_MINSIZE=1
SCHEME=replicated
GLOBAL_CONFIG=',"osd_out_time":1'

. `dirname $0`/run_3osds.sh
@@ -1,18 +0,0 @@
#!/bin/bash -ex

. `dirname $0`/run_3osds.sh

primary=$($ETCDCTL get --print-value-only /vitastor/config/pgs | jq -r '.items["1"]["1"].primary')
primary_pid=OSD${primary}_PID
kill -9 ${!primary_pid}

sleep 15
wait_condition 10 "$ETCDCTL get --print-value-only /vitastor/config/pgs | jq -s -e '.[0].items[\"1\"][\"1\"].primary != \"$primary\"'"

newprim=$($ETCDCTL get --print-value-only /vitastor/config/pgs | jq -r '.items["1"]["1"].primary')

if [ "$newprim" = "$primary" ]; then
format_error Primary not switched
fi

format_green OK