Compare commits


3 Commits

148 changed files with 3192 additions and 7793 deletions


@@ -2,6 +2,6 @@ cmake_minimum_required(VERSION 2.8)
project(vitastor)
set(VERSION "0.8.3")
set(VERSION "0.7.1")
add_subdirectory(src)


@@ -58,7 +58,6 @@ Vitastor поддерживает QEMU-драйвер, протоколы NBD и
- [Метаданные образов в etcd](docs/config/inode.ru.md)
- Использование
- [vitastor-cli](docs/usage/cli.ru.md) (консольный интерфейс)
- [vitastor-disk](docs/usage/disk.ru.md) (управление дисками)
- [fio](docs/usage/fio.ru.md) для тестов производительности
- [NBD](docs/usage/nbd.ru.md) для монтирования ядром
- [QEMU и qemu-img](docs/usage/qemu.ru.md)


@@ -58,7 +58,6 @@ Read more details below in the documentation.
- [Image metadata in etcd](docs/config/inode.en.md)
- Usage
- [vitastor-cli](docs/usage/cli.en.md) (command-line interface)
- [vitastor-disk](docs/usage/disk.en.md) (disk management tool)
- [fio](docs/usage/fio.en.md) for benchmarks
- [NBD](docs/usage/nbd.en.md) for kernel mounts
- [QEMU and qemu-img](docs/usage/qemu.en.md)


@@ -18,19 +18,15 @@ ENV CSI_ENDPOINT=""
RUN apt-get update && \
apt-get install -y wget && \
wget -q -O /etc/apt/trusted.gpg.d/vitastor.gpg https://vitastor.io/debian/pubkey.gpg && \
(echo deb http://vitastor.io/debian buster main > /etc/apt/sources.list.d/vitastor.list) && \
(echo deb http://deb.debian.org/debian buster-backports main > /etc/apt/sources.list.d/backports.list) && \
(echo "APT::Install-Recommends false;" > /etc/apt/apt.conf) && \
apt-get update && \
apt-get install -y e2fsprogs xfsprogs kmod && \
apt-get install -y e2fsprogs xfsprogs vitastor kmod && \
apt-get clean && \
(echo options nbd nbds_max=128 > /etc/modprobe.d/nbd.conf)
COPY --from=build /app/vitastor-csi /bin/
RUN (echo deb http://vitastor.io/debian buster main > /etc/apt/sources.list.d/vitastor.list) && \
wget -q -O /etc/apt/trusted.gpg.d/vitastor.gpg https://vitastor.io/debian/pubkey.gpg && \
apt-get update && \
apt-get install -y vitastor-client && \
apt-get clean
ENTRYPOINT ["/bin/vitastor-csi"]
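
The `nbds_max` option written to `/etc/modprobe.d/nbd.conf` above only takes effect when the nbd kernel module is loaded. A quick check, assuming the module is available on the host kernel (the sysfs path is the standard location for module parameters):

```
modprobe nbd
cat /sys/module/nbd/parameters/nbds_max   # expect 128 if nbd.conf was applied
```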


@@ -1,4 +1,4 @@
VERSION ?= v0.8.3
VERSION ?= v0.7.1
all: build push


@@ -49,7 +49,7 @@ spec:
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: vitalif/vitastor-csi:v0.8.3
image: vitalif/vitastor-csi:v0.7.1
args:
- "--node=$(NODE_ID)"
- "--endpoint=$(CSI_ENDPOINT)"
@@ -102,7 +102,7 @@ spec:
- "--health-port=9898"
env:
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
value: unix://csi/csi.sock
volumeMounts:
- mountPath: /csi
name: socket-dir


@@ -116,7 +116,7 @@ spec:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
image: vitalif/vitastor-csi:v0.8.3
image: vitalif/vitastor-csi:v0.7.1
args:
- "--node=$(NODE_ID)"
- "--endpoint=$(CSI_ENDPOINT)"


@@ -5,7 +5,7 @@ package vitastor
const (
vitastorCSIDriverName = "csi.vitastor.io"
vitastorCSIDriverVersion = "0.8.3"
vitastorCSIDriverVersion = "0.7.1"
)
// Config struct fills the parameters of request or user input

debian/changelog

@@ -1,10 +1,10 @@
vitastor (0.8.3-1) unstable; urgency=medium
vitastor (0.7.1-1) unstable; urgency=medium
* Bugfixes
-- Vitaliy Filippov <vitalif@yourcmc.ru> Fri, 03 Jun 2022 02:09:44 +0300
vitastor (0.8.3-1) unstable; urgency=medium
vitastor (0.7.1-1) unstable; urgency=medium
* Implement NFS proxy
* Add documentation

debian/control

@@ -18,7 +18,7 @@ Description: Vitastor, a fast software-defined clustered block storage
Package: vitastor-osd
Architecture: amd64
Depends: ${shlibs:Depends}, ${misc:Depends}, vitastor-client (= ${binary:Version}), fdisk, util-linux, parted
Depends: ${shlibs:Depends}, ${misc:Depends}, vitastor-client (= ${binary:Version})
Description: Vitastor, a fast software-defined clustered block storage - object storage daemon
Vitastor object storage daemon, i.e. server program that stores data.


@@ -1 +1 @@
patches/VitastorPlugin.pm usr/share/perl5/PVE/Storage/Custom/
patches/PVE_VitastorPlugin.pm usr/share/perl5/PVE/Storage/Custom/VitastorPlugin.pm


@@ -4,3 +4,4 @@ usr/bin/vitastor-rm
usr/bin/vitastor-nbd
usr/bin/vitastor-nfs
usr/lib/*/libvitastor*.so*
mon/make-osd.sh /usr/lib/vitastor


@@ -1,2 +1 @@
mon usr/lib/vitastor
mon/vitastor-mon.service /lib/systemd/system


@@ -1,9 +0,0 @@
#!/bin/sh
set -e
if [ "$1" = "configure" ]; then
addgroup --system --quiet vitastor
adduser --system --quiet --ingroup vitastor --no-create-home --home /nonexistent vitastor
mkdir -p /etc/vitastor
fi


@@ -1,6 +1,3 @@
usr/bin/vitastor-osd
usr/bin/vitastor-disk
usr/bin/vitastor-dump-journal
mon/vitastor-osd@.service /lib/systemd/system
mon/vitastor.target /lib/systemd/system
mon/90-vitastor.rules /lib/udev/rules.d


@@ -1,10 +0,0 @@
#!/bin/sh
set -e
if [ "$1" = "configure" ]; then
addgroup --system --quiet vitastor
adduser --system --quiet --ingroup vitastor --no-create-home --home /nonexistent vitastor
install -o vitastor -g vitastor -d /var/log/vitastor
mkdir -p /etc/vitastor
fi


@@ -34,8 +34,8 @@ RUN set -e -x; \
mkdir -p /root/packages/vitastor-$REL; \
rm -rf /root/packages/vitastor-$REL/*; \
cd /root/packages/vitastor-$REL; \
cp -r /root/vitastor vitastor-0.8.3; \
cd vitastor-0.8.3; \
cp -r /root/vitastor vitastor-0.7.1; \
cd vitastor-0.7.1; \
ln -s /root/fio-build/fio-*/ ./fio; \
FIO=$(head -n1 fio/debian/changelog | perl -pe 's/^.*\((.*?)\).*$/$1/'); \
ls /usr/include/linux/raw.h || cp ./debian/raw.h /usr/include/linux/raw.h; \
@@ -48,8 +48,8 @@ RUN set -e -x; \
rm -rf a b; \
echo "dep:fio=$FIO" > debian/fio_version; \
cd /root/packages/vitastor-$REL; \
tar --sort=name --mtime='2020-01-01' --owner=0 --group=0 --exclude=debian -cJf vitastor_0.8.3.orig.tar.xz vitastor-0.8.3; \
cd vitastor-0.8.3; \
tar --sort=name --mtime='2020-01-01' --owner=0 --group=0 --exclude=debian -cJf vitastor_0.7.1.orig.tar.xz vitastor-0.7.1; \
cd vitastor-0.7.1; \
V=$(head -n1 debian/changelog | perl -pe 's/^.*\((.*?)\).*$/$1/'); \
DEBFULLNAME="Vitaliy Filippov <vitalif@yourcmc.ru>" dch -D $REL -v "$V""$REL" "Rebuild for $REL"; \
DEB_BUILD_OPTIONS=nocheck dpkg-buildpackage --jobs=auto -sa; \


@@ -9,34 +9,34 @@
These parameters apply to clients and OSDs, are fixed at the moment of OSD drive
initialization and can't be changed after it without losing data.
OSDs with different values of these parameters (for example, SSD and SSD+HDD
OSDs) can coexist in one Vitastor cluster within different pools. Each pool can
only include OSDs with identical settings of these parameters.
These parameters, when set to a non-default value, must also be specified in
etcd for clients to be aware of their values, either in /vitastor/config/global
or in pool configuration. Pool configuration overrides the global setting.
If the value for a pool in etcd doesn't match on-disk OSD configuration, the
OSD will refuse to start PGs of that pool.
- [block_size](#block_size)
- [bitmap_granularity](#bitmap_granularity)
- [immediate_commit](#immediate_commit)
- [client_dirty_limit](#client_dirty_limit)
## block_size
- Type: integer
- Default: 131072
Size of objects (data blocks) into which all physical and virtual drives
(within a pool) are subdivided in Vitastor. One of current main settings
in Vitastor, affects memory usage, write amplification and I/O load
distribution effectiveness.
Size of objects (data blocks) into which all physical and virtual drives are
subdivided in Vitastor. One of current main settings in Vitastor, affects
memory usage, write amplification and I/O load distribution effectiveness.
Recommended default block size is 128 KB for SSD and 4 MB for HDD. In fact,
it's possible to use 4 MB for SSD too - it will lower memory usage, but
may increase average WA and reduce linear performance.
OSDs with different block sizes (for example, SSD and SSD+HDD OSDs) can
currently coexist in one etcd instance only within separate Vitastor
clusters with different etcd_prefix'es.
Also block size can't be changed after OSD initialization without losing
data.
You must always specify block_size in etcd in /vitastor/config/global if
you change it so all clients can know about it.
OSD memory usage is roughly (SIZE / BLOCK * 68 bytes) which is roughly
544 MB per 1 TB of used disk space with the default 128 KB block size.
@@ -50,7 +50,12 @@ of disk_alignment. It's called bitmap granularity because Vitastor tracks
an allocation bitmap for each object containing 2 bits per each
(bitmap_granularity) bytes.
Can't be smaller than the OSD data device sector.
This parameter can't be changed after OSD initialization without losing
data. Also it's fixed for the whole Vitastor cluster i.e. two different
values can't be used in a single Vitastor cluster.
Clients MUST be aware of this parameter value, so put it into etcd key
/vitastor/config/global if you change it for any reason.
## immediate_commit
@@ -94,12 +99,26 @@ unsafe to change by hand). The same may apply to newer HDDs with internal
SSD cache or "media-cache" - for example, a lot of Seagate EXOS drives have
it (they have internal SSD cache even though it's not stated in datasheets).
Setting this parameter to "all" or "small" in OSD parameters requires enabling
disable_journal_fsync and disable_meta_fsync, setting it to "all" also requires
enabling disable_data_fsync.
This parameter must be set both in etcd in /vitastor/config/global and in
OSD command line or configuration. Setting it to "all" or "small" requires
enabling disable_journal_fsync and disable_meta_fsync, setting it to "all"
also requires enabling disable_data_fsync.
TLDR: For optimal performance, set immediate_commit to "all" if you only use
SSDs with supercapacitor-based power loss protection (nonvolatile
write-through cache) for both data and journals in the whole Vitastor
cluster. Set it to "small" if you only use such SSDs for journals. Leave
empty if your drives have write-back cache.
## client_dirty_limit
- Type: integer
- Default: 33554432
Without immediate_commit=all this parameter sets the limit of "dirty"
(not committed by fsync) data allowed by the client before forcing an
additional fsync and committing the data. Also note that the client always
holds a copy of uncommitted data in memory so this setting also affects
RAM usage of clients.
This parameter doesn't affect OSDs themselves.
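
To make the etcd requirement described above concrete, a sketch of publishing non-default layout values so clients and OSDs agree; the endpoint address and the HDD-oriented values are illustrative assumptions, not taken from this diff:

```
etcdctl --endpoints=http://10.200.1.10:2379 put /vitastor/config/global \
  '{"block_size":4194304,"bitmap_granularity":4096,"immediate_commit":"all"}'
```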


@@ -9,19 +9,10 @@
Данные параметры используются клиентами и OSD, задаются в момент инициализации
диска OSD и не могут быть изменены после этого без потери данных.
OSD с разными значениями данных параметров (например, SSD и гибридные SSD+HDD
OSD) могут сосуществовать в одном кластере Vitastor в разных пулах. Один пул
может включать только OSD с одинаковыми настройками этих параметров.
Данные параметры, отличаясь от значения по умолчанию, должны также быть заданы
в etcd, чтобы клиенты могли узнать их значение, либо в глобальной конфигурации
/vitastor/config/global, либо в настройках пулов. Настройки пула переопределяют
глобальное значение. Если значение в настройках пула не будет соответствовать
конфигурации OSD, OSD откажется запускать PG данного пула.
- [block_size](#block_size)
- [bitmap_granularity](#bitmap_granularity)
- [immediate_commit](#immediate_commit)
- [client_dirty_limit](#client_dirty_limit)
## block_size
@@ -29,15 +20,24 @@ OSD) могут сосуществовать в одном кластере Vita
- Значение по умолчанию: 131072
Размер объектов (блоков данных), на которые делятся физические и виртуальные
диски в Vitastor (в рамках каждого пула). Одна из ключевых на данный момент
настроек, влияет на потребление памяти, объём избыточной записи (write
amplification) и эффективность распределения нагрузки по OSD.
диски в Vitastor. Одна из ключевых на данный момент настроек, влияет на
потребление памяти, объём избыточной записи (write amplification) и
эффективность распределения нагрузки по OSD.
Рекомендуемые по умолчанию размеры блока - 128 килобайт для SSD и 4
мегабайта для HDD. В принципе, для SSD можно тоже использовать 4 мегабайта,
это понизит использование памяти, но ухудшит распределение нагрузки и в
среднем увеличит WA.
OSD с разными размерами блока (например, SSD и SSD+HDD OSD) на данный
момент могут сосуществовать в рамках одного etcd только в виде двух независимых
кластеров Vitastor с разными etcd_prefix.
Также размер блока нельзя менять после инициализации OSD без потери данных.
Если вы меняете размер блока, обязательно прописывайте его в etcd в
/vitastor/config/global, дабы все клиенты его знали.
Потребление памяти OSD составляет примерно (РАЗМЕР / БЛОК * 68 байт),
т.е. примерно 544 МБ памяти на 1 ТБ занятого места на диске при
стандартном 128 КБ блоке.
@@ -52,7 +52,13 @@ amplification) и эффективность распределения нагр
потому, что Vitastor хранит битовую карту для каждого объекта, содержащую
по 2 бита на каждые (bitmap_granularity) байт.
Не может быть меньше размера сектора дисков данных OSD.
Данный параметр нельзя менять после инициализации OSD без потери данных.
Также он фиксирован для всего кластера Vitastor, т.е. разные значения
не могут сосуществовать в одном кластере.
Клиенты ДОЛЖНЫ знать правильное значение этого параметра, так что если вы
его меняете, обязательно прописывайте изменённое значение в etcd в ключ
/vitastor/config/global.
## immediate_commit
@@ -102,7 +108,8 @@ HDD-дисках с внутренним SSD или "медиа" кэшем - н
многих дисках Seagate EXOS (у них есть внутренний SSD-кэш, хотя это и не
указано в спецификациях).
Указание "all" или "small" в настройках / командной строке OSD требует
Данный параметр нужно указывать и в etcd в /vitastor/config/global, и в
командной строке или конфигурации OSD. Значения "all" и "small" требуют
включения disable_journal_fsync и disable_meta_fsync, значение "all" также
требует включения disable_data_fsync.
@@ -112,3 +119,16 @@ immediate_commit в значение "all", если вы используете
такие SSD для всех журналов, но не для данных - можете установить параметр
в "small". Если и какие-то из дисков журналов имеют волатильный кэш записи -
оставьте параметр пустым.
## client_dirty_limit
- Тип: целое число
- Значение по умолчанию: 33554432
При работе без immediate_commit=all - это лимит объёма "грязных" (не
зафиксированных fsync-ом) данных, при достижении которого клиент будет
принудительно вызывать fsync и фиксировать данные. Также стоит иметь в виду,
что в этом случае до момента fsync клиент хранит копию незафиксированных
данных в памяти, то есть, настройка влияет на потребление памяти клиентами.
Параметр не влияет на сами OSD.


@@ -29,7 +29,6 @@ between clients, OSDs and etcd.
- [etcd_slow_timeout](#etcd_slow_timeout)
- [etcd_keepalive_timeout](#etcd_keepalive_timeout)
- [etcd_ws_keepalive_timeout](#etcd_ws_keepalive_timeout)
- [client_dirty_limit](#client_dirty_limit)
## tcp_header_buffer_size
@@ -213,16 +212,3 @@ etcd_report_interval to guarantee that keepalive actually works.
etcd websocket ping interval required to keep the connection alive and
detect disconnections quickly.
## client_dirty_limit
- Type: integer
- Default: 33554432
Without immediate_commit=all this parameter sets the limit of "dirty"
(not committed by fsync) data allowed by the client before forcing an
additional fsync and committing the data. Also note that the client always
holds a copy of uncommitted data in memory so this setting also affects
RAM usage of clients.
This parameter doesn't affect OSDs themselves.
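
A minimal `/etc/vitastor/vitastor.conf` sketch raising this limit to 64 MB; placing the key at the top level of the client configuration is an assumption modeled on the quickstart examples:

```
{
  "etcd_address": ["10.200.1.10:2379"],
  "client_dirty_limit": 67108864
}
```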


@@ -29,7 +29,6 @@
- [etcd_slow_timeout](#etcd_slow_timeout)
- [etcd_keepalive_timeout](#etcd_keepalive_timeout)
- [etcd_ws_keepalive_timeout](#etcd_ws_keepalive_timeout)
- [client_dirty_limit](#client_dirty_limit)
## tcp_header_buffer_size
@@ -223,16 +222,3 @@ etcd_report_interval, чтобы keepalive гарантированно рабо
- Значение по умолчанию: 30
Интервал проверки живости вебсокет-подключений к etcd.
## client_dirty_limit
- Тип: целое число
- Значение по умолчанию: 33554432
При работе без immediate_commit=all - это лимит объёма "грязных" (не
зафиксированных fsync-ом) данных, при достижении которого клиент будет
принудительно вызывать fsync и фиксировать данные. Также стоит иметь в виду,
что в этом случае до момента fsync клиент хранит копию незафиксированных
данных в памяти, то есть, настройка влияет на потребление памяти клиентами.
Параметр не влияет на сами OSD.


@@ -17,7 +17,6 @@ initialization and can be changed with an OSD restart.
- [autosync_interval](#autosync_interval)
- [autosync_writes](#autosync_writes)
- [recovery_queue_depth](#recovery_queue_depth)
- [recovery_pg_switch](#recovery_pg_switch)
- [recovery_sync_batch](#recovery_sync_batch)
- [readonly](#readonly)
- [no_recovery](#no_recovery)
@@ -116,16 +115,6 @@ Maximum recovery operations per one primary OSD at any given moment of time.
Currently it's the only parameter available to tune the speed of recovery
and rebalancing, but it's planned to implement more.
## recovery_pg_switch
- Type: integer
- Default: 128
Number of recovery operations before switching to recovery of the next PG.
The idea is to mix all PGs during recovery for more even space and load
distribution but still benefit from recovery queue depth greater than 1.
Degraded PGs are anyway scanned first.
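
A runtime tuning sketch combining this parameter with recovery_queue_depth (the values are illustrative and the endpoints are placeholders):

```
etcdctl --endpoints=... put /vitastor/config/global \
  '{"recovery_queue_depth":4,"recovery_pg_switch":128}'
```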
## recovery_sync_batch
- Type: integer


@@ -18,7 +18,6 @@
- [autosync_interval](#autosync_interval)
- [autosync_writes](#autosync_writes)
- [recovery_queue_depth](#recovery_queue_depth)
- [recovery_pg_switch](#recovery_pg_switch)
- [recovery_sync_batch](#recovery_sync_batch)
- [readonly](#readonly)
- [no_recovery](#no_recovery)
@@ -120,17 +119,6 @@ OSD, чтобы успевать очищать журнал - без них OSD
для ускорения или замедления восстановления и перебалансировки данных, но
в планах реализация других параметров.
## recovery_pg_switch
- Тип: целое число
- Значение по умолчанию: 128
Число операций восстановления перед переключением на восстановление другой PG.
Идея заключается в том, чтобы восстанавливать все PG одновременно для более
равномерного распределения места и нагрузки, но при этом всё равно выигрывать
от глубины очереди восстановления, большей, чем 1. Деградированные PG в любом
случае сканируются первыми.
## recovery_sync_batch
- Тип: целое число


@@ -33,9 +33,6 @@ Parameters:
- [pg_count](#pg_count)
- [failure_domain](#failure_domain)
- [max_osd_combinations](#max_osd_combinations)
- [block_size](#block_size)
- [bitmap_granularity](#bitmap_granularity)
- [immediate_commit](#immediate_commit)
- [pg_stripe_size](#pg_stripe_size)
- [root_node](#root_node)
- [osd_tags](#osd_tags)
@@ -82,7 +79,7 @@ Parent node reference is required for intermediate tree nodes.
Separate OSD settings are set in etcd keys `/vitastor/config/osd/<number>`
in JSON format `{"<key>":<value>}`.
As of now, two settings are supported:
As of now, there is only one setting:
## reweight
@@ -96,15 +93,6 @@ This means an OSD configured with reweight lower than 1 receives less PGs than
it normally would. An OSD with reweight = 0 won't store any data. You can set
reweight to 0 to trigger rebalance and remove all data from an OSD.
## tags
- Type: string or array of strings
Sets tag or multiple tags for this OSD. Tags can be used to group OSDs into
subsets and then use a specific subset for pool instead of all OSDs.
For example you can mark SSD OSDs with tag "ssd" and HDD OSDs with "hdd" and
such tags will work as device classes.
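
Both per-OSD settings can be written together using the key layout quoted above; the OSD number, weight and tag are illustrative:

```
etcdctl --endpoints=... put /vitastor/config/osd/7 '{"reweight":0.5,"tags":["ssd"]}'
```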
# Pool parameters
## name
@@ -198,43 +186,6 @@ number of combinations to generate when optimising PG placement.
This parameter usually doesn't need to be changed.
## block_size
- Type: integer
- Default: 131072
Block size for this pool. The value from /vitastor/config/global is used when
unspecified. If your cluster has OSDs with different block sizes then pool must
be restricted by [osd_tags](#osd_tags) to only include OSDs with matching block
size.
Read more about this parameter in [Cluster-Wide Disk Layout Parameters](layout-cluster.en.md#block_size).
## bitmap_granularity
- Type: integer
- Default: 4096
"Sector" size of virtual disks in this pool. The value from
/vitastor/config/global is used when unspecified. Similar to block_size, the
pool must be restricted by [osd_tags](#osd_tags) to only include OSDs with
matching bitmap_granularity.
Read more about this parameter in [Cluster-Wide Disk Layout Parameters](layout-cluster.en.md#bitmap_granularity).
## immediate_commit
- Type: string, one of "all", "small" and "none"
- Default: none
Immediate commit setting for this pool. The value from /vitastor/config/global
is used when unspecified. Similar to block_size, the pool must be restricted by
[osd_tags](#osd_tags) to only include OSDs with compatible immediate_commit.
Compatible means that a pool with non-immediate commit will work with OSDs with
immediate commit enabled, but not vice versa.
Read more about this parameter in [Cluster-Wide Disk Layout Parameters](layout-cluster.en.md#immediate_commit).
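
Putting the pool-level overrides together, a hypothetical HDD pool restricted by osd_tags (all names and values are illustrative):

```
etcdctl --endpoints=... put /vitastor/config/pools \
  '{"1":{"name":"hddpool","scheme":"replicated","pg_size":2,"pg_minsize":1,
    "pg_count":256,"failure_domain":"host","block_size":4194304,
    "immediate_commit":"none","osd_tags":"hdd"}}'
```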
## pg_stripe_size
- Type: integer


@@ -32,9 +32,6 @@
- [pg_count](#pg_count)
- [failure_domain](#failure_domain)
- [max_osd_combinations](#max_osd_combinations)
- [block_size](#block_size)
- [bitmap_granularity](#bitmap_granularity)
- [immediate_commit](#immediate_commit)
- [pg_stripe_size](#pg_stripe_size)
- [root_node](#root_node)
- [osd_tags](#osd_tags)
@@ -81,10 +78,7 @@
Настройки отдельных OSD задаются в ключах etcd `/vitastor/config/osd/<number>`
в JSON-формате `{"<key>":<value>}`.
На данный момент поддерживаются две настройки:
- [reweight](#reweight)
- [tags](#tags)
На данный момент поддерживается одна настройка:
## reweight
@@ -99,15 +93,6 @@
хранении данных вообще. Вы можете установить reweight в 0, чтобы убрать
все данные с OSD.
## tags
- Тип: строка или массив строк
Задаёт тег или набор тегов для данного OSD. Теги можно использовать, чтобы
делить OSD на множества и потом размещать пул только на части OSD, а не на
всех. Можно, например, пометить SSD OSD тегом "ssd", а HDD тегом "hdd", в
этом смысле теги работают аналогично классам устройств.
# Параметры
## name
@@ -200,51 +185,13 @@ PG в Vitastor эферемерны, то есть вы можете менят
Обычно данный параметр не требует изменений.
## block_size
- Тип: целое число
- По умолчанию: 131072
Размер блока для данного пула. Если не задан, используется значение из
/vitastor/config/global. Если в вашем кластере есть OSD с разными размерами
блока, пул должен быть ограничен только OSD, блок которых равен блоку пула,
с помощью [osd_tags](#osd_tags).
О самом параметре читайте в разделе [Дисковые параметры уровня кластера](layout-cluster.ru.md#block_size).
## bitmap_granularity
- Тип: целое число
- По умолчанию: 4096
Размер "сектора" виртуальных дисков в данном пуле. Если не задан, используется
значение из /vitastor/config/global. Аналогично block_size, пул должен быть
ограничен OSD со значением bitmap_granularity, равным значению пула, с помощью
[osd_tags](#osd_tags).
О самом параметре читайте в разделе [Дисковые параметры уровня кластера](layout-cluster.ru.md#bitmap_granularity).
## immediate_commit
- Тип: строка "all", "small" или "none"
- По умолчанию: none
Настройка мгновенного коммита для данного пула. Если не задана, используется
значение из /vitastor/config/global. Аналогично block_size, пул должен быть
ограничен OSD со значением bitmap_granularity, совместимым со значением пула, с
помощью [osd_tags](#osd_tags). Совместимость означает, что пул с отключенным
мгновенным коммитом может работать на OSD с включённым мгновенным коммитом, но
не наоборот.
О самом параметре читайте в разделе [Дисковые параметры уровня кластера](layout-cluster.ru.md#immediate_commit).
## pg_stripe_size
- Тип: целое число
- По умолчанию: 0
Данный параметр задаёт размер полосы "нарезки" образов на PG. Размер полосы не может
быть меньше, чем [block_size](#block_size), умноженный на
быть меньше, чем [block_size](layout-cluster.ru.md#block_size), умноженный на
(pg_size - parity_chunks) для EC-пулов или 1 для реплицированных пулов. То же
значение используется по умолчанию.


@@ -2,13 +2,3 @@
These parameters apply to clients and OSDs, are fixed at the moment of OSD drive
initialization and can't be changed after it without losing data.
OSDs with different values of these parameters (for example, SSD and SSD+HDD
OSDs) can coexist in one Vitastor cluster within different pools. Each pool can
only include OSDs with identical settings of these parameters.
These parameters, when set to a non-default value, must also be specified in
etcd for clients to be aware of their values, either in /vitastor/config/global
or in pool configuration. Pool configuration overrides the global setting.
If the value for a pool in etcd doesn't match on-disk OSD configuration, the
OSD will refuse to start PGs of that pool.


@@ -2,13 +2,3 @@
Данные параметры используются клиентами и OSD, задаются в момент инициализации
диска OSD и не могут быть изменены после этого без потери данных.
OSD с разными значениями данных параметров (например, SSD и гибридные SSD+HDD
OSD) могут сосуществовать в одном кластере Vitastor в разных пулах. Один пул
может включать только OSD с одинаковыми настройками этих параметров.
Данные параметры, отличаясь от значения по умолчанию, должны также быть заданы
в etcd, чтобы клиенты могли узнать их значение, либо в глобальной конфигурации
/vitastor/config/global, либо в настройках пулов. Настройки пула переопределяют
глобальное значение. Если значение в настройках пула не будет соответствовать
конфигурации OSD, OSD откажется запускать PG данного пула.


@@ -2,28 +2,46 @@
type: int
default: 131072
info: |
Size of objects (data blocks) into which all physical and virtual drives
(within a pool) are subdivided in Vitastor. One of current main settings
in Vitastor, affects memory usage, write amplification and I/O load
distribution effectiveness.
Size of objects (data blocks) into which all physical and virtual drives are
subdivided in Vitastor. One of current main settings in Vitastor, affects
memory usage, write amplification and I/O load distribution effectiveness.
Recommended default block size is 128 KB for SSD and 4 MB for HDD. In fact,
it's possible to use 4 MB for SSD too - it will lower memory usage, but
may increase average WA and reduce linear performance.
OSDs with different block sizes (for example, SSD and SSD+HDD OSDs) can
currently coexist in one etcd instance only within separate Vitastor
clusters with different etcd_prefix'es.
Also block size can't be changed after OSD initialization without losing
data.
You must always specify block_size in etcd in /vitastor/config/global if
you change it so all clients can know about it.
OSD memory usage is roughly (SIZE / BLOCK * 68 bytes) which is roughly
544 MB per 1 TB of used disk space with the default 128 KB block size.
info_ru: |
Размер объектов (блоков данных), на которые делятся физические и виртуальные
диски в Vitastor (в рамках каждого пула). Одна из ключевых на данный момент
настроек, влияет на потребление памяти, объём избыточной записи (write
amplification) и эффективность распределения нагрузки по OSD.
диски в Vitastor. Одна из ключевых на данный момент настроек, влияет на
потребление памяти, объём избыточной записи (write amplification) и
эффективность распределения нагрузки по OSD.
Рекомендуемые по умолчанию размеры блока - 128 килобайт для SSD и 4
мегабайта для HDD. В принципе, для SSD можно тоже использовать 4 мегабайта,
это понизит использование памяти, но ухудшит распределение нагрузки и в
среднем увеличит WA.
OSD с разными размерами блока (например, SSD и SSD+HDD OSD) на данный
момент могут сосуществовать в рамках одного etcd только в виде двух независимых
кластеров Vitastor с разными etcd_prefix.
Также размер блока нельзя менять после инициализации OSD без потери данных.
Если вы меняете размер блока, обязательно прописывайте его в etcd в
/vitastor/config/global, дабы все клиенты его знали.
Потребление памяти OSD составляет примерно (РАЗМЕР / БЛОК * 68 байт),
т.е. примерно 544 МБ памяти на 1 ТБ занятого места на диске при
стандартном 128 КБ блоке.
@@ -36,14 +54,25 @@
an allocation bitmap for each object containing 2 bits per each
(bitmap_granularity) bytes.
Can't be smaller than the OSD data device sector.
This parameter can't be changed after OSD initialization without losing
data. Also it's fixed for the whole Vitastor cluster i.e. two different
values can't be used in a single Vitastor cluster.
Clients MUST be aware of this parameter value, so put it into etcd key
/vitastor/config/global if you change it for any reason.
info_ru: |
Требуемое выравнивание записи на виртуальные диски (размер их "сектора").
Должен быть кратен disk_alignment. Называется гранулярностью битовой карты
потому, что Vitastor хранит битовую карту для каждого объекта, содержащую
по 2 бита на каждые (bitmap_granularity) байт.
Не может быть меньше размера сектора дисков данных OSD.
Данный параметр нельзя менять после инициализации OSD без потери данных.
Также он фиксирован для всего кластера Vitastor, т.е. разные значения
не могут сосуществовать в одном кластере.
Клиенты ДОЛЖНЫ знать правильное значение этого параметра, так что если вы
его меняете, обязательно прописывайте изменённое значение в etcd в ключ
/vitastor/config/global.
- name: immediate_commit
type: string
default: false
@@ -85,9 +114,10 @@
SSD cache or "media-cache" - for example, a lot of Seagate EXOS drives have
it (they have internal SSD cache even though it's not stated in datasheets).
Setting this parameter to "all" or "small" in OSD parameters requires enabling
disable_journal_fsync and disable_meta_fsync, setting it to "all" also requires
enabling disable_data_fsync.
This parameter must be set both in etcd in /vitastor/config/global and in
OSD command line or configuration. Setting it to "all" or "small" requires
enabling disable_journal_fsync and disable_meta_fsync, setting it to "all"
also requires enabling disable_data_fsync.
TLDR: For optimal performance, set immediate_commit to "all" if you only use
SSDs with supercapacitor-based power loss protection (nonvolatile
@@ -138,7 +168,8 @@
многих дисках Seagate EXOS (у них есть внутренний SSD-кэш, хотя это и не
указано в спецификациях).
Указание "all" или "small" в настройках / командной строке OSD требует
Данный параметр нужно указывать и в etcd в /vitastor/config/global, и в
командной строке или конфигурации OSD. Значения "all" и "small" требуют
включения disable_journal_fsync и disable_meta_fsync, значение "all" также
требует включения disable_data_fsync.
@@ -148,3 +179,22 @@
такие SSD для всех журналов, но не для данных - можете установить параметр
в "small". Если и какие-то из дисков журналов имеют волатильный кэш записи -
оставьте параметр пустым.
- name: client_dirty_limit
type: int
default: 33554432
info: |
Without immediate_commit=all this parameter sets the limit of "dirty"
(not committed by fsync) data allowed by the client before forcing an
additional fsync and committing the data. Also note that the client always
holds a copy of uncommitted data in memory so this setting also affects
RAM usage of clients.
This parameter doesn't affect OSDs themselves.
info_ru: |
При работе без immediate_commit=all - это лимит объёма "грязных" (не
зафиксированных fsync-ом) данных, при достижении которого клиент будет
принудительно вызывать fsync и фиксировать данные. Также стоит иметь в виду,
что в этом случае до момента fsync клиент хранит копию незафиксированных
данных в памяти, то есть, настройка влияет на потребление памяти клиентами.
Параметр не влияет на сами OSD.


@@ -223,22 +223,3 @@
detect disconnections quickly.
info_ru: |
Интервал проверки живости вебсокет-подключений к etcd.
- name: client_dirty_limit
type: int
default: 33554432
info: |
Without immediate_commit=all this parameter sets the limit of "dirty"
(not committed by fsync) data allowed by the client before forcing an
additional fsync and committing the data. Also note that the client always
holds a copy of uncommitted data in memory so this setting also affects
RAM usage of clients.
This parameter doesn't affect OSDs themselves.
info_ru: |
При работе без immediate_commit=all - это лимит объёма "грязных" (не
зафиксированных fsync-ом) данных, при достижении которого клиент будет
принудительно вызывать fsync и фиксировать данные. Также стоит иметь в виду,
что в этом случае до момента fsync клиент хранит копию незафиксированных
данных в памяти, то есть, настройка влияет на потребление памяти клиентами.
Параметр не влияет на сами OSD.


@@ -102,20 +102,6 @@
момент времени. На данный момент единственный параметр, который можно менять
для ускорения или замедления восстановления и перебалансировки данных, но
в планах реализация других параметров.
- name: recovery_pg_switch
type: int
default: 128
info: |
Number of recovery operations before switching to recovery of the next PG.
The idea is to mix all PGs during recovery for more even space and load
distribution but still benefit from recovery queue depth greater than 1.
Degraded PGs are anyway scanned first.
info_ru: |
Число операций восстановления перед переключением на восстановление другой PG.
Идея заключается в том, чтобы восстанавливать все PG одновременно для более
равномерного распределения места и нагрузки, но при этом всё равно выигрывать
от глубины очереди восстановления, большей, чем 1. Деградированные PG в любом
случае сканируются первыми.
- name: recovery_sync_batch
type: int
default: 16


@@ -6,10 +6,10 @@
# Proxmox VE
To enable Vitastor support in Proxmox Virtual Environment (6.4-7.3 are supported):
To enable Vitastor support in Proxmox Virtual Environment (6.4 and 7.1 are supported):
- Add the corresponding Vitastor Debian repository into sources.list on Proxmox hosts:
buster for 6.4, bullseye for 7.3, pve7.1 for 7.1, pve7.2 for 7.2
- Add the corresponding Vitastor Debian repository into sources.list on Proxmox hosts
(buster for 6.4, bullseye for 7.1)
- Install vitastor-client, pve-qemu-kvm, pve-storage-vitastor (* or see note) packages from Vitastor repository
- Define storage in `/etc/pve/storage.cfg` (see below)
- Block network access from VMs to Vitastor network (to OSDs and etcd),
@@ -35,5 +35,5 @@ vitastor: vitastor
vitastor_nbd 0
```
\* Note: you can also manually copy [patches/VitastorPlugin.pm](patches/VitastorPlugin.pm) to Proxmox hosts
\* Note: you can also manually copy [patches/PVE_VitastorPlugin.pm](patches/PVE_VitastorPlugin.pm) to Proxmox hosts
as `/usr/share/perl5/PVE/Storage/Custom/VitastorPlugin.pm` instead of installing pve-storage-vitastor.


@@ -6,10 +6,10 @@
# Proxmox
Чтобы подключить Vitastor к Proxmox Virtual Environment (поддерживаются версии 6.4-7.3):
Чтобы подключить Vitastor к Proxmox Virtual Environment (поддерживаются версии 6.4 и 7.1):
- Добавьте соответствующий Debian-репозиторий Vitastor в sources.list на хостах Proxmox:
buster для 6.4, bullseye для 7.3, pve7.1 для 7.1, pve7.2 для 7.2
- Добавьте соответствующий Debian-репозиторий Vitastor в sources.list на хостах Proxmox
(buster для 6.4, bullseye для 7.1)
- Установите пакеты vitastor-client, pve-qemu-kvm, pve-storage-vitastor (* или см. сноску) из репозитория Vitastor
- Определите тип хранилища в `/etc/pve/storage.cfg` (см. ниже)
- Обязательно заблокируйте доступ от виртуальных машин к сети Vitastor (OSD и etcd), т.к. Vitastor (пока) не поддерживает аутентификацию
@@ -35,5 +35,5 @@ vitastor: vitastor
```
\* Примечание: вместо установки пакета pve-storage-vitastor вы можете вручную скопировать файл
[patches/VitastorPlugin.pm](patches/VitastorPlugin.pm) на хосты Proxmox как
[patches/PVE_VitastorPlugin.pm](patches/PVE_VitastorPlugin.pm) на хосты Proxmox как
`/usr/share/perl5/PVE/Storage/Custom/VitastorPlugin.pm`.


@@ -127,7 +127,7 @@
запросы записи клиенты копируют в памяти и при потере соединения и повторном соединении
с OSD повторяют из памяти. Скопированные в память данные удаляются при успешном fsync,
а чтобы хранение этих данных не приводило к чрезмерному потреблению памяти, клиенты
автоматически выполняют fsync каждые [client_dirty_limit](../config/network.ru.md#client_dirty_limit)
автоматически выполняют fsync каждые [client_dirty_limit](../config/layout-cluster.ru.md#client_dirty_limit)
записанных байт.
## Схожесть с Ceph


@@ -34,7 +34,6 @@
- [Debian and CentOS packages](../installation/packages.en.md)
- [Image management CLI (vitastor-cli)](../usage/cli.en.md)
- [Disk management CLI (vitastor-disk)](docs/usage/disk.en.md)
- Generic user-space client library
- [Native QEMU driver](../usage/qemu.en.md)
- [Loadable fio engine for benchmarks](../usage/fio.en.md)
@@ -48,6 +47,7 @@
The following features are planned for the future:
- Better OSD creation and auto-start tools
- Other administrative tools
- Web GUI
- OpenNebula plugin


@@ -36,7 +36,6 @@
- [Пакеты для Debian и CentOS](../installation/packages.ru.md)
- [Консольный интерфейс управления образами (vitastor-cli)](../usage/cli.ru.md)
- [Инструмент управления дисками (vitastor-disk)](docs/usage/disk.ru.md)
- Общая пользовательская клиентская библиотека для работы с кластером
- [Драйвер диска для QEMU](../usage/qemu.ru.md)
- [Драйвер диска для утилиты тестирования производительности fio](../usage/fio.ru.md)
@@ -48,6 +47,7 @@
## Планы развития
- Более корректные скрипты разметки дисков и автоматического запуска OSD
- Другие инструменты администрирования
- Web-интерфейс
- Плагин для OpenNebula


@@ -26,14 +26,9 @@
## Configure monitors
On the monitor hosts:
- Put identical etcd_address into `/etc/vitastor/vitastor.conf`. Example:
```
{
"etcd_address": ["10.200.1.10:2379","10.200.1.11:2379","10.200.1.12:2379"]
}
```
- Create systemd units for etcd by running: `/usr/lib/vitastor/mon/make-etcd`
- Start etcd and monitors: `systemctl enable --now etcd vitastor-mon`
- Edit variables at the top of `/usr/lib/vitastor/mon/make-units.sh` to desired values.
- Create systemd units for the monitor and etcd: `/usr/lib/vitastor/mon/make-units.sh`
- Start etcd and monitors: `systemctl start etcd vitastor-mon`
## Configure OSDs
@@ -45,9 +40,11 @@ On the monitor hosts:
}
```
- Initialize OSDs:
- SSD-only: `vitastor-disk prepare /dev/sdXXX [/dev/sdYYY ...]`
- Hybrid, SSD+HDD: `vitastor-disk prepare --hybrid /dev/sdXXX [/dev/sdYYY ...]`.
Pass all your devices (HDD and SSD) to this script; it will partition disks and initialize journals on its own.
- Simplest, SSD-only: `/usr/lib/vitastor/mon/make-osd.sh /dev/disk/by-partuuid/XXX [/dev/disk/by-partuuid/YYY ...]`
**Warning!** This very simple script by default makes units for server-grade SSDs with write-through cache!
If it's not your case, you MUST remove disable_data_fsync and immediate_commit from systemd units.
- Hybrid, HDD+SSD: `/usr/lib/vitastor/mon/make-osd-hybrid.js /dev/sda /dev/sdb ...`: pass all your
devices (HDD and SSD) to this script; it will partition disks and initialize journals on its own.
This script skips HDDs which are already partitioned so if you want to use non-empty disks for
Vitastor you should first wipe them with `wipefs -a`. SSDs with GPT partition table are not skipped,
but some free unpartitioned space must be available because the script creates new partitions for journals.
@@ -70,7 +67,7 @@ For EC pools the configuration should look like the following:
```
etcdctl --endpoints=... put /vitastor/config/pools '{"2":{"name":"ecpool",
"scheme":"ec","pg_size":4,"parity_chunks":2,"pg_minsize":2,"pg_count":256,"failure_domain":"host"}'
"scheme":"ec","pg_size":4,"parity_chunks":2,"pg_minsize":2,"pg_count":256,"failure_domain":"host"}`
```
After you do this, one of the monitors will configure PGs and OSDs will start them.
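
Note that both variants of the EC pool command above are missing one closing brace for the outer JSON object; a balanced version (endpoints remain placeholders) would be:

```
etcdctl --endpoints=... put /vitastor/config/pools \
  '{"2":{"name":"ecpool","scheme":"ec","pg_size":4,"parity_chunks":2,
    "pg_minsize":2,"pg_count":256,"failure_domain":"host"}}'
```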


@@ -26,14 +26,16 @@
## Настройте мониторы
На хостах, выделенных под мониторы:
- Пропишите одинаковые etcd_address в `/etc/vitastor/vitastor.conf`. Например:
- Пропишите нужные вам значения в файле `/usr/lib/vitastor/mon/make-units.sh`
- Создайте юниты systemd для etcd и мониторов: `/usr/lib/vitastor/mon/make-units.sh`
- Запустите etcd и мониторы: `systemctl start etcd vitastor-mon`
- Пропишите etcd_address и osd_network в `/etc/vitastor/vitastor.conf`. Например:
```
{
"etcd_address": ["10.200.1.10:2379","10.200.1.11:2379","10.200.1.12:2379"]
"etcd_address": ["10.200.1.10:2379","10.200.1.11:2379","10.200.1.12:2379"],
"osd_network": "10.200.1.0/24"
}
```
- Инициализируйте сервисы etcd, запустив `/usr/lib/vitastor/mon/make-etcd`
- Запустите etcd и мониторы: `systemctl enable --now etcd vitastor-mon`
## Настройте OSD
@@ -45,10 +47,12 @@
}
```
- Инициализуйте OSD:
- SSD: `vitastor-disk prepare /dev/sdXXX [/dev/sdYYY ...]`
- Гибридные, SSD+HDD: `vitastor-disk prepare --hybrid /dev/sdXXX [/dev/sdYYY ...]`.
Передайте все ваши SSD и HDD скрипту в командной строке подряд, скрипт автоматически выделит
разделы под журналы на SSD и данные на HDD. Скрипт пропускает HDD, на которых уже есть разделы
- SSD: `/usr/lib/vitastor/make-osd.sh /dev/disk/by-partuuid/XXX [/dev/disk/by-partuuid/YYY ...]`. \
**Внимание!** Скрипт по умолчанию рассчитан на то, что у вас диски с конденсаторами и отключённым
кэшем! Если это не так, из юнитов systemd нужно убрать строчки disable_data_fsync и immediate_commit!
- Гибридные, HDD+SSD: `/usr/lib/vitastor/mon/make-osd-hybrid.js /dev/sda /dev/sdb ...` - передайте
все ваши SSD и HDD скрипту в командной строке подряд, скрипт автоматически выделит разделы под
журналы на SSD и данные на HDD. Скрипт пропускает HDD, на которых уже есть разделы
или вообще какие-то данные, поэтому если диски непустые, сначала очистите их с помощью
`wipefs -a`. SSD с таблицей разделов не пропускаются, но так как скрипт создаёт новые разделы
для журналов, на SSD должно быть доступно свободное нераспределённое место.
@@ -71,7 +75,7 @@ etcdctl --endpoints=... put /vitastor/config/pools '{"1":{"name":"testpool",
```
etcdctl --endpoints=... put /vitastor/config/pools '{"2":{"name":"ecpool",
"scheme":"ec","pg_size":4,"parity_chunks":2,"pg_minsize":2,"pg_count":256,"failure_domain":"host"}'
"scheme":"ec","pg_size":4,"parity_chunks":2,"pg_minsize":2,"pg_count":256,"failure_domain":"host"}`
```
После этого один из мониторов должен сконфигурировать PG, а OSD должны запустить их.


@@ -20,7 +20,7 @@ It supports the following commands:
- [rm-data](#rm-data)
- [merge-data](#merge-data)
- [alloc-osd](#alloc-osd)
- [rm-osd](#rm-osd)
- [simple-offsets](#simple-offsets)
Global options:
@@ -38,9 +38,9 @@ Global options:
`vitastor-cli status`
Show cluster status.
Показать состояние кластера.
Example output:
Пример вывода:
```
cluster:
@@ -65,9 +65,9 @@ Example output:
`vitastor-cli df`
Show pool space statistics.
Показать список пулов и занятое место.
Example output:
Пример вывода:
```
NAME SCHEME PGS TOTAL USED AVAILABLE USED% EFFICIENCY
@@ -76,26 +76,27 @@ size1 1/1 32 199.9 G 10 G 121.5 G 39.23% 100%
kaveri 2/1 32 0 B 10 G 0 B 100% 0%
```
In the example above, "kaveri" pool has "zero" efficiency because all its OSDs are down.
В примере у пула "kaveri" эффективность равна нулю, так как все OSD выключены.
## ls
`vitastor-cli ls [-l] [-p POOL] [--sort FIELD] [-r] [-n N] [<glob> ...]`
List images (only matching `<glob>` pattern(s) if passed).
Показать список образов, если переданы шаблоны `<glob>`, то только с именами,
соответствующими этим шаблонам (стандартные ФС-шаблоны с * и ?).
Options:
Опции:
```
-p|--pool POOL Filter images by pool ID or name
-l|--long Also report allocated size and I/O statistics
--del Also include delete operation statistics
--sort FIELD Sort by specified field (name, size, used_size, <read|write|delete>_<iops|bps|lat|queue>)
-r|--reverse Sort in descending order
-n|--count N Only list first N items
-p|--pool POOL Фильтровать образы по пулу (ID или имени)
-l|--long Также выводить статистику занятого места и ввода-вывода
--del Также выводить статистику операций удаления
--sort FIELD Сортировать по заданному полю (name, size, used_size, <read|write|delete>_<iops|bps|lat|queue>)
-r|--reverse Сортировать в обратном порядке
-n|--count N Показывать только первые N записей
```
Example output:
Пример вывода:
```
NAME POOL SIZE USED READ IOPS QUEUE LAT WRITE IOPS QUEUE LAT FLAGS PARENT
@@ -112,78 +113,94 @@ bench-kaveri kaveri 10 G 10 G 0 B/s 0 0 0 us 0 B/s 0
`vitastor-cli create -s|--size <size> [-p|--pool <id|name>] [--parent <parent_name>[@<snapshot>]] <name>`
Create an image. You may use K/M/G/T suffixes for `<size>`. If `--parent` is specified,
a copy-on-write image clone is created. Parent must be a snapshot (readonly image).
Pool must be specified if there is more than one pool.
Создать образ. Для размера `<size>` можно использовать суффиксы K/M/G/T (килобайт-мегабайт-гигабайт-терабайт).
Если указана опция `--parent`, создаётся клон образа. Родитель `<parent_name>[@<snapshot>]` должен быть
снимком (или просто немодифицируемым образом). Пул обязательно указывать, если в кластере больше одного пула.
```
vitastor-cli create --snapshot <snapshot> [-p|--pool <id|name>] <image>
vitastor-cli snap-create [-p|--pool <id|name>] <image>@<snapshot>
```
Create a snapshot of image `<name>` (either form can be used). May be used live if only a single writer is active.
Создать снимок образа `<name>` (можно использовать любую форму команды). Снимок можно создавать без остановки
клиентов, если пишущий клиент максимум 1.
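
A short usage sketch of the create/snapshot/clone workflow described above (pool, image and snapshot names are illustrative):

```
vitastor-cli create -s 10G -p testpool vm1
vitastor-cli snap-create vm1@snap1
vitastor-cli create -s 10G --parent vm1@snap1 vm1-clone
```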
## modify
`vitastor-cli modify <name> [--rename <new-name>] [--resize <size>] [--readonly | --readwrite] [-f|--force]`
Rename, resize image or change its readonly status. Images with children can't be made read-write.
If the new size is smaller than the old size, extra data will be purged.
You should resize file system in the image, if present, before shrinking it.
Изменить размер, имя образа или флаг "только для чтения". Снимать флаг "только для чтения"
и уменьшать размер образов, у которых есть дочерние клоны, без `--force` нельзя.
Если новый размер меньше старого, "лишние" данные будут удалены, поэтому перед уменьшением
образа сначала уменьшите файловую систему в нём.
```
-f|--force Proceed with shrinking or setting readwrite flag even if the image has children.
-f|--force Разрешить уменьшение или перевод в чтение-запись образа, у которого есть клоны.
```
## rm
`vitastor-cli rm <from> [<to>] [--writers-stopped]`
Remove `<from>` or all layers between `<from>` and `<to>` (`<to>` must be a child of `<from>`),
rebasing all their children accordingly. --writers-stopped allows merging to be a bit
more effective in case of a single 'slim' read-write child and 'fat' removed parent:
the child is merged into parent and parent is renamed to child in that case.
In other cases parent layers are always merged into children.
Удалить образ `<from>` или все слои от `<from>` до `<to>` (`<to>` должен быть дочерним
образом `<from>`), одновременно меняя родительские образы их клонов (если таковые есть).
`--writers-stopped` позволяет чуть более эффективно удалять образы в частом случае, когда
у удаляемой цепочки есть только один дочерний образ, содержащий небольшой объём данных.
В этом случае дочерний образ вливается в родительский и удаляется, а родительский
переименовывается в дочерний.
В других случаях родительские слои вливаются в дочерние.
## flatten
`vitastor-cli flatten <layer>`
Flatten a layer, i.e. merge data and detach it from parents.
Сделать образ `<layer>` плоским, то есть скопировать в него данные и разорвать его
соединение с родительскими.
## rm-data
`vitastor-cli rm-data --pool <pool> --inode <inode> [--wait-list] [--min-offset <offset>]`
Remove inode data without changing metadata.
Удалить данные инода, не меняя метаданные образов.
```
--wait-list Retrieve full objects listings before starting to remove objects.
Requires more memory, but allows to show correct removal progress.
--min-offset Purge only data starting with specified offset.
--wait-list Сначала запросить полный листинг объектов, а потом начать удалять.
Требует больше памяти, но позволяет правильно печатать прогресс удаления.
--min-offset Удалять только данные, начиная с заданного смещения.
```
## merge-data
`vitastor-cli merge-data <from> <to> [--target <target>]`
Merge layer data without changing metadata. Merge `<from>`..`<to>` to `<target>`.
`<to>` must be a child of `<from>` and `<target>` may be one of the layers between
`<from>` and `<to>`, including `<from>` and `<to>`.
Слить данные слоёв, не меняя метаданные. Вливает данные из слоёв от `<from>` до `<to>`
в целевой образ `<target>`. `<to>` должен быть дочерним образом `<from>`, а `<target>`
должен быть одним из слоёв между `<from>` и `<to>`, включая сами `<from>` и `<to>`.
## alloc-osd
`vitastor-cli alloc-osd`
Allocate a new OSD number and reserve it by creating empty `/osd/stats/<n>` key.
Атомарно выделить новый номер OSD и зарезервировать его, создав в etcd пустой
ключ `/osd/stats/<n>`.
## rm-osd
## simple-offsets
`vitastor-cli rm-osd [--force] [--allow-data-loss] [--dry-run] <osd_id> [osd_id...]`
`vitastor-cli simple-offsets <device>`
Remove metadata and configuration for specified OSD(s) from etcd.
Рассчитать смещения для простого и тупого создания OSD на диске (без суперблока).
Refuses to remove OSDs with data without `--force` and `--allow-data-loss`.
Опции (см. также [Дисковые параметры уровня кластера](../config/layout-cluster.ru.md)):
With `--dry-run` only checks if deletion is possible without data loss and
redundancy degradation.
```
--object_size 128k Размер блока хранилища
--bitmap_granularity 4k Гранулярность битовых карт
--journal_size 32M Размер журнала
--device_block_size 4k Размер блока устройства
--journal_offset 0 Смещение журнала
--device_size 0 Размер устройства
--format text Формат результата: json, options, env или text
```
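
An example invocation of the command above, printing the offsets as environment-style variables for a hypothetical partition:

```
vitastor-cli simple-offsets --format env /dev/disk/by-partuuid/XXX
```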


@@ -21,7 +21,7 @@ vitastor-cli - интерфейс командной строки для адм
- [rm-data](#rm-data)
- [merge-data](#merge-data)
- [alloc-osd](#alloc-osd)
- [rm-osd](#rm-osd)
- [simple-offsets](#simple-offsets)
Глобальные опции:
@@ -39,9 +39,9 @@ vitastor-cli - интерфейс командной строки для адм
`vitastor-cli status`
Показать состояние кластера.
Show cluster status.
Пример вывода:
Example output:
```
cluster:
@@ -66,9 +66,9 @@ vitastor-cli - интерфейс командной строки для адм
`vitastor-cli df`
Показать список пулов и занятое место.
Show pool space statistics.
Пример вывода:
Example output:
```
NAME SCHEME PGS TOTAL USED AVAILABLE USED% EFFICIENCY
@@ -77,27 +77,26 @@ size1 1/1 32 199.9 G 10 G 121.5 G 39.23% 100%
kaveri 2/1 32 0 B 10 G 0 B 100% 0%
```
В примере у пула "kaveri" эффективность равна нулю, так как все OSD выключены.
In the example above, "kaveri" pool has "zero" efficiency because all its OSDs are down.
## ls
`vitastor-cli ls [-l] [-p POOL] [--sort FIELD] [-r] [-n N] [<glob> ...]`
Показать список образов, если переданы шаблоны `<glob>`, то только с именами,
соответствующими этим шаблонам (стандартные ФС-шаблоны с * и ?).
List images (only matching `<glob>` pattern(s) if passed).
Опции:
Options:
```
-p|--pool POOL Фильтровать образы по пулу (ID или имени)
-l|--long Также выводить статистику занятого места и ввода-вывода
--del Также выводить статистику операций удаления
--sort FIELD Сортировать по заданному полю (name, size, used_size, <read|write|delete>_<iops|bps|lat|queue>)
-r|--reverse Сортировать в обратном порядке
-n|--count N Показывать только первые N записей
-p|--pool POOL Filter images by pool ID or name
-l|--long Also report allocated size and I/O statistics
--del Also include delete operation statistics
--sort FIELD Sort by specified field (name, size, used_size, <read|write|delete>_<iops|bps|lat|queue>)
-r|--reverse Sort in descending order
-n|--count N Only list first N items
```
Пример вывода:
Example output:
```
NAME POOL SIZE USED READ IOPS QUEUE LAT WRITE IOPS QUEUE LAT FLAGS PARENT
@@ -114,87 +113,85 @@ bench-kaveri kaveri 10 G 10 G 0 B/s 0 0 0 us 0 B/s 0
`vitastor-cli create -s|--size <size> [-p|--pool <id|name>] [--parent <parent_name>[@<snapshot>]] <name>`
Создать образ. Для размера `<size>` можно использовать суффиксы K/M/G/T (килобайт-мегабайт-гигабайт-терабайт).
Если указана опция `--parent`, создаётся клон образа. Родитель `<parent_name>[@<snapshot>]` должен быть
снимком (или просто немодифицируемым образом). Пул обязательно указывать, если в кластере больше одного пула.
Create an image. You may use K/M/G/T suffixes for `<size>`. If `--parent` is specified,
a copy-on-write image clone is created. Parent must be a snapshot (readonly image).
Pool must be specified if there is more than one pool.
```
vitastor-cli create --snapshot <snapshot> [-p|--pool <id|name>] <image>
vitastor-cli snap-create [-p|--pool <id|name>] <image>@<snapshot>
```
Создать снимок образа `<name>` (можно использовать любую форму команды). Снимок можно создавать без остановки
клиентов, если пишущий клиент максимум 1.
Create a snapshot of image `<name>` (either form can be used). May be used live if only a single writer is active.
## modify
`vitastor-cli modify <name> [--rename <new-name>] [--resize <size>] [--readonly | --readwrite] [-f|--force]`
Изменить размер, имя образа или флаг "только для чтения". Снимать флаг "только для чтения"
и уменьшать размер образов, у которых есть дочерние клоны, без `--force` нельзя.
Если новый размер меньше старого, "лишние" данные будут удалены, поэтому перед уменьшением
образа сначала уменьшите файловую систему в нём.
Rename, resize image or change its readonly status. Images with children can't be made read-write.
If the new size is smaller than the old size, extra data will be purged.
You should resize file system in the image, if present, before shrinking it.
```
-f|--force Разрешить уменьшение или перевод в чтение-запись образа, у которого есть клоны.
-f|--force Proceed with shrinking or setting readwrite flag even if the image has children.
```
## rm
`vitastor-cli rm <from> [<to>] [--writers-stopped]`
Удалить образ `<from>` или все слои от `<from>` до `<to>` (`<to>` должен быть дочерним
образом `<from>`), одновременно меняя родительские образы их клонов (если таковые есть).
`--writers-stopped` позволяет чуть более эффективно удалять образы в частом случае, когда
у удаляемой цепочки есть только один дочерний образ, содержащий небольшой объём данных.
В этом случае дочерний образ вливается в родительский и удаляется, а родительский
переименовывается в дочерний.
В других случаях родительские слои вливаются в дочерние.
Remove `<from>` or all layers between `<from>` and `<to>` (`<to>` must be a child of `<from>`),
rebasing all their children accordingly. --writers-stopped allows merging to be a bit
more effective in case of a single 'slim' read-write child and 'fat' removed parent:
the child is merged into parent and parent is renamed to child in that case.
In other cases parent layers are always merged into children.
## flatten
`vitastor-cli flatten <layer>`
Сделать образ `<layer>` плоским, то есть скопировать в него данные и разорвать его
соединение с родительскими.
Flatten a layer, i.e. merge data and detach it from parents.
## rm-data
`vitastor-cli rm-data --pool <pool> --inode <inode> [--wait-list] [--min-offset <offset>]`
Удалить данные инода, не меняя метаданные образов.
Remove inode data without changing metadata.
```
--wait-list Сначала запросить полный листинг объектов, а потом начать удалять.
Требует больше памяти, но позволяет правильно печатать прогресс удаления.
--min-offset Удалять только данные, начиная с заданного смещения.
--wait-list Retrieve full objects listings before starting to remove objects.
Requires more memory, but allows to show correct removal progress.
--min-offset Purge only data starting with specified offset.
```
## merge-data
`vitastor-cli merge-data <from> <to> [--target <target>]`
Слить данные слоёв, не меняя метаданные. Вливает данные из слоёв от `<from>` до `<to>`
в целевой образ `<target>`. `<to>` должен быть дочерним образом `<from>`, а `<target>`
должен быть одним из слоёв между `<from>` и `<to>`, включая сами `<from>` и `<to>`.
Merge layer data without changing metadata. Merge `<from>`..`<to>` to `<target>`.
`<to>` must be a child of `<from>` and `<target>` may be one of the layers between
`<from>` and `<to>`, including `<from>` and `<to>`.
## alloc-osd
`vitastor-cli alloc-osd`
Атомарно выделить новый номер OSD и зарезервировать его, создав в etcd пустой
ключ `/osd/stats/<n>`.
Allocate a new OSD number and reserve it by creating empty `/osd/stats/<n>` key.
## rm-osd
## simple-offsets
`vitastor-cli rm-osd [--force] [--allow-data-loss] [--dry-run] <osd_id> [osd_id...]`
`vitastor-cli simple-offsets <device>`
Удалить метаданные и конфигурацию для заданных OSD из etcd.
Calculate offsets for simple&stupid (no superblock) OSD deployment.
Отказывается удалять OSD с данными без опций `--force` и `--allow-data-loss`.
Options (see also [Cluster-Wide Disk Layout Parameters](../config/layout-cluster.en.md)):
С опцией `--dry-run` только проверяет, возможно ли удаление без потери данных и деградации
избыточности.
```
--object_size 128k Set blockstore block size
--bitmap_granularity 4k Set bitmap granularity
--journal_size 32M Set journal size
--device_block_size 4k Set device block size
--journal_offset 0 Set journal offset
--device_size 0 Set device size
--format text Result format: json, options, env, or text
```
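A quick combined sketch of these commands (the image and snapshot names are hypothetical):
```
# Remove a snapshot layer; its data gets merged into the child layer
vitastor-cli rm testimg@snap1

# Make a clone self-contained by copying parent data into it
vitastor-cli flatten testclone
```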

View File

@@ -1,257 +0,0 @@
[Documentation](../../README.md#documentation) → Usage → Disk Tool
-----
[Читать на русском](disk.ru.md)
# Disk management tool
vitastor-disk is a command-line tool for physical Vitastor disk management.
It supports the following commands:
- [prepare](#prepare)
- [upgrade-simple](#upgrade-simple)
- [resize](#resize)
- [start/stop/restart/enable/disable](#start/stop/restart/enable/disable)
- [purge](#purge)
- [read-sb](#read-sb)
- [write-sb](#write-sb)
- [udev](#udev)
- [exec-osd](#exec-osd)
- [pre-exec](#pre-exec)
- Debugging:
- [dump-journal](#dump-journal)
- [write-journal](#write-journal)
- [dump-meta](#dump-meta)
- [write-meta](#write-meta)
- [simple-offsets](#simple-offsets)
## prepare
`vitastor-disk prepare [OPTIONS] [devices...]`
Initialize disk(s) for Vitastor OSD(s).
There are two modes of this command. In the first mode, you pass `<devices>` which
must be raw disks (not partitions). They are partitioned automatically and OSDs
are initialized on all of them.
In the second mode, you omit `<devices>` and pass `--data_device`, `--journal_device`
and/or `--meta_device` which must be already existing partitions identified by their
GPT partition UUIDs. In this case a single OSD is created.
Requires `vitastor-cli`, `wipefs`, `sfdisk` and `partprobe` (from parted) utilities.
Options (automatic mode):
```
--osd_per_disk <N>
Create <N> OSDs on each disk (default 1)
--hybrid
Prepare hybrid (HDD+SSD) OSDs using provided devices. SSDs will be used for
journals and metadata, HDDs will be used for data. Partitions for journals and
metadata will be created automatically. Whether disks are SSD or HDD is decided
by the `/sys/block/.../queue/rotational` flag. In hybrid mode, default object
size is 1 MB instead of 128 KB, default journal size is 1 GB instead of 32 MB,
and throttle_small_writes is enabled by default.
--disable_data_fsync auto
Disable data device cache and fsync (1/yes/true = on, default auto)
--disable_meta_fsync auto
Disable metadata/journal device cache and fsync (default auto)
--meta_reserve 2x,1G
New metadata partitions in --hybrid mode are created larger than actual
metadata size to ease possible future extension. The default is to allocate
2 times more space and at least 1G. Use this option to override.
--max_other 10%
Use disks for OSD data even if they already have non-Vitastor partitions,
but only if these take up no more than this percent of disk space.
```
Options (single-device mode):
```
--data_device <DEV> Use partition <DEV> for data
--meta_device <DEV> Use partition <DEV> for metadata (optional)
--journal_device <DEV> Use partition <DEV> for journal (optional)
--disable_data_fsync 0 Disable data device cache and fsync (default off)
--disable_meta_fsync 0 Disable metadata device cache and fsync (default off)
--disable_journal_fsync 0 Disable journal device cache and fsync (default off)
--force Bypass partition safety checks (for emptiness and so on)
```
Options (both modes):
```
--journal_size 1G/32M Set journal size (area or partition size)
--block_size 1M/128k Set blockstore object size
--bitmap_granularity 4k Set bitmap granularity
--data_device_block 4k Override data device block size
--meta_device_block 4k Override metadata device block size
--journal_device_block 4k Override journal device block size
```
[immediate_commit](../config/layout-cluster.en.md#immediate_commit) setting is
automatically derived from "disable fsync" options. It's set to "all" when fsync
is disabled on all devices, and to "small" if fsync is only disabled on journal device.
When data/meta/journal fsyncs are disabled, the OSD startup script automatically
checks the device cache status on start and tries to disable cache for SATA/SAS disks.
If it doesn't succeed it issues a warning in the system log.
You can also pass other OSD options here as arguments and they'll be persisted
to the superblock: max_write_iodepth, min_flusher_count,
max_flusher_count, inmemory_metadata, inmemory_journal, journal_sector_buffer_count,
journal_no_same_sector_overwrites, throttle_small_writes, throttle_target_iops,
throttle_target_mbs, throttle_target_parallelism, throttle_threshold_us.
See [Runtime OSD Parameters](../config/osd.en.md) for details.
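For example, a minimal sketch of both modes (device paths here are placeholders):
```
# Automatic mode: whole disks are partitioned automatically; with --hybrid,
# SSDs receive journal/metadata partitions and HDDs receive data
vitastor-disk prepare --hybrid /dev/sdb /dev/sdc /dev/nvme0n1

# Single-device mode: create one OSD on an existing GPT partition
vitastor-disk prepare --data_device /dev/disk/by-partuuid/<PARTUUID>
```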
## upgrade-simple
`vitastor-disk upgrade-simple <UNIT_FILE|OSD_NUMBER>`
Upgrade an OSD created by old (0.7.1 and older) `make-osd.sh` or `make-osd-hybrid.js` scripts.
Adds superblocks to OSD devices, disables old `vitastor-osdN` unit and replaces it with `vitastor-osd@N`.
Can be invoked either with an OSD number or with a path to a systemd service file `UNIT_FILE`,
which must be `/etc/systemd/system/vitastor-osd<OSD_NUMBER>.service`.
Note that the procedure isn't atomic and may ruin OSD data in case of an interrupt,
so don't upgrade all your OSDs in parallel.
Requires the `sfdisk` utility.
## resize
`vitastor-disk resize <ALL_OSD_PARAMETERS> <NEW_LAYOUT> [--iodepth 32]`
Resize data area and/or rewrite/move journal and metadata.
`ALL_OSD_PARAMETERS` must include all (at least all disk-related)
parameters from OSD command line (i.e. from systemd unit or superblock).
`NEW_LAYOUT` may include new disk layout parameters:
```
--new_data_offset SIZE resize data area so it starts at SIZE
--new_data_len SIZE resize data area to SIZE bytes
--new_meta_device PATH use PATH for new metadata
--new_meta_offset SIZE make new metadata area start at SIZE
--new_meta_len SIZE make new metadata area SIZE bytes long
--new_journal_device PATH use PATH for new journal
--new_journal_offset SIZE make new journal area start at SIZE
--new_journal_len SIZE make new journal area SIZE bytes long
```
SIZE may include k/m/g/t suffixes. If any of the new layout parameter
options are not specified, old values will be used.
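As a hedged example, growing the data area while keeping journal and metadata in place
might look like this (all offsets are placeholders; take the real values from your OSD
superblock or systemd unit):
```
vitastor-disk resize \
    --data_device /dev/disk/by-partuuid/<PARTUUID> \
    --journal_offset 0 --meta_offset 33554432 --data_offset 134217728 \
    --new_data_len 2t --iodepth 32
```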
## start/stop/restart/enable/disable
`vitastor-disk start|stop|restart|enable|disable [--now] <device> [device2 device3 ...]`
Manipulate Vitastor OSDs using systemd by their device paths.
Commands are passed to `systemctl` with `vitastor-osd@<num>` units as arguments.
When `--now` is added to enable/disable, OSDs are also immediately started/stopped.
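For instance (device paths are placeholders):
```
# Enable and immediately start the OSDs located on these partitions
vitastor-disk enable --now /dev/sdb1 /dev/sdc1
```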
## purge
`vitastor-disk purge [--force] [--allow-data-loss] <device> [device2 device3 ...]`
Purge Vitastor OSD(s) on specified device(s). Uses `vitastor-cli rm-osd` to check
if deletion is possible without data loss and to actually remove metadata from etcd.
`--force` and `--allow-data-loss` options may be used to ignore safety check results.
Requires `vitastor-cli`, `sfdisk` and `partprobe` (from parted) utilities.
## read-sb
`vitastor-disk read-sb [--force] <device>`
Try to read Vitastor OSD superblock from `<device>` and print it in JSON format.
`--force` allows to ignore validation errors.
## write-sb
`vitastor-disk write-sb <device>`
Read JSON from STDIN and write it into Vitastor OSD superblock on `<device>`.
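Together, read-sb and write-sb allow round-trip edits of the superblock. A sketch, assuming
`jq` is available (the exact JSON key shown is an assumption for illustration):
```
# Dump the superblock, toggle one persisted option, write it back
vitastor-disk read-sb /dev/sdb1 | \
    jq '.params.throttle_small_writes = true' | \
    vitastor-disk write-sb /dev/sdb1
```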
## udev
`vitastor-disk udev <device>`
Try to read Vitastor OSD superblock from `<device>` and print variables for udev.
## exec-osd
`vitastor-disk exec-osd <device>`
Read Vitastor OSD superblock from `<device>` and start the OSD with parameters from it.
Intended for use from startup scripts (i.e. from systemd units).
## pre-exec
`vitastor-disk pre-exec <device>`
Read Vitastor OSD superblock from `<device>` and perform pre-start checks for the OSD.
For now, this only checks that device cache is in write-through mode if fsync is disabled.
Intended for use from startup scripts (i.e. from systemd units).
## dump-journal
`vitastor-disk dump-journal [OPTIONS] <journal_file> <journal_block_size> <offset> <size>`
Dump journal in human-readable or JSON (if `--json` is specified) format.
Options:
```
--all Scan the whole journal area for entries and dump them, even outdated ones
--json Dump journal in JSON format
--format entries (Default) Dump actual journal entries as an array, without data
--format data Same as "entries", but also include small write data
--format blocks Dump as an array of journal blocks each containing array of entries
```
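A round-trip sketch with write-journal (the block size, offsets and sizes are placeholders;
take the real values from the OSD superblock):
```
# Dump actual journal entries with data...
vitastor-disk dump-journal --json --format data /dev/sdc1 4096 0 33554432 > journal.json

# ...and write them back later
vitastor-disk write-journal /dev/sdc1 4096 <bitmap_size> 0 33554432 < journal.json
```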
## write-journal
`vitastor-disk write-journal <journal_file> <journal_block_size> <bitmap_size> <offset> <size>`
Write journal from JSON taken from standard input in the same format as produced by
`dump-journal --json --format data`.
## dump-meta
`vitastor-disk dump-meta <meta_file> <meta_block_size> <offset> <size>`
Dump metadata in JSON format.
## write-meta
`vitastor-disk write-meta <meta_file> <offset> <size>`
Write metadata from JSON taken from standard input in the same format as produced by `dump-meta`.
## simple-offsets
`vitastor-disk simple-offsets <device>`
Calculate offsets for old simple&stupid (no superblock) OSD deployment.
Options (see also [Cluster-Wide Disk Layout Parameters](../config/layout-cluster.en.md)):
```
--object_size 128k Set blockstore block size
--bitmap_granularity 4k Set bitmap granularity
--journal_size 32M Set journal size
--device_block_size 4k Set device block size
--journal_offset 0 Set journal offset
--device_size 0 Set device size
--format text Result format: json, options, env, or text
```
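The `options` format is handy for building an OSD command line directly, e.g.:
```
# Print offsets as "--key value" pairs suitable for vitastor-osd arguments
vitastor-disk simple-offsets --format options /dev/sdb
```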

View File

@@ -1,262 +0,0 @@
[Документация](../../README-ru.md#документация) → Использование → Управление дисками
-----
[Read in English](disk.en.md)
# Инструмент управления дисками
vitastor-disk - инструмент командной строки для управления дисками Vitastor OSD.
Поддерживаются следующие команды:
- [prepare](#prepare)
- [upgrade-simple](#upgrade-simple)
- [resize](#resize)
- [start/stop/restart/enable/disable](#start/stop/restart/enable/disable)
- [purge](#purge)
- [read-sb](#read-sb)
- [write-sb](#write-sb)
- [udev](#udev)
- [exec-osd](#exec-osd)
- [pre-exec](#pre-exec)
- Для отладки:
- [dump-journal](#dump-journal)
- [write-journal](#write-journal)
- [dump-meta](#dump-meta)
- [write-meta](#write-meta)
- [simple-offsets](#simple-offsets)
## prepare
`vitastor-disk prepare [OPTIONS] [devices...]`
Подготовить диск(и) для OSD Vitastor.
У команды есть 2 режима. В первом режиме вы указываете список устройств `<devices>`,
которые должны быть целыми дисками (не разделами). На них автоматически создаются
разделы и инициализируются OSD.
Во втором режиме вместо списка устройств вы указываете пути к отдельным устройствам
`--data_device`, `--journal_device` и/или `--meta_device`, которые должны быть
уже существующими GPT-разделами. В этом случае инициализируется ровно один OSD.
Команде требуются утилиты `vitastor-cli`, `wipefs`, `sfdisk` и `partprobe` (из состава parted).
Опции для автоматического режима:
```
--osd_per_disk <N>
Создавать по несколько (<N>) OSD на каждом диске (по умолчанию 1)
--hybrid
Инициализировать гибридные (HDD+SSD) OSD на указанных дисках. SSD будут
использованы для журналов и метаданных, а HDD - для данных. Разделы для журналов
и метаданных будут созданы автоматически. Является ли диск SSD или HDD, определяется
по флагу `/sys/block/.../queue/rotational`. В гибридном режиме по умолчанию
используется размер объекта 1 МБ вместо 128 КБ, размер журнала 1 ГБ вместо 32 МБ
и включённый throttle_small_writes.
--disable_data_fsync auto
Отключать кэш и fsync-и для устройств данных. (1/yes/true = да, по умолчанию автоопределение)
--disable_meta_fsync auto
Отключать кэш и fsync-и для журналов и метаданных (по умолчанию автоопределение)
--meta_reserve 2x,1G
В гибридном режиме для метаданных выделяется больше места, чем нужно на самом
деле, чтобы оставить запас под будущее расширение. По умолчанию выделяется
в 2 раза больше места, и не менее 1 ГБ. Чтобы изменить это поведение,
воспользуйтесь данной опцией.
--max_other 10%
Использовать диски под данные OSD, даже если на них уже есть не-Vitastor-овые
разделы, но только в случае, если они занимают не более данного процента диска.
```
Опции для режима одного OSD:
```
--data_device <DEV> Использовать раздел <DEV> для данных
--meta_device <DEV> Использовать раздел <DEV> для метаданных (опционально)
--journal_device <DEV> Использовать раздел <DEV> для журнала (опционально)
--disable_data_fsync 0 Отключить кэш и fsync устройства данных (по умолчанию нет)
--disable_meta_fsync 0 Отключить кэш и fsync метаданных (по умолчанию нет)
--disable_journal_fsync 0 Отключить кэш и fsync журнала (по умолчанию нет)
--force Пропустить проверки разделов (на пустоту и т.п.)
```
Опции для обоих режимов:
```
--journal_size 1G/32M Задать размер журнала (области или раздела журнала)
--block_size 1M/128k Задать размер объекта хранилища
--bitmap_granularity 4k Задать гранулярность битовых карт
--data_device_block 4k Задать размер блока устройства данных
--meta_device_block 4k Задать размер блока метаданных
--journal_device_block 4k Задать размер блока журнала
```
Настройка [immediate_commit](../config/layout-cluster.ru.md#immediate_commit)
автоматически выводится из опций отключения кэша - она устанавливается в "all", если кэш
отключён на всех устройствах, и в "small", если он отключён только на устройстве журнала.
Когда fsync данных/метаданных/журнала отключён, скрипты запуска OSD автоматически
проверяют состояние кэша диска и стараются его отключить для SATA/SAS дисков. Если
это не удаётся, в системный журнал выводится предупреждение.
Вы можете передать данной команде и некоторые другие опции OSD в качестве аргументов
и они тоже будут сохранены в суперблок: max_write_iodepth, min_flusher_count,
max_flusher_count, inmemory_metadata, inmemory_journal, journal_sector_buffer_count,
journal_no_same_sector_overwrites, throttle_small_writes, throttle_target_iops,
throttle_target_mbs, throttle_target_parallelism, throttle_threshold_us.
Читайте об этих параметрах подробнее в разделе [Изменяемые параметры OSD](../config/osd.ru.md).
## upgrade-simple
`vitastor-disk upgrade-simple <UNIT_FILE|OSD_NUMBER>`
Обновить OSD, созданный старыми (0.7.1 и старее) скриптами `make-osd.sh` и `make-osd-hybrid.js`.
Добавляет суперблок на разделы OSD, отключает старый сервис `vitastor-osdN` и заменяет его на `vitastor-osd@N`.
Можно вызывать, указывая либо номер OSD, либо путь к файлу сервиса `UNIT_FILE`, но он обязан
иметь вид `/etc/systemd/system/vitastor-osd<OSD_NUMBER>.service`.
Имейте в виду, что процедура обновления не атомарна и при прерывании может уничтожить данные OSD,
так что обновляйте ваши OSD по очереди.
Команде требуется утилита `sfdisk`.
## resize
`vitastor-disk resize <ALL_OSD_PARAMETERS> <NEW_LAYOUT> [--iodepth 32]`
Изменить размер области данных и/или переместить журнал и метаданные.
В `ALL_OSD_PARAMETERS` нужно указать все относящиеся к диску параметры OSD
из суперблока OSD или из файла сервиса systemd (в старых версиях).
В `NEW_LAYOUT` нужно указать новые параметры расположения данных:
```
--new_data_offset РАЗМЕР сдвинуть начало области данных на РАЗМЕР байт
--new_data_len РАЗМЕР изменить размер области данных до РАЗМЕР байт
--new_meta_device ПУТЬ использовать ПУТЬ как новое устройство метаданных
--new_meta_offset РАЗМЕР разместить новые метаданные по смещению РАЗМЕР байт
--new_meta_len РАЗМЕР сделать новые метаданные размером РАЗМЕР байт
--new_journal_device ПУТЬ использовать ПУТЬ как новое устройство журнала
--new_journal_offset РАЗМЕР разместить новый журнал по смещению РАЗМЕР байт
--new_journal_len РАЗМЕР сделать новый журнал размером РАЗМЕР байт
```
РАЗМЕР может быть указан с суффиксами k/m/g/t. Если любой из новых параметров
расположения не указан, он принимается равным старому значению.
## start/stop/restart/enable/disable
`vitastor-disk start|stop|restart|enable|disable [--now] <device> [device2 device3 ...]`
Команды управления OSD по путям дисков через systemd.
Команды транслируются `systemctl` с сервисами `vitastor-osd@<num>` в виде аргументов.
Когда к командам включения/выключения добавляется параметр `--now`, OSD также сразу
запускаются/останавливаются.
## purge
`vitastor-disk purge [--force] [--allow-data-loss] <device> [device2 device3 ...]`
Удалить OSD на заданном диске/дисках. Использует `vitastor-cli rm-osd` для проверки
возможности удаления без потери данных и для удаления OSD из etcd. Опции `--force`
и `--allow-data-loss` служат для обхода данной защиты в случае необходимости.
Команде требуются утилиты `vitastor-cli`, `sfdisk` и `partprobe` (из состава parted).
## read-sb
`vitastor-disk read-sb [--force] <device>`
Прочитать суперблок OSD с диска `<device>` и вывести его в формате JSON.
Опция `--force` позволяет читать суперблок, даже если он считается некорректным
из-за ошибок валидации.
## write-sb
`vitastor-disk write-sb <device>`
Прочитать JSON со стандартного ввода и записать его в суперблок OSD на диск `<device>`.
## udev
`vitastor-disk udev <device>`
Прочитать суперблок OSD с диска `<device>` и вывести переменные для udev.
## exec-osd
`vitastor-disk exec-osd <device>`
Прочитать суперблок OSD с диска `<device>` и запустить исполняемый файл OSD с параметрами оттуда.
Команда предназначена для использования из скриптов запуска (например, из сервисов systemd).
## pre-exec
`vitastor-disk pre-exec <device>`
Прочитать суперблок OSD с диска `<device>` и провести проверки OSD перед запуском.
На данный момент только отключает кэш диска или проверяет, что он отключён, если в параметрах
OSD отключены fsync-и.
Команда предназначена для использования из скриптов запуска (например, из сервисов systemd).
## dump-journal
`vitastor-disk dump-journal [OPTIONS] <journal_file> <journal_block_size> <offset> <size>`
Вывести журнал в человекочитаемом или в JSON (с опцией `--json`) виде.
Опции:
```
--all Просканировать всю область журнала и вывести даже старые записи
--json Вывести журнал в формате JSON
--format entries (По умолчанию) Вывести только актуальные записи журнала без данных
--format data Вывести только актуальные записи журнала с данными
--format blocks Вывести массив блоков журнала, а в каждом массив актуальных записей без данных
```
## write-journal
`vitastor-disk write-journal <journal_file> <journal_block_size> <bitmap_size> <offset> <size>`
Записать журнал из JSON со стандартного ввода в формате, аналогичном `dump-journal --json --format data`.
## dump-meta
`vitastor-disk dump-meta <meta_file> <meta_block_size> <offset> <size>`
Вывести метаданные в формате JSON.
## write-meta
`vitastor-disk write-meta <meta_file> <offset> <size>`
Записать метаданные из JSON со стандартного ввода в формате, аналогичном `dump-meta`.
## simple-offsets
`vitastor-disk simple-offsets <device>`
Рассчитать смещения для старого ("простого и тупого") создания OSD на диске (без суперблока).
Опции (см. также [Дисковые параметры уровня кластера](../config/layout-cluster.ru.md)):
```
--object_size 128k Размер блока хранилища
--bitmap_granularity 4k Гранулярность битовых карт
--journal_size 32M Размер журнала
--device_block_size 4k Размер блока устройства
--journal_offset 0 Смещение журнала
--device_size 0 Размер устройства
--format text Формат результата: json, options, env или text
```

View File

@@ -1,7 +0,0 @@
SUBSYSTEM=="block", ENV{ID_PART_ENTRY_TYPE}=="e7009fac-a5a1-4d72-af72-53de13059903", \
OWNER="vitastor", GROUP="vitastor", \
IMPORT{program}="/usr/bin/vitastor-disk udev $devnode", \
SYMLINK+="vitastor/$env{VITASTOR_ALIAS}"
ENV{VITASTOR_OSD_NUM}!="", ACTION=="add", RUN{program}+="/usr/bin/systemctl enable --now vitastor-osd@$env{VITASTOR_OSD_NUM}"
ENV{VITASTOR_OSD_NUM}!="", ACTION=="remove", RUN{program}+="/usr/bin/systemctl disable --now vitastor-osd@$env{VITASTOR_OSD_NUM}"

View File

@@ -21,7 +21,7 @@ function add_pg_history(new_pg_history, new_pg, prev_pgs, prev_pg_history, old_p
{
for (const pg of oh.osd_sets)
{
nh.osd_sets[pg.join(' ')] = pg.map(osd_num => Number(osd_num));
nh.osd_sets[pg.join(' ')] = pg;
}
}
if (oh && oh.all_peers && oh.all_peers.length)

View File

@@ -1,110 +0,0 @@
#!/usr/bin/node
// Simple systemd unit generator for etcd
// Copyright (c) Vitaliy Filippov, 2019+
// License: MIT
// USAGE:
// 1) Put the same etcd_address into /etc/vitastor/vitastor.conf on all monitor nodes
// 2) Run ./make-etcd.js. It will create the etcd service on one of specified IPs
const child_process = require('child_process');
const fs = require('fs');
const os = require('os');
run().catch(e => { console.error(e); process.exit(1); });
async function run()
{
const config_path = process.argv[2] || '/etc/vitastor/vitastor.conf';
if (config_path == '-h' || config_path == '--help')
{
console.log(
'Initialize systemd etcd service for Vitastor\n'+
'(c) Vitaliy Filippov, 2019+ (MIT)\n'+
'\n'+
'USAGE:\n'+
'1) Put the same etcd_address into /etc/vitastor/vitastor.conf on all monitor nodes\n'+
'2) Run '+process.argv[1]+' [config_path]\n'
);
process.exit(0);
}
if (!fs.existsSync(config_path))
{
console.log(config_path+' is missing');
process.exit(1);
}
if (fs.existsSync("/etc/systemd/system/etcd.service"))
{
console.log("/etc/systemd/system/etcd.service already exists");
process.exit(1);
}
const config = JSON.parse(fs.readFileSync(config_path, { encoding: 'utf-8' }));
if (!config.etcd_address)
{
console.log("etcd_address is missing in "+config_path);
process.exit(1);
}
const etcds = (config.etcd_address instanceof Array ? config.etcd_address : (''+config.etcd_address).split(/,/))
.map(s => (''+s).replace(/^https?:\/\/\[?|\]?(:\d+)?(\/.*)?$/g, '').toLowerCase());
const num = select_local_etcd(etcds);
if (num < 0)
{
console.log('No matching IPs in etcd_address from '+config_path);
process.exit(0);
}
const etcd_cluster = etcds.map((e, i) => `etcd${i}=http://${e}:2380`).join(',');
await system(`mkdir -p /var/lib/etcd${num}.etcd`);
fs.writeFileSync(
"/etc/systemd/system/etcd.service",
`[Unit]
Description=etcd for vitastor
After=network-online.target local-fs.target time-sync.target
Wants=network-online.target local-fs.target time-sync.target
[Service]
Restart=always
ExecStart=/usr/local/bin/etcd -name etcd${num} --data-dir /var/lib/etcd${num}.etcd \\
--advertise-client-urls http://${etcds[num]}:2379 --listen-client-urls http://${etcds[num]}:2379 \\
--initial-advertise-peer-urls http://${etcds[num]}:2380 --listen-peer-urls http://${etcds[num]}:2380 \\
--initial-cluster-token vitastor-etcd-1 --initial-cluster ${etcd_cluster} \\
--initial-cluster-state new --max-txn-ops=100000 --max-request-bytes=104857600 \\
--auto-compaction-retention=10 --auto-compaction-mode=revision
WorkingDirectory=/var/lib/etcd${num}.etcd
ExecStartPre=+chown -R etcd /var/lib/etcd${num}.etcd
User=etcd
PrivateTmp=false
TasksMax=infinity
Restart=always
StartLimitInterval=0
RestartSec=10
[Install]
WantedBy=local.target
`);
await system(`useradd etcd`);
await system(`systemctl daemon-reload`);
await system(`systemctl enable etcd`);
await system(`systemctl start etcd`);
process.exit(0);
}
function select_local_etcd(etcds)
{
const ifaces = os.networkInterfaces();
for (const ifname in ifaces)
for (const iface of ifaces[ifname])
for (let i = 0; i < etcds.length; i++)
if (etcds[i] == iface.address.toLowerCase())
return i;
return -1;
}
async function system(cmd)
{
const cp = child_process.spawn(cmd, { shell: true, stdio: [ 0, 1, 2 ] });
let finish_cb;
cp.on('exit', () => finish_cb && finish_cb());
if (cp.exitCode == null)
await new Promise(ok => finish_cb = ok);
return cp.exitCode;
}

mon/make-osd-hybrid.js Executable file
View File

@@ -0,0 +1,414 @@
#!/usr/bin/nodejs
// systemd unit generator for hybrid (HDD+SSD) vitastor OSDs
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1
// USAGE: nodejs make-osd-hybrid.js [--disable_ssd_cache 0] [--disable_hdd_cache 0] /dev/sda /dev/sdb /dev/sdc /dev/sdd ...
// I.e. - just pass all HDDs and SSDs mixed, the script will decide where
// to put journals on its own
const fs = require('fs');
const fsp = fs.promises;
const child_process = require('child_process');
const options = {
debug: 1,
journal_size: 1024*1024*1024,
min_meta_size: 1024*1024*1024,
object_size: 1024*1024,
bitmap_granularity: 4096,
device_block_size: 4096,
disable_ssd_cache: 1,
disable_hdd_cache: 1,
};
run().catch(e => { console.error(e); process.exit(1); });
async function run()
{
const device_list = parse_options();
await system_or_die("mkdir -p /var/log/vitastor; chown vitastor /var/log/vitastor");
// Collect devices
const all_devices = await collect_devices(device_list);
const ssds = all_devices.filter(d => d.ssd);
const hdds = all_devices.filter(d => !d.ssd);
// Collect existing OSD units
const osd_units = await collect_osd_units();
// Count assigned HDD journals and unallocated space for each SSD
await check_journal_count(ssds, osd_units);
// Create new OSDs
await create_new_hybrid_osds(hdds, ssds, osd_units);
process.exit(0);
}
function parse_options()
{
const devices = [];
const opt = {};
for (let i = 2; i < process.argv.length; i++)
{
const arg = process.argv[i];
if (arg == '--help' || arg == '-h')
{
opt.help = true;
break;
}
else if (arg.substr(0, 2) == '--')
opt[arg.substr(2)] = process.argv[++i];
else
devices.push(arg);
}
if (opt.help || !devices.length)
{
console.log(
'Prepare hybrid (HDD+SSD) Vitastor OSDs\n'+
'(c) Vitaliy Filippov, 2019+, license: VNPL-1.1\n\n'+
'USAGE: nodejs make-osd-hybrid.js [OPTIONS] /dev/sda /dev/sdb /dev/sdc ...\n'+
'Just pass all your SSDs and HDDs in any order, the script will distribute OSDs for you.\n\n'+
'OPTIONS (with defaults):\n'+
Object.keys(options).map(k => ` --${k} ${options[k]}`).join('\n')
);
process.exit(0);
}
for (const k in opt)
options[k] = opt[k];
return devices;
}
// Collect devices
async function collect_devices(devices_to_check)
{
const devices = [];
for (const dev of devices_to_check)
{
if (dev.substr(0, 5) != '/dev/')
{
console.log(`${dev} does not start with /dev/, skipping`);
continue;
}
if (!await file_exists('/sys/block/'+dev.substr(5)))
{
console.log(`${dev} is a partition, skipping`);
continue;
}
// Check if the device is an SSD
const rot = '/sys/block/'+dev.substr(5)+'/queue/rotational';
if (!await file_exists(rot))
{
console.log(`${dev} does not have ${rot} to check whether it's an SSD, skipping`);
continue;
}
const ssd = !parseInt(await fsp.readFile(rot, { encoding: 'utf-8' }));
// Check if the device has partition table
let [ has_partition_table, parts ] = await system(`sfdisk --dump ${dev} --json`);
if (has_partition_table != 0)
{
// Check if the device has any data
const [ has_data, out ] = await system(`blkid ${dev}`);
if (has_data == 0)
{
console.log(`${dev} contains data, skipping:\n ${out.trim().replace(/\n/g, '\n ')}`);
continue;
}
}
parts = parts ? JSON.parse(parts).partitiontable : null;
if (parts && parts.label != 'gpt')
{
console.log(`${dev} contains "${parts.label}" partition table, only GPT is supported, skipping`);
continue;
}
devices.push({
path: dev,
ssd,
parts,
});
}
return devices;
}
// Collect existing OSD units
async function collect_osd_units()
{
const units = [];
for (const unit of (await system("ls /etc/systemd/system/vitastor-osd*.service"))[1].trim().split('\n'))
{
if (!unit)
{
continue;
}
let cmd = /^ExecStart\s*=\s*(([^\n]*\\\n)*[^\n]*)/.exec(await fsp.readFile(unit, { encoding: 'utf-8' }));
if (!cmd)
{
console.log('ExecStart= not found in '+unit+', skipping')
continue;
}
let kv = {}, key;
cmd = cmd[1].replace(/^bash\s+-c\s+'/, '')
.replace(/>>\s*\S+\s*2>\s*&1\s*'$/, '')
.replace(/\s*\\\n\s*/g, ' ')
.replace(/([^\s']+)|'([^']+)'/g, (m, m1, m2) =>
{
m1 = m1||m2;
if (key == null)
{
if (m1.substr(0, 2) != '--')
{
console.log('Strange command line in '+unit+', stopping');
process.exit(1);
}
key = m1.substr(2);
}
else
{
kv[key] = m1;
key = null;
}
});
units.push(kv);
}
return units;
}
// Count assigned HDD journals and unallocated space for each SSD
async function check_journal_count(ssds, osd_units)
{
const units_by_journal = osd_units.reduce((a, c) =>
{
if (c.journal_device)
a[c.journal_device] = c;
return a;
}, {});
for (const dev of ssds)
{
dev.journals = 0;
if (dev.parts)
{
for (const part of dev.parts.partitions)
{
if (part.uuid && units_by_journal['/dev/disk/by-partuuid/'+part.uuid.toLowerCase()])
{
dev.journals++;
}
}
dev.free = free_from_parttable(dev.parts);
}
else
{
dev.free = parseInt(await system_or_die("blockdev --getsize64 "+dev.path));
}
}
}
async function create_new_hybrid_osds(hdds, ssds, osd_units)
{
const units_by_disk = osd_units.reduce((a, c) => { a[c.data_device] = c; return a; }, {});
for (const dev of hdds)
{
if (!dev.parts)
{
// HDD is not partitioned yet, create a single partition
// + is the "default value" for sfdisk
await system_or_die('sfdisk '+dev.path, 'label: gpt\n\n+ +\n');
dev.parts = JSON.parse(await system_or_die('sfdisk --dump '+dev.path+' --json')).partitiontable;
}
if (dev.parts.partitions.length != 1)
{
console.log(dev.path+' has more than 1 partition, skipping');
}
else if ((dev.parts.partitions[0].start + dev.parts.partitions[0].size) != (1 + dev.parts.lastlba))
{
console.log(dev.path+'1 is not a whole-disk partition, skipping');
}
else if (!dev.parts.partitions[0].uuid)
{
console.log(dev.parts.partitions[0].node+' does not have UUID. Please repartition '+dev.path+' with GPT');
}
else if (!units_by_disk['/dev/disk/by-partuuid/'+dev.parts.partitions[0].uuid.toLowerCase()])
{
await create_hybrid_osd(dev, ssds);
}
}
}
async function create_hybrid_osd(dev, ssds)
{
// Create a new OSD
// Calculate metadata size
const data_device = '/dev/disk/by-partuuid/'+dev.parts.partitions[0].uuid.toLowerCase();
const data_size = dev.parts.partitions[0].size * dev.parts.sectorsize;
const meta_entry_size = 24 + 2*options.object_size/options.bitmap_granularity/8;
const entries_per_block = Math.floor(options.device_block_size / meta_entry_size);
const object_count = Math.floor(data_size / options.object_size);
let meta_size = Math.ceil(1 + object_count / entries_per_block) * options.device_block_size;
// Leave some extra space for future metadata formats and round metadata area size to multiples of 1 MB
meta_size = 2*meta_size;
meta_size = Math.ceil(meta_size/1024/1024) * 1024*1024;
if (meta_size < options.min_meta_size)
meta_size = options.min_meta_size;
let journal_size = Math.ceil(options.journal_size/1024/1024) * 1024*1024;
// Pick an SSD for journal, balancing the number of journals across SSDs
let selected_ssd;
for (const ssd of ssds)
if (ssd.free >= (meta_size+journal_size) && (!selected_ssd || selected_ssd.journals > ssd.journals))
selected_ssd = ssd;
if (!selected_ssd)
{
console.error('Could not find free space for SSD journal and metadata for '+dev.path);
process.exit(1);
}
// Allocate an OSD number
const osd_num = (await system_or_die("vitastor-cli alloc-osd")).trim();
if (!osd_num)
{
console.error('Failed to run vitastor-cli alloc-osd');
process.exit(1);
}
console.log('Creating OSD '+osd_num+' on '+dev.path+' (HDD) with journal and metadata on '+selected_ssd.path+' (SSD)');
// Add two partitions: journal and metadata
const new_parts = await add_partitions(selected_ssd, [ journal_size, meta_size ]);
selected_ssd.journals++;
const journal_device = '/dev/disk/by-partuuid/'+new_parts[0].uuid.toLowerCase();
const meta_device = '/dev/disk/by-partuuid/'+new_parts[1].uuid.toLowerCase();
// Wait until the device symlinks appear
while (!await file_exists(journal_device))
{
await new Promise(ok => setTimeout(ok, 100));
}
while (!await file_exists(meta_device))
{
await new Promise(ok => setTimeout(ok, 100));
}
// Zero out metadata and journal
await system_or_die("dd if=/dev/zero of="+journal_device+" bs=1M count="+(journal_size/1024/1024)+" oflag=direct");
await system_or_die("dd if=/dev/zero of="+meta_device+" bs=1M count="+(meta_size/1024/1024)+" oflag=direct");
// Create unit file for the OSD
const has_scsi_cache_type = options.disable_ssd_cache &&
(await system("ls /sys/block/"+selected_ssd.path.substr(5)+"/device/scsi_disk/*/cache_type"))[0] == 0;
const write_through = options.disable_ssd_cache && (
has_scsi_cache_type || selected_ssd.path.substr(5, 4) == 'nvme'
&& (await system_or_die("cat /sys/block/"+selected_ssd.path.substr(5)+"/queue/write_cache")).trim() == "write through");
await fsp.writeFile('/etc/systemd/system/vitastor-osd'+osd_num+'.service',
`[Unit]
Description=Vitastor object storage daemon osd.${osd_num}
After=network-online.target local-fs.target time-sync.target
Wants=network-online.target local-fs.target time-sync.target
PartOf=vitastor.target
[Service]
LimitNOFILE=1048576
LimitNPROC=1048576
LimitMEMLOCK=infinity
ExecStart=bash -c '/usr/bin/vitastor-osd \\
--osd_num ${osd_num} ${write_through
? "--disable_meta_fsync 1 --disable_journal_fsync 1 --immediate_commit "+(options.disable_hdd_cache ? "all" : "small")
: ""} \\
--throttle_small_writes 1 \\
--disk_alignment ${options.device_block_size} \\
--journal_block_size ${options.device_block_size} \\
--meta_block_size ${options.device_block_size} \\
--journal_no_same_sector_overwrites true \\
--journal_sector_buffer_count 1024 \\
--block_size ${options.object_size} \\
--data_device ${data_device} \\
--journal_device ${journal_device} \\
--meta_device ${meta_device} >>/var/log/vitastor/osd${osd_num}.log 2>&1'
WorkingDirectory=/
ExecStartPre=+chown vitastor:vitastor ${data_device}
ExecStartPre=+chown vitastor:vitastor ${journal_device}
ExecStartPre=+chown vitastor:vitastor ${meta_device}${
has_scsi_cache_type
? "\nExecStartPre=+bash -c 'D=$$$(readlink "+journal_device+"); echo write through > $$$(dirname /sys/block/*/$$\${D##*/})/device/scsi_disk/*/cache_type'"
: ""}${
options.disable_hdd_cache
? "\nExecStartPre=+bash -c 'D=$$$(readlink "+data_device+"); echo write through > $$$(dirname /sys/block/*/$$\${D##*/})/device/scsi_disk/*/cache_type'"
: ""}
User=vitastor
PrivateTmp=false
TasksMax=infinity
Restart=always
StartLimitInterval=0
RestartSec=10
[Install]
WantedBy=vitastor.target
`);
await system_or_die("systemctl enable vitastor-osd"+osd_num);
}
async function add_partitions(dev, sizes)
{
let script = 'label: gpt\n\n';
if (dev.parts)
{
// Old partitions
for (const part of dev.parts.partitions)
{
script += part.node+': '+Object.keys(part).map(k => k == 'node' ? '' : k+'='+part[k]).filter(k => k).join(', ')+'\n';
}
}
// New partitions
for (const size of sizes)
{
script += '+ '+Math.ceil(size/1024)+'KiB\n';
}
await system_or_die('sfdisk '+dev.path, script);
// Get new partition table and find the new partition
const newpt = JSON.parse(await system_or_die('sfdisk --dump '+dev.path+' --json')).partitiontable;
const old_nodes = dev.parts ? dev.parts.partitions.reduce((a, c) => { a[c.uuid] = true; return a; }, {}) : {};
const new_nodes = newpt.partitions.filter(part => !old_nodes[part.uuid]);
if (new_nodes.length != sizes.length)
{
console.error('Failed to partition '+dev.path+': new partitions not found in table');
process.exit(1);
}
dev.parts = newpt;
dev.free = free_from_parttable(newpt);
return new_nodes;
}
function free_from_parttable(pt)
{
let free = pt.lastlba + 1 - pt.firstlba;
for (const part of pt.partitions)
{
free -= part.size;
}
free *= pt.sectorsize;
return free;
}
async function system_or_die(cmd, input = '')
{
let [ exitcode, stdout, stderr ] = await system(cmd, input);
if (exitcode != 0)
{
console.error(cmd+' failed: '+stderr);
process.exit(1);
}
return stdout;
}
async function system(cmd, input = '')
{
if (options.debug)
{
process.stderr.write('+ '+cmd+(input ? " <<EOF\n"+input.replace(/\s*$/, '\n')+"EOF" : '')+'\n');
}
const cp = child_process.spawn(cmd, { shell: true });
let stdout = '', stderr = '', finish_cb;
cp.stdout.on('data', buf => stdout += buf.toString());
cp.stderr.on('data', buf => stderr += buf.toString());
cp.on('exit', () => finish_cb && finish_cb());
cp.stdin.write(input);
cp.stdin.end();
if (cp.exitCode == null)
{
await new Promise(ok => finish_cb = ok);
}
return [ cp.exitCode, stdout, stderr ];
}
async function file_exists(filename)
{
return new Promise((ok, no) => fs.access(filename, fs.constants.R_OK, err => ok(!err)));
}

mon/make-osd.sh Executable file
View File

@@ -0,0 +1,66 @@
#!/bin/bash
# Very simple systemd unit generator for vitastor-osd services
# Not the final solution yet, mostly for tests
# Copyright (c) Vitaliy Filippov, 2019+
# License: MIT
# USAGE:
# 1) Put etcd_address and osd_network into /etc/vitastor/vitastor.conf. Example:
# {
# "etcd_address":["http://10.200.1.10:2379/v3","http://10.200.1.11:2379/v3","http://10.200.1.12:2379/v3"],
# "osd_network":"10.200.1.0/24"
# }
# 2) Run ./make-osd.sh /dev/disk/by-partuuid/xxx [ /dev/disk/by-partuuid/yyy]...
set -e -x
# Create OSDs on all passed devices
for DEV in $*; do
OSD_NUM=$(vitastor-cli alloc-osd)
echo Creating OSD $OSD_NUM on $DEV
OPT=$(vitastor-cli simple-offsets --format options $DEV | tr '\n' ' ')
META=$(vitastor-cli simple-offsets --format json $DEV | jq .data_offset)
dd if=/dev/zero of=$DEV bs=1048576 count=$(((META+1048575)/1048576)) oflag=direct
mkdir -p /var/log/vitastor
id vitastor &>/dev/null || useradd vitastor
chown vitastor /var/log/vitastor
cat >/etc/systemd/system/vitastor-osd$OSD_NUM.service <<EOF
[Unit]
Description=Vitastor object storage daemon osd.$OSD_NUM
After=network-online.target local-fs.target time-sync.target
Wants=network-online.target local-fs.target time-sync.target
PartOf=vitastor.target
[Service]
LimitNOFILE=1048576
LimitNPROC=1048576
LimitMEMLOCK=infinity
ExecStart=bash -c '/usr/bin/vitastor-osd \\
--osd_num $OSD_NUM \\
--disable_data_fsync 1 \\
--immediate_commit all \\
--disk_alignment 4096 --journal_block_size 4096 --meta_block_size 4096 \\
--journal_no_same_sector_overwrites true \\
--journal_sector_buffer_count 1024 \\
$OPT >>/var/log/vitastor/osd$OSD_NUM.log 2>&1'
WorkingDirectory=/
ExecStartPre=+chown vitastor:vitastor $DEV
User=vitastor
PrivateTmp=false
TasksMax=infinity
Restart=always
StartLimitInterval=0
RestartSec=10
[Install]
WantedBy=vitastor.target
EOF
systemctl enable vitastor-osd$OSD_NUM
done

mon/make-units.sh Executable file
View File

@@ -0,0 +1,86 @@
#!/bin/bash
# Very simple systemd unit generator for etcd & vitastor-mon services
# Not the final solution yet, mostly for tests
# Copyright (c) Vitaliy Filippov, 2019+
# License: MIT
# USAGE: ./make-units.sh
IP_SUBSTR="10.200.1."
ETCD_HOSTS="etcd0=http://10.200.1.10:2380,etcd1=http://10.200.1.11:2380,etcd2=http://10.200.1.12:2380"
# determine IP
IP=`ip -json a s | jq -r '.[].addr_info[] | select(.local | startswith("'$IP_SUBSTR'")) | .local'`
[ "$IP" != "" ] || exit 1
ETCD_NUM=${ETCD_HOSTS/$IP*/}
[ "$ETCD_NUM" != "$ETCD_HOSTS" ] || exit 1
ETCD_NUM=$(echo $ETCD_NUM | tr -d -c , | wc -c)
# etcd
useradd etcd
mkdir -p /var/lib/etcd$ETCD_NUM.etcd
cat >/etc/systemd/system/etcd.service <<EOF
[Unit]
Description=etcd for vitastor
After=network-online.target local-fs.target time-sync.target
Wants=network-online.target local-fs.target time-sync.target
[Service]
Restart=always
ExecStart=/usr/local/bin/etcd -name etcd$ETCD_NUM --data-dir /var/lib/etcd$ETCD_NUM.etcd \\
--advertise-client-urls http://$IP:2379 --listen-client-urls http://$IP:2379 \\
--initial-advertise-peer-urls http://$IP:2380 --listen-peer-urls http://$IP:2380 \\
--initial-cluster-token vitastor-etcd-1 --initial-cluster $ETCD_HOSTS \\
--initial-cluster-state new --max-txn-ops=100000 --max-request-bytes=104857600 \\
--auto-compaction-retention=10 --auto-compaction-mode=revision
WorkingDirectory=/var/lib/etcd$ETCD_NUM.etcd
ExecStartPre=+chown -R etcd /var/lib/etcd$ETCD_NUM.etcd
User=etcd
PrivateTmp=false
TasksMax=infinity
Restart=always
StartLimitInterval=0
RestartSec=10
[Install]
WantedBy=local.target
EOF
systemctl daemon-reload
systemctl enable etcd
systemctl start etcd
useradd vitastor
chmod 755 /root
# Vitastor target
cat >/etc/systemd/system/vitastor.target <<EOF
[Unit]
Description=vitastor target
[Install]
WantedBy=multi-user.target
EOF
# Monitor unit
ETCD_MON=$(echo $ETCD_HOSTS | perl -pe 's/:2380/:2379/g; s/etcd\d*=//g;')
cat >/etc/systemd/system/vitastor-mon.service <<EOF
[Unit]
Description=Vitastor monitor
After=network-online.target local-fs.target time-sync.target
Wants=network-online.target local-fs.target time-sync.target
[Service]
Restart=always
ExecStart=node /usr/lib/vitastor/mon/mon-main.js --etcd_url '$ETCD_MON' --etcd_prefix '/vitastor' --etcd_start_timeout 5
WorkingDirectory=/
User=vitastor
PrivateTmp=false
TasksMax=infinity
Restart=always
StartLimitInterval=0
RestartSec=10
[Install]
WantedBy=vitastor.target
EOF

View File

@@ -12,7 +12,7 @@ for (let i = 2; i < process.argv.length; i++)
if (process.argv[i] === '-h' || process.argv[i] === '--help')
{
console.error('USAGE: '+process.argv[0]+' '+process.argv[1]+' [--verbose 1]'+
' [--etcd_address "http://127.0.0.1:2379,..."] [--config_path /etc/vitastor/vitastor.conf]'+
' [--etcd_address "http://127.0.0.1:2379,..."] [--config_file /etc/vitastor/vitastor.conf]'+
' [--etcd_prefix "/vitastor"] [--etcd_start_timeout 5]');
process.exit();
}

View File

@@ -157,12 +157,7 @@ const etcd_tree = {
pg_count: 100,
failure_domain: 'host',
max_osd_combinations: 10000,
// block_size, bitmap_granularity, immediate_commit must match all OSDs used in that pool
block_size: 131072,
bitmap_granularity: 4096,
// 'all'/'small'/'none', same as in OSD options
immediate_commit: 'none',
pg_stripe_size: 0,
pg_stripe_size: 4194304,
root_node?: 'rack1',
// restrict pool to OSDs having all of these tags
osd_tags?: 'nvme' | [ 'nvme', ... ],
@@ -328,13 +323,6 @@ const etcd_tree = {
misplaced: uint64_t,
degraded: uint64_t,
incomplete: uint64_t,
},
object_bytes: {
total: uint64_t,
clean: uint64_t,
misplaced: uint64_t,
degraded: uint64_t,
incomplete: uint64_t,
}, */
},
history: {
@@ -663,15 +651,12 @@ class Mon
async save_last_clean()
{
// last_clean_pgs is used to avoid extra data move when observing a series of changes in the cluster
const new_clean_pgs = { items: {} };
next_pool:
for (const pool_id in this.state.config.pools)
{
new_clean_pgs.items[pool_id] = (this.state.history.last_clean_pgs.items||{})[pool_id];
const pool_cfg = this.state.config.pools[pool_id];
if (!this.validate_pool_cfg(pool_id, pool_cfg, false))
{
continue next_pool;
continue;
}
for (let pg_num = 1; pg_num <= pool_cfg.pg_count; pg_num++)
{
@@ -680,18 +665,17 @@ class Mon
!(this.state.pg.state[pool_id][pg_num].state instanceof Array))
{
// Unclean
continue next_pool;
return;
}
let st = this.state.pg.state[pool_id][pg_num].state.join(',');
if (st != 'active' && st != 'active,left_on_dead' && st != 'left_on_dead,active')
{
// Unclean
continue next_pool;
return;
}
}
new_clean_pgs.items[pool_id] = this.state.config.pgs.items[pool_id];
}
this.state.history.last_clean_pgs = new_clean_pgs;
this.state.history.last_clean_pgs = JSON.parse(JSON.stringify(this.state.config.pgs));
await this.etcd_call('/kv/txn', {
success: [ { requestPut: {
key: b64(this.etcd_prefix+'/history/last_clean_pgs'),
@@ -1378,14 +1362,16 @@ class Mon
// This is required for multiple change events to trigger at most 1 recheck in 1s
schedule_recheck()
{
if (!this.recheck_timer)
if (this.recheck_timer)
{
this.recheck_timer = setTimeout(() =>
{
this.recheck_timer = null;
this.recheck_pgs().catch(this.die);
}, this.config.mon_change_timeout || 1000);
clearTimeout(this.recheck_timer);
this.recheck_timer = null;
}
this.recheck_timer = setTimeout(() =>
{
this.recheck_timer = null;
this.recheck_pgs().catch(this.die);
}, this.config.mon_change_timeout || 1000);
}
sum_op_stats(timestamp, prev_stats)
@@ -1452,24 +1438,8 @@ class Mon
sum_object_counts()
{
const object_counts = { object: 0n, clean: 0n, misplaced: 0n, degraded: 0n, incomplete: 0n };
const object_bytes = { object: 0n, clean: 0n, misplaced: 0n, degraded: 0n, incomplete: 0n };
for (const pool_id in this.state.pg.stats)
{
let object_size = 0;
for (const osd_num of this.state.pg.stats[pool_id].write_osd_set||[])
{
if (osd_num && this.state.osd.stats[osd_num] && this.state.osd.stats[osd_num].block_size)
{
object_size = this.state.osd.stats[osd_num].block_size;
break;
}
}
if (!object_size)
{
object_size = (this.state.config.pools[pool_id]||{}).block_size ||
this.config.block_size || 131072;
}
object_size = BigInt(object_size);
for (const pg_num in this.state.pg.stats[pool_id])
{
const st = this.state.pg.stats[pool_id][pg_num];
@@ -1480,13 +1450,12 @@ class Mon
if (st[k+'_count'])
{
object_counts[k] += BigInt(st[k+'_count']);
object_bytes[k] += BigInt(st[k+'_count']) * object_size;
}
}
}
}
}
return { object_counts, object_bytes };
return object_counts;
}
sum_inode_stats(prev_stats, timestamp, prev_timestamp)
@@ -1599,7 +1568,7 @@ class Mon
{
const txn = [];
const timestamp = Date.now();
const { object_counts, object_bytes } = this.sum_object_counts();
const object_counts = this.sum_object_counts();
let stats = this.sum_op_stats(timestamp, this.prev_stats);
let inode_stats = this.sum_inode_stats(
this.prev_stats ? this.prev_stats.inode_stats : null,
@@ -1607,7 +1576,6 @@ class Mon
);
this.prev_stats = { timestamp, ...stats, inode_stats };
stats.object_counts = object_counts;
stats.object_bytes = object_bytes;
stats = this.serialize_bigints(stats);
inode_stats = this.serialize_bigints(inode_stats);
txn.push({ requestPut: { key: b64(this.etcd_prefix+'/stats'), value: b64(JSON.stringify(stats)) } });
@@ -1695,7 +1663,6 @@ class Mon
// Do not clear these to null
kv.value = kv.value || {};
}
const old = cur[key_parts[key_parts.length-1]];
cur[key_parts[key_parts.length-1]] = kv.value;
if (key === 'config/global')
{
@@ -1720,12 +1687,7 @@ class Mon
}
else if (key_parts[0] === 'osd' && key_parts[1] === 'stats')
{
// Recheck OSD tree on OSD addition/deletion
if ((!old) != (!kv.value) || old && kv.value && old.size != kv.value.size)
{
this.schedule_recheck();
}
// Recheck PGs <osd_out_time> after last OSD statistics report
// Recheck PGs <osd_out_time> later
this.schedule_next_recheck_at(
!this.state.osd.stats[key[2]] ? 0 : this.state.osd.stats[key[2]].time+this.config.osd_out_time
);

View File

@@ -1,18 +0,0 @@
[Unit]
Description=Vitastor monitor
After=network-online.target local-fs.target time-sync.target
Wants=network-online.target local-fs.target time-sync.target
[Service]
Restart=always
ExecStart=node /usr/lib/vitastor/mon/mon-main.js
WorkingDirectory=/
User=vitastor
PrivateTmp=false
TasksMax=infinity
Restart=always
StartLimitInterval=0
RestartSec=10
[Install]
WantedBy=vitastor.target

View File

@@ -1,22 +0,0 @@
[Unit]
Description=Vitastor object storage daemon osd.%i
After=network-online.target local-fs.target time-sync.target
Wants=network-online.target local-fs.target time-sync.target
PartOf=vitastor.target
[Service]
LimitNOFILE=1048576
LimitNPROC=1048576
LimitMEMLOCK=infinity
ExecStart=bash -c 'exec vitastor-disk exec-osd /dev/vitastor/osd%i-data >>/var/log/vitastor/osd%i.log 2>&1'
ExecStartPre=+vitastor-disk pre-exec /dev/vitastor/osd%i-data
WorkingDirectory=/
User=vitastor
PrivateTmp=false
TasksMax=infinity
Restart=always
StartLimitInterval=0
RestartSec=10
[Install]
WantedBy=vitastor.target

View File

@@ -1,4 +0,0 @@
[Unit]
Description=vitastor target
[Install]
WantedBy=multi-user.target

View File

@@ -50,7 +50,7 @@ from cinder.volume import configuration
from cinder.volume import driver
from cinder.volume import volume_utils
VERSION = '0.8.3'
VERSION = '0.7.1'
LOG = logging.getLogger(__name__)

View File

@@ -1,169 +0,0 @@
Index: qemu/block/meson.build
===================================================================
--- qemu.orig/block/meson.build
+++ qemu/block/meson.build
@@ -91,6 +91,7 @@ foreach m : [
[libnfs, 'nfs', files('nfs.c')],
[libssh, 'ssh', files('ssh.c')],
[rbd, 'rbd', files('rbd.c')],
+ [vitastor, 'vitastor', files('vitastor.c')],
]
if m[0].found()
module_ss = ss.source_set()
Index: qemu/meson.build
===================================================================
--- qemu.orig/meson.build
+++ qemu/meson.build
@@ -838,6 +838,26 @@ if not get_option('rbd').auto() or have_
endif
endif
+vitastor = not_found
+if not get_option('vitastor').auto() or have_block
+ libvitastor_client = cc.find_library('vitastor_client', has_headers: ['vitastor_c.h'],
+ required: get_option('vitastor'), kwargs: static_kwargs)
+ if libvitastor_client.found()
+ if cc.links('''
+ #include <vitastor_c.h>
+ int main(void) {
+ vitastor_c_create_qemu(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+ return 0;
+ }''', dependencies: libvitastor_client)
+ vitastor = declare_dependency(dependencies: libvitastor_client)
+ elif get_option('vitastor').enabled()
+ error('could not link libvitastor_client')
+ else
+ warning('could not link libvitastor_client, disabling')
+ endif
+ endif
+endif
+
glusterfs = not_found
glusterfs_ftruncate_has_stat = false
glusterfs_iocb_has_stat = false
@@ -1459,6 +1479,7 @@ config_host_data.set('CONFIG_LINUX_AIO',
config_host_data.set('CONFIG_LINUX_IO_URING', linux_io_uring.found())
config_host_data.set('CONFIG_LIBPMEM', libpmem.found())
config_host_data.set('CONFIG_RBD', rbd.found())
+config_host_data.set('CONFIG_VITASTOR', vitastor.found())
config_host_data.set('CONFIG_SDL', sdl.found())
config_host_data.set('CONFIG_SDL_IMAGE', sdl_image.found())
config_host_data.set('CONFIG_SECCOMP', seccomp.found())
@@ -3424,6 +3445,7 @@ if spice_protocol.found()
summary_info += {' spice server support': spice}
endif
summary_info += {'rbd support': rbd}
+summary_info += {'vitastor support': vitastor}
summary_info += {'xfsctl support': config_host.has_key('CONFIG_XFS')}
summary_info += {'smartcard support': cacard}
summary_info += {'U2F support': u2f}
Index: qemu/meson_options.txt
===================================================================
--- qemu.orig/meson_options.txt
+++ qemu/meson_options.txt
@@ -121,6 +121,8 @@ option('lzo', type : 'feature', value :
description: 'lzo compression support')
option('rbd', type : 'feature', value : 'auto',
description: 'Ceph block device driver')
+option('vitastor', type : 'feature', value : 'auto',
+ description: 'Vitastor block device driver')
option('gtk', type : 'feature', value : 'auto',
description: 'GTK+ user interface')
option('sdl', type : 'feature', value : 'auto',
Index: qemu/qapi/block-core.json
===================================================================
--- qemu.orig/qapi/block-core.json
+++ qemu/qapi/block-core.json
@@ -3179,7 +3179,7 @@
'preallocate', 'qcow', 'qcow2', 'qed', 'quorum', 'raw', 'rbd',
{ 'name': 'replication', 'if': 'CONFIG_REPLICATION' },
'pbs',
- 'ssh', 'throttle', 'vdi', 'vhdx', 'vmdk', 'vpc', 'vvfat' ] }
+ 'ssh', 'throttle', 'vdi', 'vhdx', 'vitastor', 'vmdk', 'vpc', 'vvfat' ] }
##
# @BlockdevOptionsFile:
@@ -4125,6 +4125,28 @@
'*server': ['InetSocketAddressBase'] } }
##
+# @BlockdevOptionsVitastor:
+#
+# Driver specific block device options for vitastor
+#
+# @image: Image name
+# @inode: Inode number
+# @pool: Pool ID
+# @size: Desired image size in bytes
+# @config-path: Path to Vitastor configuration
+# @etcd-host: etcd connection address(es)
+# @etcd-prefix: etcd key/value prefix
+##
+{ 'struct': 'BlockdevOptionsVitastor',
+ 'data': { '*inode': 'uint64',
+ '*pool': 'uint64',
+ '*size': 'uint64',
+ '*image': 'str',
+ '*config-path': 'str',
+ '*etcd-host': 'str',
+ '*etcd-prefix': 'str' } }
+
+##
# @ReplicationMode:
#
# An enumeration of replication modes.
@@ -4520,6 +4542,7 @@
'throttle': 'BlockdevOptionsThrottle',
'vdi': 'BlockdevOptionsGenericFormat',
'vhdx': 'BlockdevOptionsGenericFormat',
+ 'vitastor': 'BlockdevOptionsVitastor',
'vmdk': 'BlockdevOptionsGenericCOWFormat',
'vpc': 'BlockdevOptionsGenericFormat',
'vvfat': 'BlockdevOptionsVVFAT'
@@ -4910,6 +4933,17 @@
'*encrypt' : 'RbdEncryptionCreateOptions' } }
##
+# @BlockdevCreateOptionsVitastor:
+#
+# Driver specific image creation options for Vitastor.
+#
+# @size: Size of the virtual disk in bytes
+##
+{ 'struct': 'BlockdevCreateOptionsVitastor',
+ 'data': { 'location': 'BlockdevOptionsVitastor',
+ 'size': 'size' } }
+
+##
# @BlockdevVmdkSubformat:
#
# Subformat options for VMDK images
@@ -5108,6 +5142,7 @@
'ssh': 'BlockdevCreateOptionsSsh',
'vdi': 'BlockdevCreateOptionsVdi',
'vhdx': 'BlockdevCreateOptionsVhdx',
+ 'vitastor': 'BlockdevCreateOptionsVitastor',
'vmdk': 'BlockdevCreateOptionsVmdk',
'vpc': 'BlockdevCreateOptionsVpc'
} }
Index: qemu/scripts/ci/org.centos/stream/8/x86_64/configure
===================================================================
--- qemu.orig/scripts/ci/org.centos/stream/8/x86_64/configure
+++ qemu/scripts/ci/org.centos/stream/8/x86_64/configure
@@ -31,7 +31,7 @@
--with-git=meson \
--with-git-submodules=update \
--target-list="x86_64-softmmu" \
---block-drv-rw-whitelist="qcow2,raw,file,host_device,nbd,iscsi,rbd,blkdebug,luks,null-co,nvme,copy-on-read,throttle,gluster" \
+--block-drv-rw-whitelist="qcow2,raw,file,host_device,nbd,iscsi,rbd,vitastor,blkdebug,luks,null-co,nvme,copy-on-read,throttle,gluster" \
--audio-drv-list="" \
--block-drv-ro-whitelist="vmdk,vhdx,vpc,https,ssh" \
--with-coroutine=ucontext \
@@ -183,6 +183,7 @@
--enable-opengl \
--enable-pie \
--enable-rbd \
+--enable-vitastor \
--enable-rdma \
--enable-seccomp \
--enable-snappy \

View File

@@ -1,169 +0,0 @@
Index: qemu/block/meson.build
===================================================================
--- qemu.orig/block/meson.build
+++ qemu/block/meson.build
@@ -111,6 +111,7 @@ foreach m : [
[libnfs, 'nfs', files('nfs.c')],
[libssh, 'ssh', files('ssh.c')],
[rbd, 'rbd', files('rbd.c')],
+ [vitastor, 'vitastor', files('vitastor.c')],
]
if m[0].found()
module_ss = ss.source_set()
Index: qemu/meson.build
===================================================================
--- qemu.orig/meson.build
+++ qemu/meson.build
@@ -967,6 +967,26 @@ if not get_option('rbd').auto() or have_
endif
endif
+vitastor = not_found
+if not get_option('vitastor').auto() or have_block
+ libvitastor_client = cc.find_library('vitastor_client', has_headers: ['vitastor_c.h'],
+ required: get_option('vitastor'), kwargs: static_kwargs)
+ if libvitastor_client.found()
+ if cc.links('''
+ #include <vitastor_c.h>
+ int main(void) {
+ vitastor_c_create_qemu(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+ return 0;
+ }''', dependencies: libvitastor_client)
+ vitastor = declare_dependency(dependencies: libvitastor_client)
+ elif get_option('vitastor').enabled()
+ error('could not link libvitastor_client')
+ else
+ warning('could not link libvitastor_client, disabling')
+ endif
+ endif
+endif
+
glusterfs = not_found
glusterfs_ftruncate_has_stat = false
glusterfs_iocb_has_stat = false
@@ -1802,6 +1822,7 @@ config_host_data.set('CONFIG_NUMA', numa
config_host_data.set('CONFIG_OPENGL', opengl.found())
config_host_data.set('CONFIG_PROFILER', get_option('profiler'))
config_host_data.set('CONFIG_RBD', rbd.found())
+config_host_data.set('CONFIG_VITASTOR', vitastor.found())
config_host_data.set('CONFIG_RDMA', rdma.found())
config_host_data.set('CONFIG_SDL', sdl.found())
config_host_data.set('CONFIG_SDL_IMAGE', sdl_image.found())
@@ -3965,6 +3986,7 @@ if spice_protocol.found()
summary_info += {' spice server support': spice}
endif
summary_info += {'rbd support': rbd}
+summary_info += {'vitastor support': vitastor}
summary_info += {'smartcard support': cacard}
summary_info += {'U2F support': u2f}
summary_info += {'libusb': libusb}
Index: qemu/meson_options.txt
===================================================================
--- qemu.orig/meson_options.txt
+++ qemu/meson_options.txt
@@ -167,6 +167,8 @@ option('lzo', type : 'feature', value :
description: 'lzo compression support')
option('rbd', type : 'feature', value : 'auto',
description: 'Ceph block device driver')
+option('vitastor', type : 'feature', value : 'auto',
+ description: 'Vitastor block device driver')
option('opengl', type : 'feature', value : 'auto',
description: 'OpenGL support')
option('rdma', type : 'feature', value : 'auto',
Index: qemu/qapi/block-core.json
===================================================================
--- qemu.orig/qapi/block-core.json
+++ qemu/qapi/block-core.json
@@ -3209,7 +3209,7 @@
'preallocate', 'qcow', 'qcow2', 'qed', 'quorum', 'raw', 'rbd',
{ 'name': 'replication', 'if': 'CONFIG_REPLICATION' },
'pbs',
- 'ssh', 'throttle', 'vdi', 'vhdx', 'vmdk', 'vpc', 'vvfat' ] }
+ 'ssh', 'throttle', 'vdi', 'vhdx', 'vitastor', 'vmdk', 'vpc', 'vvfat' ] }
##
# @BlockdevOptionsFile:
@@ -4149,6 +4149,28 @@
'*server': ['InetSocketAddressBase'] } }
##
+# @BlockdevOptionsVitastor:
+#
+# Driver specific block device options for vitastor
+#
+# @image: Image name
+# @inode: Inode number
+# @pool: Pool ID
+# @size: Desired image size in bytes
+# @config-path: Path to Vitastor configuration
+# @etcd-host: etcd connection address(es)
+# @etcd-prefix: etcd key/value prefix
+##
+{ 'struct': 'BlockdevOptionsVitastor',
+ 'data': { '*inode': 'uint64',
+ '*pool': 'uint64',
+ '*size': 'uint64',
+ '*image': 'str',
+ '*config-path': 'str',
+ '*etcd-host': 'str',
+ '*etcd-prefix': 'str' } }
+
+##
# @ReplicationMode:
#
# An enumeration of replication modes.
@@ -4593,6 +4615,7 @@
'throttle': 'BlockdevOptionsThrottle',
'vdi': 'BlockdevOptionsGenericFormat',
'vhdx': 'BlockdevOptionsGenericFormat',
+ 'vitastor': 'BlockdevOptionsVitastor',
'vmdk': 'BlockdevOptionsGenericCOWFormat',
'vpc': 'BlockdevOptionsGenericFormat',
'vvfat': 'BlockdevOptionsVVFAT'
@@ -4985,6 +5008,17 @@
'*encrypt' : 'RbdEncryptionCreateOptions' } }
##
+# @BlockdevCreateOptionsVitastor:
+#
+# Driver specific image creation options for Vitastor.
+#
+# @size: Size of the virtual disk in bytes
+##
+{ 'struct': 'BlockdevCreateOptionsVitastor',
+ 'data': { 'location': 'BlockdevOptionsVitastor',
+ 'size': 'size' } }
+
+##
# @BlockdevVmdkSubformat:
#
# Subformat options for VMDK images
@@ -5182,6 +5216,7 @@
'ssh': 'BlockdevCreateOptionsSsh',
'vdi': 'BlockdevCreateOptionsVdi',
'vhdx': 'BlockdevCreateOptionsVhdx',
+ 'vitastor': 'BlockdevCreateOptionsVitastor',
'vmdk': 'BlockdevCreateOptionsVmdk',
'vpc': 'BlockdevCreateOptionsVpc'
} }
Index: qemu/scripts/ci/org.centos/stream/8/x86_64/configure
===================================================================
--- qemu.orig/scripts/ci/org.centos/stream/8/x86_64/configure
+++ qemu/scripts/ci/org.centos/stream/8/x86_64/configure
@@ -31,7 +31,7 @@
--with-git=meson \
--with-git-submodules=update \
--target-list="x86_64-softmmu" \
---block-drv-rw-whitelist="qcow2,raw,file,host_device,nbd,iscsi,rbd,blkdebug,luks,null-co,nvme,copy-on-read,throttle,gluster" \
+--block-drv-rw-whitelist="qcow2,raw,file,host_device,nbd,iscsi,rbd,vitastor,blkdebug,luks,null-co,nvme,copy-on-read,throttle,gluster" \
--audio-drv-list="" \
--block-drv-ro-whitelist="vmdk,vhdx,vpc,https,ssh" \
--with-coroutine=ucontext \
@@ -179,6 +179,7 @@
--enable-opengl \
--enable-pie \
--enable-rbd \
+--enable-vitastor \
--enable-rdma \
--enable-seccomp \
--enable-snappy \


@@ -9,7 +9,7 @@ for i in "$DIR"/qemu-*-vitastor.patch "$DIR"/pve-qemu-*-vitastor.patch; do
echo '===================================================================' >> $i
echo '--- /dev/null' >> $i
echo '+++ a/block/vitastor.c' >> $i
echo '@@ -0,0 +1,'$(wc -l "$DIR"/../src/qemu_driver.c | cut -d ' ' -f 1)' @@' >> $i
echo '@@ -0,0 +1,'$(wc -l "$DIR"/../src/qemu_driver.c)' @@' >> $i
cat "$DIR"/../src/qemu_driver.c | sed 's/^/+/' >> $i
fi
done


@@ -25,4 +25,4 @@ rm fio
mv fio-copy fio
FIO=`rpm -qi fio | perl -e 'while(<>) { /^Epoch[\s:]+(\S+)/ && print "$1:"; /^Version[\s:]+(\S+)/ && print $1; /^Release[\s:]+(\S+)/ && print "-$1"; }'`
perl -i -pe 's/(Requires:\s*fio)([^\n]+)?/$1 = '$FIO'/' $VITASTOR/rpm/vitastor-el$EL.spec
tar --transform 's#^#vitastor-0.8.3/#' --exclude 'rpm/*.rpm' -czf $VITASTOR/../vitastor-0.8.3$(rpm --eval '%dist').tar.gz *
tar --transform 's#^#vitastor-0.7.1/#' --exclude 'rpm/*.rpm' -czf $VITASTOR/../vitastor-0.7.1$(rpm --eval '%dist').tar.gz *


@@ -58,7 +58,7 @@
+BuildRequires: gperftools-devel
+BuildRequires: libusbx-devel >= 1.0.21
%if %{have_usbredir}
BuildRequires: usbredir-devel >= 0.8.2
BuildRequires: usbredir-devel >= 0.7.1
%endif
@@ -856,12 +861,13 @@ BuildRequires: virglrenderer-devel
# For smartcard NSS support


@@ -35,7 +35,7 @@ ADD . /root/vitastor
RUN set -e; \
cd /root/vitastor/rpm; \
sh build-tarball.sh; \
cp /root/vitastor-0.8.3.el7.tar.gz ~/rpmbuild/SOURCES; \
cp /root/vitastor-0.7.1.el7.tar.gz ~/rpmbuild/SOURCES; \
cp vitastor-el7.spec ~/rpmbuild/SPECS/vitastor.spec; \
cd ~/rpmbuild/SPECS/; \
rpmbuild -ba vitastor.spec; \


@@ -1,11 +1,11 @@
Name: vitastor
Version: 0.8.3
Version: 0.7.1
Release: 1%{?dist}
Summary: Vitastor, a fast software-defined clustered block storage
License: Vitastor Network Public License 1.1
URL: https://vitastor.io/
Source0: vitastor-0.8.3.el7.tar.gz
Source0: vitastor-0.7.1.el7.tar.gz
BuildRequires: liburing-devel >= 0.6
BuildRequires: gperftools-devel
@@ -36,8 +36,6 @@ Requires: libJerasure2
Requires: libisa-l
Requires: liburing >= 0.6
Requires: vitastor-client = %{version}-%{release}
Requires: util-linux
Requires: parted
%description -n vitastor-osd
@@ -104,11 +102,8 @@ cd mon
npm install
cd ..
mkdir -p %buildroot/usr/lib/vitastor
cp mon/make-osd.sh %buildroot/usr/lib/vitastor
cp -r mon %buildroot/usr/lib/vitastor
mkdir -p %buildroot/lib/systemd/system
cp mon/vitastor.target mon/vitastor-mon.service mon/vitastor-osd@.service %buildroot/lib/systemd/system
mkdir -p %buildroot/lib/udev/rules.d
cp mon/90-vitastor.rules %buildroot/lib/udev/rules.d
%files
@@ -119,27 +114,10 @@ cp mon/90-vitastor.rules %buildroot/lib/udev/rules.d
%_bindir/vitastor-osd
%_bindir/vitastor-disk
%_bindir/vitastor-dump-journal
/lib/systemd/system/vitastor-osd@.service
/lib/systemd/system/vitastor.target
/lib/udev/rules.d/90-vitastor.rules
%pre -n vitastor-osd
groupadd -r -f vitastor 2>/dev/null ||:
useradd -r -g vitastor -s /sbin/nologin -c "Vitastor daemons" -M -d /nonexistent vitastor 2>/dev/null ||:
install -o vitastor -g vitastor -d /var/log/vitastor
mkdir -p /etc/vitastor
%files -n vitastor-mon
/usr/lib/vitastor/mon
/lib/systemd/system/vitastor-mon.service
%pre -n vitastor-mon
groupadd -r -f vitastor 2>/dev/null ||:
useradd -r -g vitastor -s /sbin/nologin -c "Vitastor daemons" -M -d /nonexistent vitastor 2>/dev/null ||:
mkdir -p /etc/vitastor
%files -n vitastor-client
@@ -150,6 +128,7 @@ mkdir -p /etc/vitastor
%_bindir/vita
%_libdir/libvitastor_blk.so*
%_libdir/libvitastor_client.so*
/usr/lib/vitastor/make-osd.sh
%files -n vitastor-client-devel


@@ -35,7 +35,7 @@ ADD . /root/vitastor
RUN set -e; \
cd /root/vitastor/rpm; \
sh build-tarball.sh; \
cp /root/vitastor-0.8.3.el8.tar.gz ~/rpmbuild/SOURCES; \
cp /root/vitastor-0.7.1.el8.tar.gz ~/rpmbuild/SOURCES; \
cp vitastor-el8.spec ~/rpmbuild/SPECS/vitastor.spec; \
cd ~/rpmbuild/SPECS/; \
rpmbuild -ba vitastor.spec; \


@@ -1,11 +1,11 @@
Name: vitastor
Version: 0.8.3
Version: 0.7.1
Release: 1%{?dist}
Summary: Vitastor, a fast software-defined clustered block storage
License: Vitastor Network Public License 1.1
URL: https://vitastor.io/
Source0: vitastor-0.8.3.el8.tar.gz
Source0: vitastor-0.7.1.el8.tar.gz
BuildRequires: liburing-devel >= 0.6
BuildRequires: gperftools-devel
@@ -35,8 +35,6 @@ Requires: libJerasure2
Requires: libisa-l
Requires: liburing >= 0.6
Requires: vitastor-client = %{version}-%{release}
Requires: util-linux
Requires: parted
%description -n vitastor-osd
@@ -101,11 +99,8 @@ cd mon
npm install
cd ..
mkdir -p %buildroot/usr/lib/vitastor
cp mon/make-osd.sh %buildroot/usr/lib/vitastor
cp -r mon %buildroot/usr/lib/vitastor
mkdir -p %buildroot/lib/systemd/system
cp mon/vitastor.target mon/vitastor-mon.service mon/vitastor-osd@.service %buildroot/lib/systemd/system
mkdir -p %buildroot/lib/udev/rules.d
cp mon/90-vitastor.rules %buildroot/lib/udev/rules.d
%files
@@ -116,27 +111,10 @@ cp mon/90-vitastor.rules %buildroot/lib/udev/rules.d
%_bindir/vitastor-osd
%_bindir/vitastor-disk
%_bindir/vitastor-dump-journal
/lib/systemd/system/vitastor-osd@.service
/lib/systemd/system/vitastor.target
/lib/udev/rules.d/90-vitastor.rules
%pre -n vitastor-osd
groupadd -r -f vitastor 2>/dev/null ||:
useradd -r -g vitastor -s /sbin/nologin -c "Vitastor daemons" -M -d /nonexistent vitastor 2>/dev/null ||:
install -o vitastor -g vitastor -d /var/log/vitastor
mkdir -p /etc/vitastor
%files -n vitastor-mon
/usr/lib/vitastor/mon
/lib/systemd/system/vitastor-mon.service
%pre -n vitastor-mon
groupadd -r -f vitastor 2>/dev/null ||:
useradd -r -g vitastor -s /sbin/nologin -c "Vitastor daemons" -M -d /nonexistent vitastor 2>/dev/null ||:
mkdir -p /etc/vitastor
%files -n vitastor-client
@@ -147,6 +125,7 @@ mkdir -p /etc/vitastor
%_bindir/vita
%_libdir/libvitastor_blk.so*
%_libdir/libvitastor_client.so*
/usr/lib/vitastor/make-osd.sh
%files -n vitastor-client-devel


@@ -15,7 +15,7 @@ if("${CMAKE_INSTALL_PREFIX}" MATCHES "^/usr/local/?$")
set(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}")
endif()
add_definitions(-DVERSION="0.8.3")
add_definitions(-DVERSION="0.7.1")
add_definitions(-Wall -Wno-sign-compare -Wno-comment -Wno-parentheses -Wno-pointer-arith -fdiagnostics-color=always -I ${CMAKE_SOURCE_DIR}/src)
if (${WITH_ASAN})
add_definitions(-fsanitize=address -fno-omit-frame-pointer)
@@ -64,7 +64,7 @@ include_directories(
# libvitastor_blk.so
add_library(vitastor_blk SHARED
allocator.cpp blockstore.cpp blockstore_impl.cpp blockstore_disk.cpp blockstore_init.cpp blockstore_open.cpp blockstore_journal.cpp blockstore_read.cpp
allocator.cpp blockstore.cpp blockstore_impl.cpp blockstore_init.cpp blockstore_open.cpp blockstore_journal.cpp blockstore_read.cpp
blockstore_write.cpp blockstore_sync.cpp blockstore_stable.cpp blockstore_rollback.cpp blockstore_flush.cpp crc32c.c ringloop.cpp
)
target_link_libraries(vitastor_blk
@@ -94,7 +94,7 @@ endif (IBVERBS_LIBRARIES)
add_library(vitastor_common STATIC
epoll_manager.cpp etcd_state_client.cpp messenger.cpp addr_util.cpp
msgr_stop.cpp msgr_op.cpp msgr_send.cpp msgr_receive.cpp ringloop.cpp ../json11/json11.cpp
http_client.cpp osd_ops.cpp pg_states.cpp timerfd_manager.cpp str_util.cpp ${MSGR_RDMA}
http_client.cpp osd_ops.cpp pg_states.cpp timerfd_manager.cpp base64.cpp ${MSGR_RDMA}
)
target_compile_options(vitastor_common PUBLIC -fPIC)
@@ -131,6 +131,7 @@ add_library(vitastor_client SHARED
vitastor_c.cpp
cli_common.cpp
cli_alloc_osd.cpp
cli_simple_offsets.cpp
cli_status.cpp
cli_df.cpp
cli_ls.cpp
@@ -140,7 +141,6 @@ add_library(vitastor_client SHARED
cli_merge.cpp
cli_rm_data.cpp
cli_rm.cpp
cli_rm_osd.cpp
)
set_target_properties(vitastor_client PROPERTIES PUBLIC_HEADER "vitastor_c.h")
target_link_libraries(vitastor_client
@@ -195,9 +195,7 @@ configure_file(vitastor.pc.in vitastor.pc @ONLY)
# vitastor-disk
add_executable(vitastor-disk
disk_tool.cpp disk_simple_offsets.cpp
disk_tool_journal.cpp disk_tool_meta.cpp disk_tool_prepare.cpp disk_tool_resize.cpp disk_tool_udev.cpp disk_tool_utils.cpp disk_tool_upgrade.cpp
crc32c.c str_util.cpp ../json11/json11.cpp rw_blocking.cpp allocator.cpp ringloop.cpp blockstore_disk.cpp
disk_tool.cpp crc32c.c rw_blocking.cpp allocator.cpp ringloop.cpp
)
target_link_libraries(vitastor-disk
tcmalloc_minimal
@@ -235,16 +233,9 @@ add_executable(osd_test osd_test.cpp rw_blocking.cpp addr_util.cpp)
target_link_libraries(osd_test tcmalloc_minimal)
# osd_rmw_test
# FIXME: Move to tests
add_executable(osd_rmw_test osd_rmw_test.cpp allocator.cpp)
target_link_libraries(osd_rmw_test Jerasure ${ISAL_LIBRARIES} tcmalloc_minimal)
if (ISAL_LIBRARIES)
add_executable(osd_rmw_test_je osd_rmw_test.cpp allocator.cpp)
target_compile_definitions(osd_rmw_test_je PUBLIC -DNO_ISAL)
target_link_libraries(osd_rmw_test_je Jerasure tcmalloc_minimal)
endif (ISAL_LIBRARIES)
# stub_uring_osd
add_executable(stub_uring_osd
stub_uring_osd.cpp
@@ -271,14 +262,6 @@ target_link_libraries(test_cas
vitastor_client
)
# test_crc32
add_executable(test_crc32
test_crc32.cpp
)
target_link_libraries(test_crc32
vitastor_blk
)
# test_cluster_client
add_executable(test_cluster_client
test_cluster_client.cpp

src/base64.cpp Normal file

@@ -0,0 +1,55 @@
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 (see README.md for details)
#include "base64.h"
std::string base64_encode(const std::string &in)
{
std::string out;
unsigned val = 0;
int valb = -6;
for (unsigned char c: in)
{
val = (val << 8) + c;
valb += 8;
while (valb >= 0)
{
out.push_back("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"[(val>>valb) & 0x3F]);
valb -= 6;
}
}
if (valb > -6)
out.push_back("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"[((val<<8)>>(valb+8)) & 0x3F]);
while (out.size() % 4)
out.push_back('=');
return out;
}
static char T[256] = { 0 };
std::string base64_decode(const std::string &in)
{
std::string out;
if (T[0] == 0)
{
for (int i = 0; i < 256; i++)
T[i] = -1;
for (int i = 0; i < 64; i++)
T[(unsigned char)("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"[i])] = i;
}
unsigned val = 0;
int valb = -8;
for (unsigned char c: in)
{
if (T[c] == -1)
break;
val = (val<<6) + T[c];
valb += 6;
if (valb >= 0)
{
out.push_back(char((val >> valb) & 0xFF));
valb -= 8;
}
}
return out;
}
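
A quick round-trip check of the two helpers above, using the standard RFC 4648 test vectors (a hypothetical test, not part of this commit):

#include <cassert>
#include <string>
#include "base64.h"

int main()
{
    // Classic RFC 4648 vectors
    assert(base64_encode("Man") == "TWFu");
    assert(base64_encode("Ma") == "TWE=");   // 2-byte tail -> one '=' pad
    assert(base64_encode("M") == "TQ==");    // 1-byte tail -> two '=' pads
    // Decoding inverts encoding; '=' is not in the lookup table,
    // so the decode loop stops exactly at the padding
    assert(base64_decode("TWFu") == "Man");
    std::string bin("a\0b", 3);              // embedded NUL survives the round trip
    assert(base64_decode(base64_encode(bin)) == bin);
    return 0;
}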

src/base64.h Normal file

@@ -0,0 +1,8 @@
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 (see README.md for details)
#pragma once
#include <string>
std::string base64_encode(const std::string &in);
std::string base64_decode(const std::string &in);


@@ -11,6 +11,7 @@
#include <string>
#include <map>
#include <unordered_map>
#include <functional>
#include "object_id.h"
@@ -154,7 +155,7 @@ struct blockstore_op_t
uint8_t private_data[BS_OP_PRIVATE_DATA_SIZE];
};
typedef std::map<std::string, std::string> blockstore_config_t;
typedef std::unordered_map<std::string, std::string> blockstore_config_t;
class blockstore_impl_t;
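
blockstore_config_t switches between std::map and std::unordered_map here; nothing iterates the config in key order, so only lookup complexity changes. Either way, callers index it with operator[], which default-inserts an empty string for missing keys — that is what lets parse-style code read optional options without existence checks. A sketch of the pattern (shown with the unordered_map variant; not code from this commit):

#include <string>
#include <unordered_map>

typedef std::unordered_map<std::string, std::string> blockstore_config_t;

int main()
{
    blockstore_config_t config;
    config["data_device"] = "/dev/sdb";
    // Missing keys read as "" instead of throwing, which is what
    // parse_config()-style code depends on when options are absent.
    bool disable_flock = (config["disable_device_lock"] == "true");
    return disable_flock ? 1 : 0;
}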


@@ -1,323 +0,0 @@
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 (see README.md for details)
#include <sys/file.h>
#include <stdexcept>
#include "blockstore_impl.h"
#include "blockstore_disk.h"
#include "str_util.h"
static uint32_t is_power_of_two(uint64_t value)
{
uint32_t l = 0;
while (value > 1)
{
if (value & 1)
{
return 64;
}
value = value >> 1;
l++;
}
return l;
}
void blockstore_disk_t::parse_config(std::map<std::string, std::string> & config)
{
// Parse
if (config["disable_device_lock"] == "true" || config["disable_device_lock"] == "1" || config["disable_device_lock"] == "yes")
{
disable_flock = true;
}
cfg_journal_size = parse_size(config["journal_size"]);
data_device = config["data_device"];
data_offset = parse_size(config["data_offset"]);
cfg_data_size = parse_size(config["data_size"]);
meta_device = config["meta_device"];
meta_offset = parse_size(config["meta_offset"]);
data_block_size = parse_size(config["block_size"]);
journal_device = config["journal_device"];
journal_offset = parse_size(config["journal_offset"]);
disk_alignment = strtoull(config["disk_alignment"].c_str(), NULL, 10);
journal_block_size = strtoull(config["journal_block_size"].c_str(), NULL, 10);
meta_block_size = strtoull(config["meta_block_size"].c_str(), NULL, 10);
bitmap_granularity = strtoull(config["bitmap_granularity"].c_str(), NULL, 10);
// Validate
if (!data_block_size)
{
data_block_size = (1 << DEFAULT_DATA_BLOCK_ORDER);
}
if ((block_order = is_power_of_two(data_block_size)) >= 64 || data_block_size < MIN_DATA_BLOCK_SIZE || data_block_size >= MAX_DATA_BLOCK_SIZE)
{
throw std::runtime_error("Bad block size");
}
if (!disk_alignment)
{
disk_alignment = 4096;
}
else if (disk_alignment % DIRECT_IO_ALIGNMENT)
{
throw std::runtime_error("disk_alignment must be a multiple of "+std::to_string(DIRECT_IO_ALIGNMENT));
}
if (!journal_block_size)
{
journal_block_size = 4096;
}
else if (journal_block_size % DIRECT_IO_ALIGNMENT)
{
throw std::runtime_error("journal_block_size must be a multiple of "+std::to_string(DIRECT_IO_ALIGNMENT));
}
if (!meta_block_size)
{
meta_block_size = 4096;
}
else if (meta_block_size % DIRECT_IO_ALIGNMENT)
{
throw std::runtime_error("meta_block_size must be a multiple of "+std::to_string(DIRECT_IO_ALIGNMENT));
}
if (data_offset % disk_alignment)
{
throw std::runtime_error("data_offset must be a multiple of disk_alignment = "+std::to_string(disk_alignment));
}
if (!bitmap_granularity)
{
bitmap_granularity = DEFAULT_BITMAP_GRANULARITY;
}
else if (bitmap_granularity % disk_alignment)
{
throw std::runtime_error("Sparse write tracking granularity must be a multiple of disk_alignment = "+std::to_string(disk_alignment));
}
if (data_block_size % bitmap_granularity)
{
throw std::runtime_error("Block size must be a multiple of sparse write tracking granularity");
}
if (meta_device == "")
{
meta_device = data_device;
}
if (journal_device == "")
{
journal_device = meta_device;
}
if (meta_offset % meta_block_size)
{
throw std::runtime_error("meta_offset must be a multiple of meta_block_size = "+std::to_string(meta_block_size));
}
if (journal_offset % journal_block_size)
{
throw std::runtime_error("journal_offset must be a multiple of journal_block_size = "+std::to_string(journal_block_size));
}
clean_entry_bitmap_size = data_block_size / bitmap_granularity / 8;
clean_entry_size = sizeof(clean_disk_entry) + 2*clean_entry_bitmap_size;
}
void blockstore_disk_t::calc_lengths(bool skip_meta_check)
{
// data
data_len = data_device_size - data_offset;
if (data_fd == meta_fd && data_offset < meta_offset)
{
data_len = meta_offset - data_offset;
}
if (data_fd == journal_fd && data_offset < journal_offset)
{
data_len = data_len < journal_offset-data_offset
? data_len : journal_offset-data_offset;
}
if (cfg_data_size != 0)
{
if (data_len < cfg_data_size)
{
throw std::runtime_error("Data area ("+std::to_string(data_len)+
" bytes) is smaller than configured size ("+std::to_string(cfg_data_size)+" bytes)");
}
data_len = cfg_data_size;
}
// meta
uint64_t meta_area_size = (meta_fd == data_fd ? data_device_size : meta_device_size) - meta_offset;
if (meta_fd == data_fd && meta_offset <= data_offset)
{
meta_area_size = data_offset - meta_offset;
}
if (meta_fd == journal_fd && meta_offset <= journal_offset)
{
meta_area_size = meta_area_size < journal_offset-meta_offset
? meta_area_size : journal_offset-meta_offset;
}
// journal
journal_len = (journal_fd == data_fd ? data_device_size : (journal_fd == meta_fd ? meta_device_size : journal_device_size)) - journal_offset;
if (journal_fd == data_fd && journal_offset <= data_offset)
{
journal_len = data_offset - journal_offset;
}
if (journal_fd == meta_fd && journal_offset <= meta_offset)
{
journal_len = journal_len < meta_offset-journal_offset
? journal_len : meta_offset-journal_offset;
}
// required metadata size
block_count = data_len / data_block_size;
meta_len = (1 + (block_count - 1 + meta_block_size / clean_entry_size) / (meta_block_size / clean_entry_size)) * meta_block_size;
if (!skip_meta_check && meta_area_size < meta_len)
{
throw std::runtime_error("Metadata area is too small, need at least "+std::to_string(meta_len)+" bytes");
}
// requested journal size
if (!skip_meta_check && cfg_journal_size > journal_len)
{
throw std::runtime_error("Requested journal_size is too large");
}
else if (cfg_journal_size > 0)
{
journal_len = cfg_journal_size;
}
if (journal_len < MIN_JOURNAL_SIZE)
{
throw std::runtime_error("Journal is too small, need at least "+std::to_string(MIN_JOURNAL_SIZE)+" bytes");
}
}
// FIXME: Move to utils
static void check_size(int fd, uint64_t *size, uint64_t *sectsize, std::string name)
{
int sect;
struct stat st;
if (fstat(fd, &st) < 0)
{
throw std::runtime_error("Failed to stat "+name);
}
if (S_ISREG(st.st_mode))
{
*size = st.st_size;
if (sectsize)
{
*sectsize = st.st_blksize;
}
}
else if (S_ISBLK(st.st_mode))
{
if (ioctl(fd, BLKGETSIZE64, size) < 0 ||
ioctl(fd, BLKSSZGET, &sect) < 0)
{
throw std::runtime_error("Failed to get "+name+" size or block size: "+strerror(errno));
}
if (sectsize)
{
*sectsize = sect;
}
}
else
{
throw std::runtime_error(name+" is neither a file nor a block device");
}
}
void blockstore_disk_t::open_data()
{
data_fd = open(data_device.c_str(), O_DIRECT|O_RDWR);
if (data_fd == -1)
{
throw std::runtime_error("Failed to open data device "+data_device+": "+std::string(strerror(errno)));
}
check_size(data_fd, &data_device_size, &data_device_sect, "data device");
if (disk_alignment % data_device_sect)
{
throw std::runtime_error(
"disk_alignment ("+std::to_string(disk_alignment)+
") is not a multiple of data device sector size ("+std::to_string(data_device_sect)+")"
);
}
if (data_offset >= data_device_size)
{
throw std::runtime_error("data_offset exceeds device size = "+std::to_string(data_device_size));
}
if (!disable_flock && flock(data_fd, LOCK_EX|LOCK_NB) != 0)
{
throw std::runtime_error(std::string("Failed to lock data device: ") + strerror(errno));
}
}
void blockstore_disk_t::open_meta()
{
if (meta_device != data_device)
{
meta_fd = open(meta_device.c_str(), O_DIRECT|O_RDWR);
if (meta_fd == -1)
{
throw std::runtime_error("Failed to open metadata device "+meta_device+": "+std::string(strerror(errno)));
}
check_size(meta_fd, &meta_device_size, &meta_device_sect, "metadata device");
if (meta_offset >= meta_device_size)
{
throw std::runtime_error("meta_offset exceeds device size = "+std::to_string(meta_device_size));
}
if (!disable_flock && flock(meta_fd, LOCK_EX|LOCK_NB) != 0)
{
throw std::runtime_error(std::string("Failed to lock metadata device: ") + strerror(errno));
}
}
else
{
meta_fd = data_fd;
meta_device_sect = data_device_sect;
meta_device_size = 0;
if (meta_offset >= data_device_size)
{
throw std::runtime_error("meta_offset exceeds device size = "+std::to_string(data_device_size));
}
}
if (meta_block_size % meta_device_sect)
{
throw std::runtime_error(
"meta_block_size ("+std::to_string(meta_block_size)+
") is not a multiple of data device sector size ("+std::to_string(meta_device_sect)+")"
);
}
}
void blockstore_disk_t::open_journal()
{
if (journal_device != meta_device)
{
journal_fd = open(journal_device.c_str(), O_DIRECT|O_RDWR);
if (journal_fd == -1)
{
throw std::runtime_error("Failed to open journal device "+journal_device+": "+std::string(strerror(errno)));
}
check_size(journal_fd, &journal_device_size, &journal_device_sect, "journal device");
if (!disable_flock && flock(journal_fd, LOCK_EX|LOCK_NB) != 0)
{
throw std::runtime_error(std::string("Failed to lock journal device: ") + strerror(errno));
}
}
else
{
journal_fd = meta_fd;
journal_device_sect = meta_device_sect;
journal_device_size = 0;
if (journal_offset >= data_device_size)
{
throw std::runtime_error("journal_offset exceeds device size");
}
}
if (journal_block_size % journal_device_sect)
{
throw std::runtime_error(
"journal_block_size ("+std::to_string(journal_block_size)+
") is not a multiple of journal device sector size ("+std::to_string(journal_device_sect)+")"
);
}
}
void blockstore_disk_t::close_all()
{
if (data_fd >= 0)
close(data_fd);
if (meta_fd >= 0 && meta_fd != data_fd)
close(meta_fd);
if (journal_fd >= 0 && journal_fd != meta_fd)
close(journal_fd);
data_fd = meta_fd = journal_fd = -1;
}
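
parse_config() above uses is_power_of_two() both as a validity check and to derive block_order: it returns log2(value) for powers of two and the sentinel 64 otherwise, so (block_order = is_power_of_two(data_block_size)) >= 64 rejects non-power-of-two block sizes. A hypothetical check, reusing the function exactly as defined above:

#include <cassert>
#include <cstdint>

static uint32_t is_power_of_two(uint64_t value)
{
    uint32_t l = 0;
    while (value > 1)
    {
        if (value & 1)
            return 64;  // sentinel: not a power of two
        value = value >> 1;
        l++;
    }
    return l;
}

int main()
{
    assert(is_power_of_two(131072) == 17);  // default 128 KiB block -> order 17
    assert(is_power_of_two(4096) == 12);
    assert(is_power_of_two(12288) == 64);   // 12 KiB block size is rejected
    return 0;
}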


@@ -1,42 +0,0 @@
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 (see README.md for details)
#pragma once
#include <stdint.h>
#include <string>
#include <map>
struct blockstore_disk_t
{
std::string data_device, meta_device, journal_device;
uint32_t data_block_size;
uint64_t cfg_journal_size, cfg_data_size;
// Required write alignment and journal/metadata/data areas' location alignment
uint32_t disk_alignment = 4096;
// Journal block size - minimum_io_size of the journal device is the best choice
uint64_t journal_block_size = 4096;
// Metadata block size - minimum_io_size of the metadata device is the best choice
uint64_t meta_block_size = 4096;
// Sparse write tracking granularity. 4 KB is a good choice. Must be a multiple of disk_alignment
uint64_t bitmap_granularity = 4096;
// By default, Blockstore locks all opened devices exclusively. This option can be used to disable locking
bool disable_flock = false;
int meta_fd = -1, data_fd = -1, journal_fd = -1;
uint64_t meta_offset, meta_device_sect, meta_device_size, meta_len;
uint64_t data_offset, data_device_sect, data_device_size, data_len;
uint64_t journal_offset, journal_device_sect, journal_device_size, journal_len;
uint32_t block_order;
uint64_t block_count;
uint32_t clean_entry_bitmap_size = 0, clean_entry_size = 0;
void parse_config(std::map<std::string, std::string> & config);
void open_data();
void open_meta();
void open_journal();
void calc_lengths(bool skip_meta_check = false);
void close_all();
};
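
Together with the constructor changes in blockstore_impl.cpp further down, the struct above defines the full device lifecycle: parse_config() -> open_data()/open_meta()/open_journal() -> calc_lengths() -> close_all(). A minimal usage sketch (hypothetical device path; mirrors how blockstore_impl_t drives it):

#include <map>
#include <string>
#include <stdexcept>
#include "blockstore_disk.h"

int main()
{
    std::map<std::string, std::string> config;
    config["data_device"] = "/dev/vdb";     // hypothetical device
    config["block_size"]  = "131072";
    blockstore_disk_t dsk;
    try
    {
        dsk.parse_config(config);   // validate sizes and alignments
        dsk.open_data();            // O_DIRECT open + flock + size checks
        dsk.open_meta();            // falls back to the data device fd if shared
        dsk.open_journal();
        dsk.calc_lengths();         // derive data/meta/journal area lengths
    }
    catch (std::exception & e)
    {
        dsk.close_all();            // safe: fds default to -1 and are deduplicated
        throw;
    }
    dsk.close_all();
    return 0;
}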


@@ -15,11 +15,11 @@ journal_flusher_t::journal_flusher_t(blockstore_impl_t *bs)
active_flushers = 0;
syncing_flushers = 0;
// FIXME: allow to configure flusher_start_threshold and journal_trim_interval
flusher_start_threshold = bs->dsk.journal_block_size / sizeof(journal_entry_stable);
flusher_start_threshold = bs->journal_block_size / sizeof(journal_entry_stable);
journal_trim_interval = 512;
journal_trim_counter = bs->journal.flush_journal ? 1 : 0;
trim_wanted = bs->journal.flush_journal ? 1 : 0;
journal_superblock = bs->journal.inmemory ? bs->journal.buffer : memalign_or_die(MEM_ALIGNMENT, bs->dsk.journal_block_size);
journal_superblock = bs->journal.inmemory ? bs->journal.buffer : memalign_or_die(MEM_ALIGNMENT, bs->journal_block_size);
co = new journal_flusher_co[max_flusher_count];
for (int i = 0; i < max_flusher_count; i++)
{
@@ -35,14 +35,24 @@ journal_flusher_co::journal_flusher_co()
{
bs->live = true;
if (data->res != data->iov.iov_len)
bs->disk_error_abort("read operation during flush", data->res, data->iov.iov_len);
{
throw std::runtime_error(
"data read operation failed during flush ("+std::to_string(data->res)+" != "+std::to_string(data->iov.iov_len)+
"). can't continue, sorry :-("
);
}
wait_count--;
};
simple_callback_w = [this](ring_data_t* data)
{
bs->live = true;
if (data->res != data->iov.iov_len)
bs->disk_error_abort("write operation during flush", data->res, data->iov.iov_len);
{
throw std::runtime_error(
"write operation failed ("+std::to_string(data->res)+" != "+std::to_string(data->iov.iov_len)+
"). state "+std::to_string(wait_state)+". in-memory state is corrupted. AAAAAAAaaaaaaaaa!!!111"
);
}
wait_count--;
};
}
@@ -77,7 +87,7 @@ void journal_flusher_t::loop()
cur_flusher_count--;
}
}
for (int i = 0; (active_flushers > 0 || dequeuing || trim_wanted > 0) && i < cur_flusher_count; i++)
for (int i = 0; (active_flushers > 0 || dequeuing) && i < cur_flusher_count; i++)
co[i].loop();
}
@@ -162,8 +172,7 @@ void journal_flusher_t::mark_trim_possible()
if (trim_wanted > 0)
{
dequeuing = true;
if (!journal_trim_counter)
journal_trim_counter = journal_trim_interval;
journal_trim_counter++;
bs->ringloop->wakeup();
}
}
@@ -297,8 +306,6 @@ bool journal_flusher_co::loop()
goto resume_20;
else if (wait_state == 21)
goto resume_21;
else if (wait_state == 22)
goto resume_22;
resume_0:
if (flusher->flush_queue.size() < flusher->min_flusher_count && !flusher->trim_wanted ||
!flusher->flush_queue.size() || !flusher->dequeuing)
@@ -479,38 +486,31 @@ resume_1:
bs->ringloop->wakeup();
}
// Reads completed, submit writes and set bitmap bits
if (bs->dsk.clean_entry_bitmap_size)
if (bs->clean_entry_bitmap_size)
{
new_clean_bitmap = (bs->inmemory_meta
? (uint8_t*)meta_new.buf + meta_new.pos*bs->dsk.clean_entry_size + sizeof(clean_disk_entry)
: (uint8_t*)bs->clean_bitmap + (clean_loc >> bs->dsk.block_order)*(2*bs->dsk.clean_entry_bitmap_size));
? (uint8_t*)meta_new.buf + meta_new.pos*bs->clean_entry_size + sizeof(clean_disk_entry)
: (uint8_t*)bs->clean_bitmap + (clean_loc >> bs->block_order)*(2*bs->clean_entry_bitmap_size));
if (clean_init_bitmap)
{
memset(new_clean_bitmap, 0, bs->dsk.clean_entry_bitmap_size);
bitmap_set(new_clean_bitmap, clean_bitmap_offset, clean_bitmap_len, bs->dsk.bitmap_granularity);
memset(new_clean_bitmap, 0, bs->clean_entry_bitmap_size);
bitmap_set(new_clean_bitmap, clean_bitmap_offset, clean_bitmap_len, bs->bitmap_granularity);
}
}
for (it = v.begin(); it != v.end(); it++)
{
if (new_clean_bitmap)
{
bitmap_set(new_clean_bitmap, it->offset, it->len, bs->dsk.bitmap_granularity);
bitmap_set(new_clean_bitmap, it->offset, it->len, bs->bitmap_granularity);
}
await_sqe(4);
data->iov = (struct iovec){ it->buf, (size_t)it->len };
data->callback = simple_callback_w;
my_uring_prep_writev(
sqe, bs->dsk.data_fd, &data->iov, 1, bs->dsk.data_offset + clean_loc + it->offset
sqe, bs->data_fd, &data->iov, 1, bs->data_offset + clean_loc + it->offset
);
wait_count++;
}
// Wait for data writes before fsyncing it
resume_22:
if (wait_count > 0)
{
wait_state = 22;
return false;
}
// Sync data before writing metadata
resume_16:
resume_17:
@@ -521,7 +521,7 @@ resume_1:
return false;
}
resume_5:
// Submit metadata writes, but only when data is written and fsynced
// And metadata writes, but only after data writes complete
if (!bs->inmemory_meta && meta_new.it->second.state == 0 || wait_count > 0)
{
// metadata sector is still being read or data is still being written, wait for it
@@ -536,35 +536,35 @@ resume_1:
return false;
}
// zero out old metadata entry
memset((uint8_t*)meta_old.buf + meta_old.pos*bs->dsk.clean_entry_size, 0, bs->dsk.clean_entry_size);
memset((uint8_t*)meta_old.buf + meta_old.pos*bs->clean_entry_size, 0, bs->clean_entry_size);
await_sqe(15);
data->iov = (struct iovec){ meta_old.buf, bs->dsk.meta_block_size };
data->iov = (struct iovec){ meta_old.buf, bs->meta_block_size };
data->callback = simple_callback_w;
my_uring_prep_writev(
sqe, bs->dsk.meta_fd, &data->iov, 1, bs->dsk.meta_offset + bs->dsk.meta_block_size + meta_old.sector
sqe, bs->meta_fd, &data->iov, 1, bs->meta_offset + meta_old.sector
);
wait_count++;
}
if (has_delete)
{
clean_disk_entry *new_entry = (clean_disk_entry*)((uint8_t*)meta_new.buf + meta_new.pos*bs->dsk.clean_entry_size);
clean_disk_entry *new_entry = (clean_disk_entry*)((uint8_t*)meta_new.buf + meta_new.pos*bs->clean_entry_size);
if (new_entry->oid.inode != 0 && new_entry->oid != cur.oid)
{
printf("Fatal error (metadata corruption or bug): tried to delete metadata entry %lu (%lx:%lx v%lu) while deleting %lx:%lx\n",
clean_loc >> bs->dsk.block_order, new_entry->oid.inode, new_entry->oid.stripe,
clean_loc >> bs->block_order, new_entry->oid.inode, new_entry->oid.stripe,
new_entry->version, cur.oid.inode, cur.oid.stripe);
exit(1);
}
// zero out new metadata entry
memset((uint8_t*)meta_new.buf + meta_new.pos*bs->dsk.clean_entry_size, 0, bs->dsk.clean_entry_size);
memset((uint8_t*)meta_new.buf + meta_new.pos*bs->clean_entry_size, 0, bs->clean_entry_size);
}
else
{
clean_disk_entry *new_entry = (clean_disk_entry*)((uint8_t*)meta_new.buf + meta_new.pos*bs->dsk.clean_entry_size);
clean_disk_entry *new_entry = (clean_disk_entry*)((uint8_t*)meta_new.buf + meta_new.pos*bs->clean_entry_size);
if (new_entry->oid.inode != 0 && new_entry->oid != cur.oid)
{
printf("Fatal error (metadata corruption or bug): tried to overwrite non-zero metadata entry %lu (%lx:%lx v%lu) with %lx:%lx v%lu\n",
clean_loc >> bs->dsk.block_order, new_entry->oid.inode, new_entry->oid.stripe, new_entry->version,
clean_loc >> bs->block_order, new_entry->oid.inode, new_entry->oid.stripe, new_entry->version,
cur.oid.inode, cur.oid.stripe, cur.version);
exit(1);
}
@@ -572,20 +572,20 @@ resume_1:
new_entry->version = cur.version;
if (!bs->inmemory_meta)
{
memcpy(&new_entry->bitmap, new_clean_bitmap, bs->dsk.clean_entry_bitmap_size);
memcpy(&new_entry->bitmap, new_clean_bitmap, bs->clean_entry_bitmap_size);
}
// copy latest external bitmap/attributes
if (bs->dsk.clean_entry_bitmap_size)
if (bs->clean_entry_bitmap_size)
{
void *bmp_ptr = bs->dsk.clean_entry_bitmap_size > sizeof(void*) ? dirty_end->second.bitmap : &dirty_end->second.bitmap;
memcpy((uint8_t*)(new_entry+1) + bs->dsk.clean_entry_bitmap_size, bmp_ptr, bs->dsk.clean_entry_bitmap_size);
void *bmp_ptr = bs->clean_entry_bitmap_size > sizeof(void*) ? dirty_end->second.bitmap : &dirty_end->second.bitmap;
memcpy((uint8_t*)(new_entry+1) + bs->clean_entry_bitmap_size, bmp_ptr, bs->clean_entry_bitmap_size);
}
}
await_sqe(6);
data->iov = (struct iovec){ meta_new.buf, bs->dsk.meta_block_size };
data->iov = (struct iovec){ meta_new.buf, bs->meta_block_size };
data->callback = simple_callback_w;
my_uring_prep_writev(
sqe, bs->dsk.meta_fd, &data->iov, 1, bs->dsk.meta_offset + bs->dsk.meta_block_size + meta_new.sector
sqe, bs->meta_fd, &data->iov, 1, bs->meta_offset + meta_new.sector
);
wait_count++;
resume_7:
@@ -615,12 +615,7 @@ resume_1:
}
for (it = v.begin(); it != v.end(); it++)
{
// Free it if it's not taken from the journal
if (it->buf && (!bs->journal.inmemory || it->buf < bs->journal.buffer ||
it->buf >= (uint8_t*)bs->journal.buffer + bs->journal.len))
{
free(it->buf);
}
free(it->buf);
}
v.clear();
// And sync metadata (in batches - not per each operation!)
@@ -674,9 +669,9 @@ resume_1:
.version = JOURNAL_VERSION,
};
((journal_entry_start*)flusher->journal_superblock)->crc32 = je_crc32((journal_entry*)flusher->journal_superblock);
data->iov = (struct iovec){ flusher->journal_superblock, bs->dsk.journal_block_size };
data->iov = (struct iovec){ flusher->journal_superblock, bs->journal_block_size };
data->callback = simple_callback_w;
my_uring_prep_writev(sqe, bs->dsk.journal_fd, &data->iov, 1, bs->journal.offset);
my_uring_prep_writev(sqe, bs->journal.fd, &data->iov, 1, bs->journal.offset);
wait_count++;
resume_13:
if (wait_count > 0)
@@ -687,7 +682,7 @@ resume_1:
if (!bs->disable_journal_fsync)
{
await_sqe(20);
my_uring_prep_fsync(sqe, bs->dsk.journal_fd, IORING_FSYNC_DATASYNC);
my_uring_prep_fsync(sqe, bs->journal.fd, IORING_FSYNC_DATASYNC);
data->iov = { 0 };
data->callback = simple_callback_w;
resume_21:
@@ -765,22 +760,21 @@ bool journal_flusher_co::scan_dirty(int wait_base)
{
submit_offset = dirty_it->second.location + offset - dirty_it->second.offset;
submit_len = it == v.end() || it->offset >= end_offset ? end_offset-offset : it->offset-offset;
it = v.insert(it, (copy_buffer_t){ .offset = offset, .len = submit_len });
it = v.insert(it, (copy_buffer_t){ .offset = offset, .len = submit_len, .buf = memalign_or_die(MEM_ALIGNMENT, submit_len) });
copy_count++;
if (bs->journal.inmemory)
{
// Take it from memory, don't copy it
it->buf = (uint8_t*)bs->journal.buffer + submit_offset;
// Take it from memory
memcpy(it->buf, (uint8_t*)bs->journal.buffer + submit_offset, submit_len);
}
else
{
// Read it from disk
it->buf = memalign_or_die(MEM_ALIGNMENT, submit_len);
await_sqe(0);
data->iov = (struct iovec){ it->buf, (size_t)submit_len };
data->callback = simple_callback_r;
my_uring_prep_readv(
sqe, bs->dsk.journal_fd, &data->iov, 1, bs->journal.offset + submit_offset
sqe, bs->journal.fd, &data->iov, 1, bs->journal.offset + submit_offset
);
wait_count++;
}
@@ -831,8 +825,8 @@ bool journal_flusher_co::modify_meta_read(uint64_t meta_loc, flusher_meta_write_
// And yet another option is to use LSM trees for metadata, but it sophisticates everything a lot,
// so I'll avoid it as long as I can.
wr.submitted = false;
wr.sector = ((meta_loc >> bs->dsk.block_order) / (bs->dsk.meta_block_size / bs->dsk.clean_entry_size)) * bs->dsk.meta_block_size;
wr.pos = ((meta_loc >> bs->dsk.block_order) % (bs->dsk.meta_block_size / bs->dsk.clean_entry_size));
wr.sector = ((meta_loc >> bs->block_order) / (bs->meta_block_size / bs->clean_entry_size)) * bs->meta_block_size;
wr.pos = ((meta_loc >> bs->block_order) % (bs->meta_block_size / bs->clean_entry_size));
if (bs->inmemory_meta)
{
wr.buf = (uint8_t*)bs->metadata_buffer + wr.sector;
@@ -842,20 +836,20 @@ bool journal_flusher_co::modify_meta_read(uint64_t meta_loc, flusher_meta_write_
if (wr.it == flusher->meta_sectors.end())
{
// Not in memory yet, read it
wr.buf = memalign_or_die(MEM_ALIGNMENT, bs->dsk.meta_block_size);
wr.buf = memalign_or_die(MEM_ALIGNMENT, bs->meta_block_size);
wr.it = flusher->meta_sectors.emplace(wr.sector, (meta_sector_t){
.offset = wr.sector,
.len = bs->dsk.meta_block_size,
.len = bs->meta_block_size,
.state = 0, // 0 = not read yet
.buf = wr.buf,
.usage_count = 1,
}).first;
await_sqe(0);
data->iov = (struct iovec){ wr.it->second.buf, bs->dsk.meta_block_size };
data->iov = (struct iovec){ wr.it->second.buf, bs->meta_block_size };
data->callback = simple_callback_r;
wr.submitted = true;
my_uring_prep_readv(
sqe, bs->dsk.meta_fd, &data->iov, 1, bs->dsk.meta_offset + bs->dsk.meta_block_size + wr.sector
sqe, bs->meta_fd, &data->iov, 1, bs->meta_offset + wr.sector
);
wait_count++;
}
@@ -873,11 +867,11 @@ void journal_flusher_co::update_clean_db()
{
#ifdef BLOCKSTORE_DEBUG
printf("Free block %lu from %lx:%lx v%lu (new location is %lu)\n",
old_clean_loc >> bs->dsk.block_order,
old_clean_loc >> bs->block_order,
cur.oid.inode, cur.oid.stripe, cur.version,
clean_loc >> bs->dsk.block_order);
clean_loc >> bs->block_order);
#endif
bs->data_alloc->set(old_clean_loc >> bs->dsk.block_order, false);
bs->data_alloc->set(old_clean_loc >> bs->block_order, false);
}
auto & clean_db = bs->clean_db_shard(cur.oid);
if (has_delete)
@@ -886,10 +880,10 @@ void journal_flusher_co::update_clean_db()
clean_db.erase(clean_it);
#ifdef BLOCKSTORE_DEBUG
printf("Free block %lu from %lx:%lx v%lu (delete)\n",
clean_loc >> bs->dsk.block_order,
clean_loc >> bs->block_order,
cur.oid.inode, cur.oid.stripe, cur.version);
#endif
bs->data_alloc->set(clean_loc >> bs->dsk.block_order, false);
bs->data_alloc->set(clean_loc >> bs->block_order, false);
clean_loc = UINT64_MAX;
}
else
@@ -938,7 +932,7 @@ bool journal_flusher_co::fsync_batch(bool fsync_meta, int wait_base)
await_sqe(0);
data->iov = { 0 };
data->callback = simple_callback_w;
my_uring_prep_fsync(sqe, fsync_meta ? bs->dsk.meta_fd : bs->dsk.data_fd, IORING_FSYNC_DATASYNC);
my_uring_prep_fsync(sqe, fsync_meta ? bs->meta_fd : bs->data_fd, IORING_FSYNC_DATASYNC);
cur_sync->state = 1;
wait_count++;
resume_2:
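
The two scan_dirty() variants above differ in buffer ownership: one references the in-memory journal buffer directly (zero-copy) and must then skip free() for pointers that alias it, while the other always copies into a freshly allocated aligned buffer and can free unconditionally. A sketch of the aliasing guard (hypothetical names, not code from this commit):

#include <cstdint>
#include <cstdlib>

// A copy buffer either aliases the shared in-memory journal ring
// or owns separately allocated memory; only the latter may be freed.
struct journal_ref { uint8_t *buffer; uint64_t len; };

static void release(uint8_t *buf, const journal_ref & journal)
{
    if (buf && (buf < journal.buffer || buf >= journal.buffer + journal.len))
        free(buf);  // owned heap buffer
    // else: points into journal.buffer, owned by the journal, not freed here
}

int main()
{
    journal_ref journal = { (uint8_t*)malloc(4096), 4096 };
    uint8_t *alias = journal.buffer + 512;   // zero-copy reference
    uint8_t *owned = (uint8_t*)malloc(512);  // private copy
    release(alias, journal);                 // no-op
    release(owned, journal);                 // freed
    free(journal.buffer);
    return 0;
}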


@@ -11,19 +11,25 @@ blockstore_impl_t::blockstore_impl_t(blockstore_config_t & config, ring_loop_t *
ring_consumer.loop = [this]() { loop(); };
ringloop->register_consumer(&ring_consumer);
initialized = 0;
data_fd = meta_fd = journal.fd = -1;
parse_config(config);
zero_object = (uint8_t*)memalign_or_die(MEM_ALIGNMENT, dsk.data_block_size);
zero_object = (uint8_t*)memalign_or_die(MEM_ALIGNMENT, data_block_size);
try
{
dsk.open_data();
dsk.open_meta();
dsk.open_journal();
open_data();
open_meta();
open_journal();
calc_lengths();
data_alloc = new allocator(dsk.block_count);
data_alloc = new allocator(block_count);
}
catch (std::exception & e)
{
dsk.close_all();
if (data_fd >= 0)
close(data_fd);
if (meta_fd >= 0 && meta_fd != data_fd)
close(meta_fd);
if (journal.fd >= 0 && journal.fd != meta_fd)
close(journal.fd);
throw;
}
flusher = new journal_flusher_t(this);
@@ -35,7 +41,12 @@ blockstore_impl_t::~blockstore_impl_t()
delete flusher;
free(zero_object);
ringloop->unregister_consumer(&ring_consumer);
dsk.close_all();
if (data_fd >= 0)
close(data_fd);
if (meta_fd >= 0 && meta_fd != data_fd)
close(meta_fd);
if (journal.fd >= 0 && journal.fd != meta_fd)
close(journal.fd);
if (metadata_buffer)
free(metadata_buffer);
if (clean_bitmap)
@@ -107,7 +118,7 @@ void blockstore_impl_t::loop()
// has_writes == 0 - no writes before the current queue item
// has_writes == 1 - some writes in progress
// has_writes == 2 - tried to submit some writes, but failed
int has_writes = 0, op_idx = 0, new_idx = 0;
int has_writes = 0, op_idx = 0, new_idx = 0, done_lists = 0;
for (; op_idx < submit_queue.size(); op_idx++, new_idx++)
{
auto op = submit_queue[op_idx];
@@ -188,12 +199,16 @@ void blockstore_impl_t::loop()
else if (op->opcode == BS_OP_LIST)
{
// LIST doesn't have to be blocked by previous modifications
process_list(op);
wr_st = 2;
// But don't do a lot of LISTs at once, because they're blocking and potentially slow
if (single_tick_list_limit <= 0 || done_lists < single_tick_list_limit)
{
process_list(op);
done_lists++;
wr_st = 2;
}
}
if (wr_st == 2)
{
submit_queue[op_idx] = NULL;
new_idx--;
}
if (wr_st == 0)
@@ -302,6 +317,17 @@ void blockstore_impl_t::check_wait(blockstore_op_t *op)
// do not submit
#ifdef BLOCKSTORE_DEBUG
printf("Still waiting for a journal buffer\n");
#endif
return;
}
PRIV(op)->wait_for = 0;
}
else if (PRIV(op)->wait_for == WAIT_FREE)
{
if (!data_alloc->get_free_count() && flusher->is_active())
{
#ifdef BLOCKSTORE_DEBUG
printf("Still waiting for free space on the data device\n");
#endif
return;
}
@@ -317,9 +343,9 @@ void blockstore_impl_t::enqueue_op(blockstore_op_t *op)
{
if (op->opcode < BS_OP_MIN || op->opcode > BS_OP_MAX ||
((op->opcode == BS_OP_READ || op->opcode == BS_OP_WRITE || op->opcode == BS_OP_WRITE_STABLE) && (
op->offset >= dsk.data_block_size ||
op->len > dsk.data_block_size-op->offset ||
(op->len % dsk.disk_alignment)
op->offset >= data_block_size ||
op->len > data_block_size-op->offset ||
(op->len % disk_alignment)
)) ||
readonly && op->opcode != BS_OP_READ && op->opcode != BS_OP_LIST)
{
@@ -583,7 +609,7 @@ void blockstore_impl_t::process_list(blockstore_op_t *op)
replace_stable(dirty_it->first.oid, 0, clean_stable_count, stable_count, stable);
}
}
else if (IS_STABLE(dirty_it->second.state) || (dirty_it->second.state & BS_ST_INSTANT))
else if (IS_STABLE(dirty_it->second.state))
{
// First try to replace a clean stable version in the first part of the list
if (!replace_stable(dirty_it->first.oid, dirty_it->first.version, 0, clean_stable_count, stable))
@@ -672,16 +698,3 @@ void blockstore_impl_t::dump_diagnostics()
journal.dump_diagnostics();
flusher->dump_diagnostics();
}
void blockstore_impl_t::disk_error_abort(const char *op, int retval, int expected)
{
if (retval == -EAGAIN)
{
fprintf(stderr, "EAGAIN error received from a disk %s during flush."
" It must never happen with io_uring and indicates a kernel bug."
" Please upgrade your kernel. Aborting.\n", op);
exit(1);
}
fprintf(stderr, "Disk %s failed: result is %d, expected %d. Can't continue, sorry :-(\n", op, retval, expected);
exit(1);
}
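
The enqueue_op() checks earlier in this file reject reads and writes that stray outside one data block or break alignment. In concrete numbers, with a 128 KiB block and 4 KiB alignment (a sketch mirroring those checks, not code from this commit):

#include <cstdint>

// Mirrors the enqueue_op() range/alignment validation shown above.
static bool valid_rw(uint64_t offset, uint64_t len,
                     uint32_t data_block_size, uint32_t disk_alignment)
{
    return offset < data_block_size
        && len <= data_block_size - offset
        && len % disk_alignment == 0;
}

int main()
{
    const uint32_t bs = 131072, align = 4096;   // 128 KiB block, 4 KiB alignment
    bool a = valid_rw(4096, 8192, bs, align);   // true: aligned, inside one block
    bool b = valid_rw(131072, 0, bs, align);    // false: offset == block size
    bool c = valid_rw(0, 1000, bs, align);      // false: len not 4 KiB-aligned
    return (a && !b && !c) ? 0 : 1;
}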


@@ -4,7 +4,6 @@
#pragma once
#include "blockstore.h"
#include "blockstore_disk.h"
#include <sys/types.h>
#include <sys/ioctl.h>
@@ -18,7 +17,6 @@
#include <list>
#include <deque>
#include <new>
#include <unordered_map>
#include "cpp-btree/btree_map.h"
@@ -160,11 +158,12 @@ struct __attribute__((__packed__)) dirty_entry
#define WAIT_JOURNAL 3
// Suspend operation until the next journal sector buffer is free
#define WAIT_JOURNAL_BUFFER 4
// Suspend operation until there is some free space on the data device
#define WAIT_FREE 5
struct fulfill_read_t
{
uint64_t offset, len;
uint64_t journal_sector; // sector+1 if used and !journal.inmemory, otherwise 0
};
#define PRIV(op) ((blockstore_op_private_t*)(op)->private_data)
@@ -218,10 +217,23 @@ struct pool_shard_settings_t
class blockstore_impl_t
{
blockstore_disk_t dsk;
/******* OPTIONS *******/
std::string data_device, meta_device, journal_device;
uint32_t data_block_size;
uint64_t meta_offset;
uint64_t data_offset;
uint64_t cfg_journal_size, cfg_data_size;
// Required write alignment and journal/metadata/data areas' location alignment
uint32_t disk_alignment = 4096;
// Journal block size - minimum_io_size of the journal device is the best choice
uint64_t journal_block_size = 4096;
// Metadata block size - minimum_io_size of the metadata device is the best choice
uint64_t meta_block_size = 4096;
// Sparse write tracking granularity. 4 KB is a good choice. Must be a multiple of disk_alignment
uint64_t bitmap_granularity = 4096;
bool readonly = false;
// By default, Blockstore locks all opened devices exclusively. This option can be used to disable locking
bool disable_flock = false;
// It is safe to disable fsync() if drive write cache is writethrough
bool disable_data_fsync = false, disable_meta_fsync = false, disable_journal_fsync = false;
// Enable if you want every operation to be executed with an "implicit fsync"
@@ -240,6 +252,8 @@ class blockstore_impl_t
int throttle_target_parallelism = 1;
// Minimum difference in microseconds between target and real execution times to throttle the response
int throttle_threshold_us = 50;
// Maximum number of LIST operations to be processed per ring loop tick (they are blocking and potentially slow)
int single_tick_list_limit = 1;
/******* END OF OPTIONS *******/
struct ring_consumer_t ring_consumer;
@@ -254,6 +268,16 @@ class blockstore_impl_t
allocator *data_alloc = NULL;
uint8_t *zero_object;
uint32_t block_order;
uint64_t block_count;
uint32_t clean_entry_bitmap_size = 0, clean_entry_size = 0;
int meta_fd;
int data_fd;
uint64_t meta_device_size, meta_len;
uint64_t data_device_size, data_len;
uint64_t data_device_sect, meta_device_sect, journal_device_sect;
void *metadata_buffer = NULL;
struct journal_t journal;
@@ -290,7 +314,6 @@ class blockstore_impl_t
// Journaling
void prepare_journal_sector_write(int sector, blockstore_op_t *op);
void handle_journal_write(ring_data_t *data, uint64_t flush_id);
void disk_error_abort(const char *op, int retval, int expected);
// Asynchronous init
int initialized;
@@ -303,7 +326,7 @@ class blockstore_impl_t
// Read
int dequeue_read(blockstore_op_t *read_op);
int fulfill_read(blockstore_op_t *read_op, uint64_t &fulfilled, uint32_t item_start, uint32_t item_end,
uint32_t item_state, uint64_t item_version, uint64_t item_location, uint64_t journal_sector);
uint32_t item_state, uint64_t item_version, uint64_t item_location);
int fulfill_read_push(blockstore_op_t *op, void *buf, uint64_t offset, uint64_t len,
uint32_t item_state, uint64_t item_version);
void handle_read_event(ring_data_t *data, blockstore_op_t *op);
@@ -371,9 +394,9 @@ public:
// Print diagnostics to stdout
void dump_diagnostics();
inline uint32_t get_block_size() { return dsk.data_block_size; }
inline uint64_t get_block_count() { return dsk.block_count; }
inline uint32_t get_block_size() { return data_block_size; }
inline uint64_t get_block_count() { return block_count; }
inline uint64_t get_free_block_count() { return data_alloc->get_free_count(); }
inline uint32_t get_bitmap_granularity() { return dsk.disk_alignment; }
inline uint64_t get_journal_size() { return dsk.journal_len; }
inline uint32_t get_bitmap_granularity() { return disk_alignment; }
inline uint64_t get_journal_size() { return journal.len; }
};
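
The PRIV(op) macro above reinterprets the fixed-size private_data scratch area inside each blockstore_op_t as the implementation's per-operation state (wait_for, etc.). A hypothetical, self-contained mirror of the pattern — the struct fields and size here are illustrative only:

#include <cstdint>

#define BS_OP_PRIVATE_DATA_SIZE 256        // illustrative value only

struct blockstore_op_private_t
{
    int wait_for;       // e.g. WAIT_JOURNAL, WAIT_FREE, ...
    uint64_t wait_detail;
};

struct blockstore_op_t
{
    uint8_t private_data[BS_OP_PRIVATE_DATA_SIZE];
};

#define PRIV(op) ((blockstore_op_private_t*)(op)->private_data)

static_assert(sizeof(blockstore_op_private_t) <= BS_OP_PRIVATE_DATA_SIZE,
    "private state must fit in the op's scratch area");

int main()
{
    blockstore_op_t op = {};
    PRIV(&op)->wait_for = 5;                // WAIT_FREE in the header above
    return PRIV(&op)->wait_for == 5 ? 0 : 1;
}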


@@ -3,11 +3,6 @@
#include "blockstore_impl.h"
#define INIT_META_EMPTY 0
#define INIT_META_READING 1
#define INIT_META_READ_DONE 2
#define INIT_META_WRITING 3
#define GET_SQE() \
sqe = bs->get_sqe();\
if (!sqe)\
@@ -27,33 +22,32 @@ blockstore_init_meta::blockstore_init_meta(blockstore_impl_t *bs)
this->bs = bs;
}
void blockstore_init_meta::handle_event(ring_data_t *data, int buf_num)
void blockstore_init_meta::handle_event(ring_data_t *data)
{
if (data->res < 0)
{
throw std::runtime_error(
std::string("read metadata failed at offset ") + std::to_string(bufs[buf_num].offset) +
std::string("read metadata failed at offset ") + std::to_string(metadata_read) +
std::string(": ") + strerror(-data->res)
);
}
if (buf_num >= 0)
{
bufs[buf_num].state = (bufs[buf_num].state == INIT_META_READING
? INIT_META_READ_DONE
: INIT_META_EMPTY);
}
submitted--;
bs->ringloop->wakeup();
prev_done = data->res > 0 ? submitted : 0;
done_len = data->res;
done_pos = metadata_read;
metadata_read += data->res;
submitted = 0;
}
int blockstore_init_meta::loop()
{
if (wait_state == 1) goto resume_1;
else if (wait_state == 2) goto resume_2;
else if (wait_state == 3) goto resume_3;
else if (wait_state == 4) goto resume_4;
else if (wait_state == 5) goto resume_5;
else if (wait_state == 6) goto resume_6;
if (wait_state == 1)
goto resume_1;
else if (wait_state == 2)
goto resume_2;
else if (wait_state == 3)
goto resume_3;
else if (wait_state == 4)
goto resume_4;
printf("Reading blockstore metadata\n");
if (bs->inmemory_meta)
metadata_buffer = bs->metadata_buffer;
@@ -63,27 +57,27 @@ int blockstore_init_meta::loop()
throw std::runtime_error("Failed to allocate metadata read buffer");
// Read superblock
GET_SQE();
data->iov = { metadata_buffer, bs->dsk.meta_block_size };
data->callback = [this](ring_data_t *data) { handle_event(data, -1); };
my_uring_prep_readv(sqe, bs->dsk.meta_fd, &data->iov, 1, bs->dsk.meta_offset);
data->iov = { metadata_buffer, bs->meta_block_size };
data->callback = [this](ring_data_t *data) { handle_event(data); };
my_uring_prep_readv(sqe, bs->meta_fd, &data->iov, 1, bs->meta_offset);
bs->ringloop->submit();
submitted++;
submitted = 1;
resume_1:
if (submitted > 0)
if (submitted)
{
wait_state = 1;
return 1;
}
if (iszero((uint64_t*)metadata_buffer, bs->dsk.meta_block_size / sizeof(uint64_t)))
if (iszero((uint64_t*)metadata_buffer, bs->meta_block_size / sizeof(uint64_t)))
{
{
blockstore_meta_header_v1_t *hdr = (blockstore_meta_header_v1_t *)metadata_buffer;
hdr->zero = 0;
hdr->magic = BLOCKSTORE_META_MAGIC_V1;
hdr->version = BLOCKSTORE_META_VERSION_V1;
hdr->meta_block_size = bs->dsk.meta_block_size;
hdr->data_block_size = bs->dsk.data_block_size;
hdr->bitmap_granularity = bs->dsk.bitmap_granularity;
hdr->meta_block_size = bs->meta_block_size;
hdr->data_block_size = bs->data_block_size;
hdr->bitmap_granularity = bs->bitmap_granularity;
}
if (bs->readonly)
{
@@ -93,11 +87,11 @@ resume_1:
{
printf("Initializing metadata area\n");
GET_SQE();
data->iov = (struct iovec){ metadata_buffer, bs->dsk.meta_block_size };
data->callback = [this](ring_data_t *data) { handle_event(data, -1); };
my_uring_prep_writev(sqe, bs->dsk.meta_fd, &data->iov, 1, bs->dsk.meta_offset);
data->iov = (struct iovec){ metadata_buffer, bs->meta_block_size };
data->callback = [this](ring_data_t *data) { handle_event(data); };
my_uring_prep_writev(sqe, bs->meta_fd, &data->iov, 1, bs->meta_offset);
bs->ringloop->submit();
submitted++;
submitted = 1;
resume_3:
if (submitted > 0)
{
@@ -121,132 +115,80 @@ resume_1:
);
exit(1);
}
if (hdr->meta_block_size != bs->dsk.meta_block_size ||
hdr->data_block_size != bs->dsk.data_block_size ||
hdr->bitmap_granularity != bs->dsk.bitmap_granularity)
if (hdr->meta_block_size != bs->meta_block_size ||
hdr->data_block_size != bs->data_block_size ||
hdr->bitmap_granularity != bs->bitmap_granularity)
{
printf(
"Configuration stored in metadata superblock"
" (meta_block_size=%u, data_block_size=%u, bitmap_granularity=%u)"
" differs from OSD configuration (%lu/%u/%lu).\n",
hdr->meta_block_size, hdr->data_block_size, hdr->bitmap_granularity,
bs->dsk.meta_block_size, bs->dsk.data_block_size, bs->dsk.bitmap_granularity
bs->meta_block_size, bs->data_block_size, bs->bitmap_granularity
);
exit(1);
}
}
// Skip superblock
md_offset = bs->dsk.meta_block_size;
next_offset = md_offset;
entries_per_block = bs->dsk.meta_block_size / bs->dsk.clean_entry_size;
bs->meta_offset += bs->meta_block_size;
bs->meta_len -= bs->meta_block_size;
prev_done = 0;
done_len = 0;
done_pos = 0;
metadata_read = 0;
// Read the rest of the metadata
resume_2:
if (next_offset < bs->dsk.meta_len && submitted == 0)
while (1)
{
// Submit one read
for (int i = 0; i < 2; i++)
resume_2:
if (submitted)
{
if (!bufs[i].state)
{
bufs[i].buf = (uint8_t*)metadata_buffer + (bs->inmemory_meta
? next_offset-md_offset
: i*bs->metadata_buf_size);
bufs[i].offset = next_offset;
bufs[i].size = bs->dsk.meta_len-next_offset > bs->metadata_buf_size
? bs->metadata_buf_size : bs->dsk.meta_len-next_offset;
bufs[i].state = INIT_META_READING;
submitted++;
next_offset += bufs[i].size;
GET_SQE();
data->iov = { bufs[i].buf, bufs[i].size };
data->callback = [this, i](ring_data_t *data) { handle_event(data, i); };
if (!zero_on_init)
my_uring_prep_readv(sqe, bs->dsk.meta_fd, &data->iov, 1, bs->dsk.meta_offset + bufs[i].offset);
else
{
// Fill metadata with zeroes
memset(data->iov.iov_base, 0, data->iov.iov_len);
my_uring_prep_writev(sqe, bs->dsk.meta_fd, &data->iov, 1, bs->dsk.meta_offset + bufs[i].offset);
}
bs->ringloop->submit();
break;
}
wait_state = 2;
return 1;
}
}
for (int i = 0; i < 2; i++)
{
if (bufs[i].state == INIT_META_READ_DONE)
if (metadata_read < bs->meta_len)
{
// Handle result
bool changed = false;
for (uint64_t sector = 0; sector < bufs[i].size; sector += bs->dsk.meta_block_size)
{
// handle <count> entries
if (handle_meta_block(bufs[i].buf + sector, entries_per_block,
((bufs[i].offset + sector - md_offset) / bs->dsk.meta_block_size) * entries_per_block))
changed = true;
}
if (changed && !bs->inmemory_meta && !bs->readonly)
{
// write the modified buffer back
GET_SQE();
data->iov = { bufs[i].buf, bufs[i].size };
data->callback = [this, i](ring_data_t *data) { handle_event(data, i); };
my_uring_prep_writev(sqe, bs->dsk.meta_fd, &data->iov, 1, bs->dsk.meta_offset + bufs[i].offset);
bufs[i].state = INIT_META_WRITING;
submitted++;
}
GET_SQE();
data->iov = {
(uint8_t*)metadata_buffer + (bs->inmemory_meta
? metadata_read
: (prev == 1 ? bs->metadata_buf_size : 0)),
bs->meta_len - metadata_read > bs->metadata_buf_size ? bs->metadata_buf_size : bs->meta_len - metadata_read,
};
data->callback = [this](ring_data_t *data) { handle_event(data); };
if (!zero_on_init)
my_uring_prep_readv(sqe, bs->meta_fd, &data->iov, 1, bs->meta_offset + metadata_read);
else
{
bufs[i].state = 0;
// Fill metadata with zeroes
memset(data->iov.iov_base, 0, data->iov.iov_len);
my_uring_prep_writev(sqe, bs->meta_fd, &data->iov, 1, bs->meta_offset + metadata_read);
}
bs->ringloop->wakeup();
bs->ringloop->submit();
submitted = (prev == 1 ? 2 : 1);
prev = submitted;
}
}
if (submitted > 0)
{
wait_state = 2;
return 1;
}
if (entries_to_zero.size() && !bs->inmemory_meta && !bs->readonly)
{
// we have to zero out additional entries
for (i = 0; i < entries_to_zero.size(); )
if (prev_done)
{
next_offset = entries_to_zero[i]/entries_per_block;
for (j = i; j < entries_to_zero.size() && entries_to_zero[j]/entries_per_block == next_offset; j++) {}
GET_SQE();
data->iov = { metadata_buffer, bs->dsk.meta_block_size };
data->callback = [this](ring_data_t *data) { handle_event(data, -1); };
my_uring_prep_readv(sqe, bs->dsk.meta_fd, &data->iov, 1, bs->dsk.meta_offset + (1+next_offset)*bs->dsk.meta_block_size);
submitted++;
resume_5:
if (submitted > 0)
void *done_buf = bs->inmemory_meta
? ((uint8_t*)metadata_buffer + done_pos)
: ((uint8_t*)metadata_buffer + (prev_done == 2 ? bs->metadata_buf_size : 0));
unsigned count = bs->meta_block_size / bs->clean_entry_size;
for (int sector = 0; sector < done_len; sector += bs->meta_block_size)
{
wait_state = 5;
return 1;
}
for (; i < j; i++)
{
uint64_t pos = (entries_to_zero[i] % entries_per_block);
memset((uint8_t*)metadata_buffer + pos*bs->dsk.clean_entry_size, 0, bs->dsk.clean_entry_size);
}
GET_SQE();
data->iov = { metadata_buffer, bs->dsk.meta_block_size };
data->callback = [this](ring_data_t *data) { handle_event(data, -1); };
my_uring_prep_writev(sqe, bs->dsk.meta_fd, &data->iov, 1, bs->dsk.meta_offset + (1+next_offset)*bs->dsk.meta_block_size);
submitted++;
resume_6:
if (submitted > 0)
{
wait_state = 6;
return 1;
// handle <count> entries
handle_entries((uint8_t*)done_buf + sector, count, bs->block_order);
done_cnt += count;
}
prev_done = 0;
done_len = 0;
}
if (!submitted)
{
break;
}
entries_to_zero.clear();
}
// metadata read finished
printf("Metadata entries loaded: %lu, free blocks: %lu / %lu\n", entries_loaded, bs->data_alloc->get_free_count(), bs->dsk.block_count);
printf("Metadata entries loaded: %lu, free blocks: %lu / %lu\n", entries_loaded, bs->data_alloc->get_free_count(), bs->block_count);
if (!bs->inmemory_meta)
{
free(metadata_buffer);
@@ -255,10 +197,10 @@ resume_6:
if (zero_on_init && !bs->disable_meta_fsync)
{
GET_SQE();
my_uring_prep_fsync(sqe, bs->dsk.meta_fd, IORING_FSYNC_DATASYNC);
my_uring_prep_fsync(sqe, bs->meta_fd, IORING_FSYNC_DATASYNC);
data->iov = { 0 };
data->callback = [this](ring_data_t *data) { handle_event(data, -1); };
submitted++;
data->callback = [this](ring_data_t *data) { handle_event(data); };
submitted = 1;
bs->ringloop->submit();
resume_4:
if (submitted > 0)
@@ -270,18 +212,14 @@ resume_6:
return 0;
}
bool blockstore_init_meta::handle_meta_block(uint8_t *buf, uint64_t entries_per_block, uint64_t done_cnt)
void blockstore_init_meta::handle_entries(void* entries, unsigned count, int block_order)
{
bool updated = false;
uint64_t max_i = entries_per_block;
if (max_i > bs->dsk.block_count-done_cnt)
max_i = bs->dsk.block_count-done_cnt;
for (uint64_t i = 0; i < max_i; i++)
for (unsigned i = 0; i < count; i++)
{
clean_disk_entry *entry = (clean_disk_entry*)(buf + i*bs->dsk.clean_entry_size);
if (!bs->inmemory_meta && bs->dsk.clean_entry_bitmap_size)
clean_disk_entry *entry = (clean_disk_entry*)((uint8_t*)entries + i*bs->clean_entry_size);
if (!bs->inmemory_meta && bs->clean_entry_bitmap_size)
{
memcpy(bs->clean_bitmap + (done_cnt+i)*2*bs->dsk.clean_entry_bitmap_size, &entry->bitmap, 2*bs->dsk.clean_entry_bitmap_size);
memcpy(bs->clean_bitmap + (done_cnt+i)*2*bs->clean_entry_bitmap_size, &entry->bitmap, 2*bs->clean_entry_bitmap_size);
}
if (entry->oid.inode > 0)
{
@@ -292,39 +230,17 @@ bool blockstore_init_meta::handle_meta_block(uint8_t *buf, uint64_t entries_per_
if (clean_it != clean_db.end())
{
// free the previous block
// here we have to zero out the previous entry because otherwise we'll hit
// "tried to overwrite non-zero metadata entry" later
uint64_t old_clean_loc = clean_it->second.location >> bs->dsk.block_order;
if (bs->inmemory_meta)
{
uint64_t sector = (old_clean_loc / entries_per_block) * bs->dsk.meta_block_size;
uint64_t pos = (old_clean_loc % entries_per_block);
clean_disk_entry *old_entry = (clean_disk_entry*)((uint8_t*)bs->metadata_buffer + sector + pos*bs->dsk.clean_entry_size);
memset(old_entry, 0, bs->dsk.clean_entry_size);
}
else if (old_clean_loc >= done_cnt)
{
updated = true;
uint64_t sector = ((old_clean_loc - done_cnt) / entries_per_block) * bs->dsk.meta_block_size;
uint64_t pos = (old_clean_loc % entries_per_block);
clean_disk_entry *old_entry = (clean_disk_entry*)(buf + sector + pos*bs->dsk.clean_entry_size);
memset(old_entry, 0, bs->dsk.clean_entry_size);
}
else
{
entries_to_zero.push_back(clean_it->second.location >> bs->dsk.block_order);
}
#ifdef BLOCKSTORE_DEBUG
printf("Free block %lu from %lx:%lx v%lu (new location is %lu)\n",
old_clean_loc,
clean_it->second.location >> block_order,
clean_it->first.inode, clean_it->first.stripe, clean_it->second.version,
done_cnt+i);
#endif
bs->data_alloc->set(old_clean_loc, false);
bs->data_alloc->set(clean_it->second.location >> block_order, false);
}
else
{
bs->inode_space_stats[entry->oid.inode] += bs->dsk.data_block_size;
bs->inode_space_stats[entry->oid.inode] += bs->data_block_size;
}
entries_loaded++;
#ifdef BLOCKSTORE_DEBUG
@@ -333,21 +249,17 @@ bool blockstore_init_meta::handle_meta_block(uint8_t *buf, uint64_t entries_per_
bs->data_alloc->set(done_cnt+i, true);
clean_db[entry->oid] = (struct clean_entry){
.version = entry->version,
.location = (done_cnt+i) << bs->dsk.block_order,
.location = (done_cnt+i) << block_order,
};
}
else
{
// here we also have to zero out the entry
updated = true;
memset(entry, 0, bs->dsk.clean_entry_size);
#ifdef BLOCKSTORE_DEBUG
printf("Old clean entry %lu: %lx:%lx v%lu\n", done_cnt+i, entry->oid.inode, entry->oid.stripe, entry->version);
#endif
}
}
}
return updated;
}
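// A note on the sector/pos arithmetic above: clean entries are packed whole
// into metadata blocks (entries never span blocks), so a block number maps to
// a (metadata sector, slot) pair. A minimal sketch of that addressing,
// mirroring the computation used when zeroing a stale in-memory entry
// (meta_entry_ptr is a hypothetical helper, not from the source):
static inline uint8_t *meta_entry_ptr(uint8_t *metadata_buffer, uint64_t block_num,
    uint64_t entries_per_block, uint64_t meta_block_size, uint64_t clean_entry_size)
{
    uint64_t sector = (block_num / entries_per_block) * meta_block_size;
    uint64_t pos = block_num % entries_per_block;
    return metadata_buffer + sector + pos * clean_entry_size;
}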
blockstore_init_journal::blockstore_init_journal(blockstore_impl_t *bs)
@@ -416,7 +328,7 @@ int blockstore_init_journal::loop()
data = ((ring_data_t*)sqe->user_data);
data->iov = { submitted_buf, bs->journal.block_size };
data->callback = simple_callback;
my_uring_prep_readv(sqe, bs->dsk.journal_fd, &data->iov, 1, bs->journal.offset);
my_uring_prep_readv(sqe, bs->journal.fd, &data->iov, 1, bs->journal.offset);
bs->ringloop->submit();
wait_count = 1;
resume_1:
@@ -455,7 +367,7 @@ resume_1:
GET_SQE();
data->iov = (struct iovec){ submitted_buf, 2*bs->journal.block_size };
data->callback = simple_callback;
my_uring_prep_writev(sqe, bs->dsk.journal_fd, &data->iov, 1, bs->journal.offset);
my_uring_prep_writev(sqe, bs->journal.fd, &data->iov, 1, bs->journal.offset);
wait_count++;
bs->ringloop->submit();
resume_6:
@@ -467,7 +379,7 @@ resume_1:
if (!bs->disable_journal_fsync)
{
GET_SQE();
my_uring_prep_fsync(sqe, bs->dsk.journal_fd, IORING_FSYNC_DATASYNC);
my_uring_prep_fsync(sqe, bs->journal.fd, IORING_FSYNC_DATASYNC);
data->iov = { 0 };
data->callback = simple_callback;
wait_count++;
@@ -536,7 +448,7 @@ resume_1:
end - journal_pos < JOURNAL_BUFFER_SIZE ? end - journal_pos : JOURNAL_BUFFER_SIZE,
};
data->callback = [this](ring_data_t *data1) { handle_event(data1); };
my_uring_prep_readv(sqe, bs->dsk.journal_fd, &data->iov, 1, bs->journal.offset + journal_pos);
my_uring_prep_readv(sqe, bs->journal.fd, &data->iov, 1, bs->journal.offset + journal_pos);
bs->ringloop->submit();
}
while (done.size() > 0)
@@ -551,7 +463,7 @@ resume_1:
GET_SQE();
data->iov = { init_write_buf, bs->journal.block_size };
data->callback = simple_callback;
my_uring_prep_writev(sqe, bs->dsk.journal_fd, &data->iov, 1, bs->journal.offset + init_write_sector);
my_uring_prep_writev(sqe, bs->journal.fd, &data->iov, 1, bs->journal.offset + init_write_sector);
wait_count++;
bs->ringloop->submit();
resume_7:
@@ -565,7 +477,7 @@ resume_1:
GET_SQE();
data->iov = { 0 };
data->callback = simple_callback;
my_uring_prep_fsync(sqe, bs->dsk.journal_fd, IORING_FSYNC_DATASYNC);
my_uring_prep_fsync(sqe, bs->journal.fd, IORING_FSYNC_DATASYNC);
wait_count++;
bs->ringloop->submit();
}
@@ -632,7 +544,7 @@ resume_1:
? bs->journal.len-bs->journal.block_size - (bs->journal.next_free-bs->journal.used_start)
: bs->journal.used_start - bs->journal.next_free),
bs->journal.used_start, bs->journal.next_free,
bs->data_alloc->get_free_count(), bs->dsk.block_count
bs->data_alloc->get_free_count(), bs->block_count
);
bs->journal.crc32_last = crc32_last;
return 0;
@@ -757,9 +669,9 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t done_pos, u
};
void *bmp = NULL;
void *bmp_from = (uint8_t*)je + sizeof(journal_entry_small_write);
if (bs->dsk.clean_entry_bitmap_size <= sizeof(void*))
if (bs->clean_entry_bitmap_size <= sizeof(void*))
{
memcpy(&bmp, bmp_from, bs->dsk.clean_entry_bitmap_size);
memcpy(&bmp, bmp_from, bs->clean_entry_bitmap_size);
}
else
{
@@ -767,8 +679,8 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t done_pos, u
// allocations for entry bitmaps. This can only be fixed by using
// a patched map with dynamic entry size, but not the btree_map,
// because it doesn't keep iterators valid all the time.
bmp = malloc_or_die(bs->dsk.clean_entry_bitmap_size);
memcpy(bmp, bmp_from, bs->dsk.clean_entry_bitmap_size);
bmp = malloc_or_die(bs->clean_entry_bitmap_size);
memcpy(bmp, bmp_from, bs->clean_entry_bitmap_size);
}
bs->dirty_db.emplace(ov, (dirty_entry){
.state = (BS_ST_SMALL_WRITE | BS_ST_SYNCED),
@@ -800,7 +712,7 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t done_pos, u
printf(
"je_big_write%s oid=%lx:%lx ver=%lu loc=%lu\n",
je->type == JE_BIG_WRITE_INSTANT ? "_instant" : "",
je->big_write.oid.inode, je->big_write.oid.stripe, je->big_write.version, je->big_write.location >> bs->dsk.block_order
je->big_write.oid.inode, je->big_write.oid.stripe, je->big_write.version, je->big_write.location >> bs->block_order
);
#endif
auto dirty_it = bs->dirty_db.upper_bound((obj_ver_id){
@@ -838,9 +750,9 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t done_pos, u
};
void *bmp = NULL;
void *bmp_from = (uint8_t*)je + sizeof(journal_entry_big_write);
if (bs->dsk.clean_entry_bitmap_size <= sizeof(void*))
if (bs->clean_entry_bitmap_size <= sizeof(void*))
{
memcpy(&bmp, bmp_from, bs->dsk.clean_entry_bitmap_size);
memcpy(&bmp, bmp_from, bs->clean_entry_bitmap_size);
}
else
{
@@ -848,8 +760,8 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t done_pos, u
// allocations for entry bitmaps. This can only be fixed by using
// a patched map with dynamic entry size, but not the btree_map,
// because it doesn't keep iterators valid all the time.
bmp = malloc_or_die(bs->dsk.clean_entry_bitmap_size);
memcpy(bmp, bmp_from, bs->dsk.clean_entry_bitmap_size);
bmp = malloc_or_die(bs->clean_entry_bitmap_size);
memcpy(bmp, bmp_from, bs->clean_entry_bitmap_size);
}
auto dirty_it = bs->dirty_db.emplace(ov, (dirty_entry){
.state = (BS_ST_BIG_WRITE | BS_ST_SYNCED),
@@ -860,7 +772,7 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t done_pos, u
.journal_sector = proc_pos,
.bitmap = bmp,
}).first;
if (bs->data_alloc->get(je->big_write.location >> bs->dsk.block_order))
if (bs->data_alloc->get(je->big_write.location >> bs->block_order))
{
// This is probably a big_write that's already flushed and freed, but it may
// also indicate a bug. So we remember such entries and recheck them afterwards.
@@ -873,11 +785,11 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t done_pos, u
#ifdef BLOCKSTORE_DEBUG
printf(
"Allocate block (journal) %lu: %lx:%lx v%lu\n",
je->big_write.location >> bs->dsk.block_order,
je->big_write.location >> bs->block_order,
ov.oid.inode, ov.oid.stripe, ov.version
);
#endif
bs->data_alloc->set(je->big_write.location >> bs->dsk.block_order, true);
bs->data_alloc->set(je->big_write.location >> bs->block_order, true);
}
bs->journal.used_sectors[proc_pos]++;
#ifdef BLOCKSTORE_DEBUG
@@ -1001,8 +913,8 @@ void blockstore_init_journal::erase_dirty_object(blockstore_dirty_db_t::iterator
if (exists && clean_loc == UINT64_MAX)
{
auto & sp = bs->inode_space_stats[oid.inode];
if (sp > bs->dsk.data_block_size)
sp -= bs->dsk.data_block_size;
if (sp > bs->data_block_size)
sp -= bs->data_block_size;
else
bs->inode_space_stats.erase(oid.inode);
}
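// Both sides of this hunk share the same saturating pattern: freeing the last
// clean copy of an object subtracts one data block from its inode's space
// counter, and the counter is erased instead of being allowed to underflow.
// A standalone sketch (hypothetical helper; assumes inode_space_stats is a
// std::map<uint64_t, uint64_t>):
static void on_object_freed(std::map<uint64_t, uint64_t> & space_stats,
    uint64_t inode, uint64_t data_block_size)
{
    auto & sp = space_stats[inode];
    if (sp > data_block_size)
        sp -= data_block_size;
    else
        space_stats.erase(inode); // drop the counter rather than underflow
}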

View File

@@ -3,32 +3,20 @@
#pragma once
struct blockstore_init_meta_buf
{
uint8_t *buf = NULL;
uint64_t size = 0;
uint64_t offset = 0;
int state = 0;
};
class blockstore_init_meta
{
blockstore_impl_t *bs;
int wait_state = 0;
bool zero_on_init = false;
void *metadata_buffer = NULL;
blockstore_init_meta_buf bufs[2] = {};
int submitted = 0;
uint64_t metadata_read = 0;
int prev = 0, prev_done = 0, done_len = 0, submitted = 0;
uint64_t done_cnt = 0, done_pos = 0;
uint64_t entries_loaded = 0;
struct io_uring_sqe *sqe;
struct ring_data_t *data;
uint64_t md_offset = 0;
uint64_t next_offset = 0;
uint64_t entries_loaded = 0;
unsigned entries_per_block = 0;
int i = 0, j = 0;
std::vector<uint64_t> entries_to_zero;
bool handle_meta_block(uint8_t *buf, uint64_t count, uint64_t done_cnt);
void handle_event(ring_data_t *data, int buf_num);
void handle_entries(void *entries, unsigned count, int block_order);
void handle_event(ring_data_t *data);
public:
blockstore_init_meta(blockstore_impl_t *bs);
int loop();
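// One side of this diff reads metadata with two rotating buffers (bufs[2],
// metadata_read, next_offset), the other with a single prev/done cursor. A
// rough sketch of a double-buffered pump, purely an assumption based on those
// fields (not the actual implementation): keep one buffer in flight while
// entries from the other are handled and, if any were zeroed, written back.
struct init_buf_sketch { uint64_t offset = 0, size = 0; int state = 0; }; // 0=idle, 1=in flight, 2=done
static void pump(init_buf_sketch (&bufs)[2], uint64_t & next_offset, uint64_t meta_len)
{
    for (auto & b: bufs)
    {
        if (b.state == 0 && next_offset < meta_len)
        {
            b.offset = next_offset;
            next_offset += b.size;
            b.state = 1; // a read would be submitted here; completion sets state = 2
        }
        else if (b.state == 2)
        {
            // parse entries in b; submit a write-back if any were zeroed
            b.state = 0;
        }
    }
}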

View File

@@ -175,7 +175,7 @@ void blockstore_impl_t::prepare_journal_sector_write(int cur_sector, blockstore_
};
data->callback = [this, flush_id = journal.submit_id](ring_data_t *data) { handle_journal_write(data, flush_id); };
my_uring_prep_writev(
sqe, dsk.journal_fd, &data->iov, 1, journal.offset + journal.sector_info[cur_sector].offset
sqe, journal.fd, &data->iov, 1, journal.offset + journal.sector_info[cur_sector].offset
);
}
journal.sector_info[cur_sector].dirty = false;
@@ -198,7 +198,10 @@ void blockstore_impl_t::handle_journal_write(ring_data_t *data, uint64_t flush_i
if (data->res != data->iov.iov_len)
{
// FIXME: our state becomes corrupted after a write error. maybe do something better than just die
disk_error_abort("journal write", data->res, data->iov.iov_len);
throw std::runtime_error(
"journal write failed ("+std::to_string(data->res)+" != "+std::to_string(data->iov.iov_len)+
"). in-memory state is corrupted. AAAAAAAaaaaaaaaa!!!111"
);
}
auto fl_it = journal.flushing_ops.upper_bound((pending_journaling_t){ .flush_id = flush_id });
if (fl_it != journal.flushing_ops.end() && fl_it->flush_id == flush_id)

View File

@@ -10,13 +10,11 @@
#define JOURNAL_MAGIC 0x4A33
#define JOURNAL_VERSION 1
#define JOURNAL_BUFFER_SIZE 4*1024*1024
#define JOURNAL_ENTRY_HEADER_SIZE 16
// We reserve some extra space for future stabilize requests during writes
// FIXME: This value should be dynamic, i.e. the blockstore ideally shouldn't
// allow writing more than can be stabilized afterwards
#define JOURNAL_STABILIZE_RESERVATION 65536
#define JOURNAL_INSTANT_RESERVATION 131072
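// The idea behind these reservations: admit a write only if, after it, the
// journal still has room for the stabilize (or instant-commit) entries that
// must follow, otherwise the journal could fill with data that can never be
// marked stable. A simplified sketch of such an admission check (assumption;
// the real logic lives in blockstore_journal_check_t::check_available):
static bool journal_fits(uint64_t used, uint64_t len, uint64_t need, uint64_t reservation)
{
    return used + need + reservation <= len;
}
// e.g. journal_fits(used, journal.len,
//     n_writes * entry_size + data_len, JOURNAL_STABILIZE_RESERVATION)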
// Journal entries
// Journal entries are linked to each other by their crc32 value
@@ -166,6 +164,7 @@ inline bool operator < (const pending_journaling_t & a, const pending_journaling
struct journal_t
{
int fd;
uint64_t device_size;
bool inmemory = false;
bool flush_journal = false;
void *buffer = NULL;

View File

@@ -4,10 +4,23 @@
#include <sys/file.h>
#include "blockstore_impl.h"
static uint32_t is_power_of_two(uint64_t value)
{
uint32_t l = 0;
while (value > 1)
{
if (value & 1)
{
return 64;
}
value = value >> 1;
l++;
}
return l;
}
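// Despite its name, is_power_of_two() returns log2(value) when value is a
// power of two and 64 (an invalid shift width) otherwise; parse_config()
// below keeps the result as block_order and rejects anything >= 64.
// Illustrative checks (assuming the function above is in scope):
//
// assert(is_power_of_two(131072) == 17); // 128 KiB block -> block_order = 17
// assert(is_power_of_two(4096) == 12);
// assert(is_power_of_two(100000) == 64); // not a power of two -> "Bad block size"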
void blockstore_impl_t::parse_config(blockstore_config_t & config)
{
// Common disk options
dsk.parse_config(config);
// Parse
if (config["readonly"] == "true" || config["readonly"] == "1" || config["readonly"] == "yes")
{
@@ -25,6 +38,10 @@ void blockstore_impl_t::parse_config(blockstore_config_t & config)
{
disable_journal_fsync = true;
}
if (config["disable_device_lock"] == "true" || config["disable_device_lock"] == "1" || config["disable_device_lock"] == "yes")
{
disable_flock = true;
}
if (config["flush_journal"] == "true" || config["flush_journal"] == "1" || config["flush_journal"] == "yes")
{
// Only flush journal and exit
@@ -39,11 +56,24 @@ void blockstore_impl_t::parse_config(blockstore_config_t & config)
immediate_commit = IMMEDIATE_SMALL;
}
metadata_buf_size = strtoull(config["meta_buf_size"].c_str(), NULL, 10);
cfg_journal_size = strtoull(config["journal_size"].c_str(), NULL, 10);
data_device = config["data_device"];
data_offset = strtoull(config["data_offset"].c_str(), NULL, 10);
cfg_data_size = strtoull(config["data_size"].c_str(), NULL, 10);
meta_device = config["meta_device"];
meta_offset = strtoull(config["meta_offset"].c_str(), NULL, 10);
data_block_size = strtoull(config["block_size"].c_str(), NULL, 10);
inmemory_meta = config["inmemory_metadata"] != "false";
journal_device = config["journal_device"];
journal.offset = strtoull(config["journal_offset"].c_str(), NULL, 10);
journal.sector_count = strtoull(config["journal_sector_buffer_count"].c_str(), NULL, 10);
journal.no_same_sector_overwrites = config["journal_no_same_sector_overwrites"] == "true" ||
config["journal_no_same_sector_overwrites"] == "1" || config["journal_no_same_sector_overwrites"] == "yes";
journal.inmemory = config["inmemory_journal"] != "false";
disk_alignment = strtoull(config["disk_alignment"].c_str(), NULL, 10);
journal_block_size = strtoull(config["journal_block_size"].c_str(), NULL, 10);
meta_block_size = strtoull(config["meta_block_size"].c_str(), NULL, 10);
bitmap_granularity = strtoull(config["bitmap_granularity"].c_str(), NULL, 10);
max_flusher_count = strtoull(config["max_flusher_count"].c_str(), NULL, 10);
if (!max_flusher_count)
max_flusher_count = strtoull(config["flusher_count"].c_str(), NULL, 10);
@@ -55,6 +85,14 @@ void blockstore_impl_t::parse_config(blockstore_config_t & config)
throttle_target_parallelism = strtoull(config["throttle_target_parallelism"].c_str(), NULL, 10);
throttle_threshold_us = strtoull(config["throttle_threshold_us"].c_str(), NULL, 10);
// Validate
if (!data_block_size)
{
data_block_size = (1 << DEFAULT_DATA_BLOCK_ORDER);
}
if ((block_order = is_power_of_two(data_block_size)) >= 64 || data_block_size < MIN_DATA_BLOCK_SIZE || data_block_size >= MAX_DATA_BLOCK_SIZE)
{
throw std::runtime_error("Bad block size");
}
if (!max_flusher_count)
{
max_flusher_count = 256;
@@ -67,6 +105,62 @@ void blockstore_impl_t::parse_config(blockstore_config_t & config)
{
max_write_iodepth = 128;
}
if (!disk_alignment)
{
disk_alignment = 4096;
}
else if (disk_alignment % DIRECT_IO_ALIGNMENT)
{
throw std::runtime_error("disk_alignment must be a multiple of "+std::to_string(DIRECT_IO_ALIGNMENT));
}
if (!journal_block_size)
{
journal_block_size = 4096;
}
else if (journal_block_size % DIRECT_IO_ALIGNMENT)
{
throw std::runtime_error("journal_block_size must be a multiple of "+std::to_string(DIRECT_IO_ALIGNMENT));
}
if (!meta_block_size)
{
meta_block_size = 4096;
}
else if (meta_block_size % DIRECT_IO_ALIGNMENT)
{
throw std::runtime_error("meta_block_size must be a multiple of "+std::to_string(DIRECT_IO_ALIGNMENT));
}
if (data_offset % disk_alignment)
{
throw std::runtime_error("data_offset must be a multiple of disk_alignment = "+std::to_string(disk_alignment));
}
if (!bitmap_granularity)
{
bitmap_granularity = DEFAULT_BITMAP_GRANULARITY;
}
else if (bitmap_granularity % disk_alignment)
{
throw std::runtime_error("Sparse write tracking granularity must be a multiple of disk_alignment = "+std::to_string(disk_alignment));
}
if (data_block_size % bitmap_granularity)
{
throw std::runtime_error("Block size must be a multiple of sparse write tracking granularity");
}
if (journal_device == meta_device || meta_device == "" && journal_device == data_device)
{
journal_device = "";
}
if (meta_device == data_device)
{
meta_device = "";
}
if (meta_offset % meta_block_size)
{
throw std::runtime_error("meta_offset must be a multiple of meta_block_size = "+std::to_string(meta_block_size));
}
if (journal.offset % journal_block_size)
{
throw std::runtime_error("journal_offset must be a multiple of journal_block_size = "+std::to_string(journal_block_size));
}
if (journal.sector_count < 2)
{
journal.sector_count = 32;
@@ -75,11 +169,11 @@ void blockstore_impl_t::parse_config(blockstore_config_t & config)
{
metadata_buf_size = 4*1024*1024;
}
if (dsk.meta_device == dsk.data_device)
if (meta_device == "")
{
disable_meta_fsync = disable_data_fsync;
}
if (dsk.journal_device == dsk.meta_device)
if (journal_device == "")
{
disable_journal_fsync = disable_meta_fsync;
}
@@ -108,46 +202,238 @@ void blockstore_impl_t::parse_config(blockstore_config_t & config)
throttle_threshold_us = 50;
}
// init some fields
journal.block_size = dsk.journal_block_size;
journal.next_free = dsk.journal_block_size;
journal.used_start = dsk.journal_block_size;
clean_entry_bitmap_size = data_block_size / bitmap_granularity / 8;
clean_entry_size = sizeof(clean_disk_entry) + 2*clean_entry_bitmap_size;
journal.block_size = journal_block_size;
journal.next_free = journal_block_size;
journal.used_start = journal_block_size;
// no free space because sector is initially unmapped
journal.in_sector_pos = dsk.journal_block_size;
journal.in_sector_pos = journal_block_size;
}
void blockstore_impl_t::calc_lengths()
{
dsk.calc_lengths();
journal.len = dsk.journal_len;
journal.block_size = dsk.journal_block_size;
journal.offset = dsk.journal_offset;
// data
data_len = data_device_size - data_offset;
if (data_fd == meta_fd && data_offset < meta_offset)
{
data_len = meta_offset - data_offset;
}
if (data_fd == journal.fd && data_offset < journal.offset)
{
data_len = data_len < journal.offset-data_offset
? data_len : journal.offset-data_offset;
}
if (cfg_data_size != 0)
{
if (data_len < cfg_data_size)
{
throw std::runtime_error("Data area ("+std::to_string(data_len)+
" bytes) is less than configured size ("+std::to_string(cfg_data_size)+" bytes)");
}
data_len = cfg_data_size;
}
// meta
uint64_t meta_area_size = (meta_fd == data_fd ? data_device_size : meta_device_size) - meta_offset;
if (meta_fd == data_fd && meta_offset <= data_offset)
{
meta_area_size = data_offset - meta_offset;
}
if (meta_fd == journal.fd && meta_offset <= journal.offset)
{
meta_area_size = meta_area_size < journal.offset-meta_offset
? meta_area_size : journal.offset-meta_offset;
}
// journal
journal.len = (journal.fd == data_fd ? data_device_size : (journal.fd == meta_fd ? meta_device_size : journal.device_size)) - journal.offset;
if (journal.fd == data_fd && journal.offset <= data_offset)
{
journal.len = data_offset - journal.offset;
}
if (journal.fd == meta_fd && journal.offset <= meta_offset)
{
journal.len = journal.len < meta_offset-journal.offset
? journal.len : meta_offset-journal.offset;
}
// required metadata size
block_count = data_len / data_block_size;
meta_len = (1 + (block_count - 1 + meta_block_size / clean_entry_size) / (meta_block_size / clean_entry_size)) * meta_block_size;
if (meta_area_size < meta_len)
{
throw std::runtime_error("Metadata area is too small, need at least "+std::to_string(meta_len)+" bytes");
}
if (inmemory_meta)
{
metadata_buffer = memalign(MEM_ALIGNMENT, dsk.meta_len);
metadata_buffer = memalign(MEM_ALIGNMENT, meta_len);
if (!metadata_buffer)
throw std::runtime_error("Failed to allocate memory for the metadata");
}
else if (dsk.clean_entry_bitmap_size)
else if (clean_entry_bitmap_size)
{
clean_bitmap = (uint8_t*)malloc(dsk.block_count * 2*dsk.clean_entry_bitmap_size);
clean_bitmap = (uint8_t*)malloc(block_count * 2*clean_entry_bitmap_size);
if (!clean_bitmap)
throw std::runtime_error("Failed to allocate memory for the metadata sparse write bitmap");
}
// requested journal size
if (cfg_journal_size > journal.len)
{
throw std::runtime_error("Requested journal_size is too large");
}
else if (cfg_journal_size > 0)
{
journal.len = cfg_journal_size;
}
if (journal.len < MIN_JOURNAL_SIZE)
{
throw std::runtime_error("Journal is too small, need at least "+std::to_string(MIN_JOURNAL_SIZE)+" bytes");
}
if (journal.inmemory)
{
journal.buffer = memalign(MEM_ALIGNMENT, journal.len);
if (!journal.buffer)
throw std::runtime_error("Failed to allocate memory for journal");
}
}
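// A note on the required-metadata-size formula above: with
// epb = meta_block_size / clean_entry_size entries per block,
// (block_count - 1 + epb) / epb is the integer-ceiling idiom for
// ceil(block_count / epb), and the leading "1 +" reserves one extra
// metadata block. Worked numbers under assumed sizes (illustrative):
//
// 1 GiB of data, 128 KiB blocks, 4 KiB meta blocks, 32-byte clean entries:
// block_count = (1 << 30) / (128 * 1024) = 8192
// epb = 4096 / 32 = 128
// (8192 - 1 + 128) / 128 = 64 = ceil(8192 / 128)
// meta_len = (1 + 64) * 4096 = 266240 bytes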
static void check_size(int fd, uint64_t *size, uint64_t *sectsize, std::string name)
{
int sect;
struct stat st;
if (fstat(fd, &st) < 0)
{
throw std::runtime_error("Failed to stat "+name);
}
if (S_ISREG(st.st_mode))
{
*size = st.st_size;
if (sectsize)
{
*sectsize = st.st_blksize;
}
}
else if (S_ISBLK(st.st_mode))
{
if (ioctl(fd, BLKGETSIZE64, size) < 0 ||
ioctl(fd, BLKSSZGET, &sect) < 0)
{
throw std::runtime_error("Failed to get "+name+" size or block size: "+strerror(errno));
}
if (sectsize)
{
*sectsize = sect;
}
}
else
{
journal.sector_buf = (uint8_t*)memalign(MEM_ALIGNMENT, journal.sector_count * dsk.journal_block_size);
if (!journal.sector_buf)
throw std::bad_alloc();
throw std::runtime_error(name+" is neither a file nor a block device");
}
}
void blockstore_impl_t::open_data()
{
data_fd = open(data_device.c_str(), O_DIRECT|O_RDWR);
if (data_fd == -1)
{
throw std::runtime_error("Failed to open data device");
}
check_size(data_fd, &data_device_size, &data_device_sect, "data device");
if (disk_alignment % data_device_sect)
{
throw std::runtime_error(
"disk_alignment ("+std::to_string(disk_alignment)+
") is not a multiple of data device sector size ("+std::to_string(data_device_sect)+")"
);
}
if (data_offset >= data_device_size)
{
throw std::runtime_error("data_offset exceeds device size = "+std::to_string(data_device_size));
}
if (!disable_flock && flock(data_fd, LOCK_EX|LOCK_NB) != 0)
{
throw std::runtime_error(std::string("Failed to lock data device: ") + strerror(errno));
}
}
void blockstore_impl_t::open_meta()
{
if (meta_device != "")
{
meta_offset = 0;
meta_fd = open(meta_device.c_str(), O_DIRECT|O_RDWR);
if (meta_fd == -1)
{
throw std::runtime_error("Failed to open metadata device");
}
check_size(meta_fd, &meta_device_size, &meta_device_sect, "metadata device");
if (meta_offset >= meta_device_size)
{
throw std::runtime_error("meta_offset exceeds device size = "+std::to_string(meta_device_size));
}
if (!disable_flock && flock(meta_fd, LOCK_EX|LOCK_NB) != 0)
{
throw std::runtime_error(std::string("Failed to lock metadata device: ") + strerror(errno));
}
}
else
{
meta_fd = data_fd;
meta_device_sect = data_device_sect;
meta_device_size = 0;
if (meta_offset >= data_device_size)
{
throw std::runtime_error("meta_offset exceeds device size = "+std::to_string(data_device_size));
}
}
if (meta_block_size % meta_device_sect)
{
throw std::runtime_error(
"meta_block_size ("+std::to_string(meta_block_size)+
") is not a multiple of data device sector size ("+std::to_string(meta_device_sect)+")"
);
}
}
void blockstore_impl_t::open_journal()
{
if (journal_device != "")
{
journal.fd = open(journal_device.c_str(), O_DIRECT|O_RDWR);
if (journal.fd == -1)
{
throw std::runtime_error("Failed to open journal device");
}
check_size(journal.fd, &journal.device_size, &journal_device_sect, "journal device");
if (!disable_flock && flock(journal.fd, LOCK_EX|LOCK_NB) != 0)
{
throw std::runtime_error(std::string("Failed to lock journal device: ") + strerror(errno));
}
}
else
{
journal.fd = meta_fd;
journal_device_sect = meta_device_sect;
journal.device_size = 0;
if (journal.offset >= data_device_size)
{
throw std::runtime_error("journal_offset exceeds device size");
}
}
journal.sector_info = (journal_sector_info_t*)calloc(journal.sector_count, sizeof(journal_sector_info_t));
if (!journal.sector_info)
{
throw std::bad_alloc();
}
if (!journal.inmemory)
{
journal.sector_buf = (uint8_t*)memalign(MEM_ALIGNMENT, journal.sector_count * journal_block_size);
if (!journal.sector_buf)
throw std::bad_alloc();
}
if (journal_block_size % journal_device_sect)
{
throw std::runtime_error(
"journal_block_size ("+std::to_string(journal_block_size)+
") is not a multiple of journal device sector size ("+std::to_string(journal_device_sect)+")"
);
}
}

View File

@@ -32,9 +32,9 @@ int blockstore_impl_t::fulfill_read_push(blockstore_op_t *op, void *buf, uint64_
PRIV(op)->pending_ops++;
my_uring_prep_readv(
sqe,
IS_JOURNAL(item_state) ? dsk.journal_fd : dsk.data_fd,
IS_JOURNAL(item_state) ? journal.fd : data_fd,
&data->iov, 1,
(IS_JOURNAL(item_state) ? dsk.journal_offset : dsk.data_offset) + offset
(IS_JOURNAL(item_state) ? journal.offset : data_offset) + offset
);
data->callback = [this, op](ring_data_t *data) { handle_read_event(data, op); };
return 1;
@@ -42,7 +42,7 @@ int blockstore_impl_t::fulfill_read_push(blockstore_op_t *op, void *buf, uint64_
// FIXME I've seen a bug here so I want some tests
int blockstore_impl_t::fulfill_read(blockstore_op_t *read_op, uint64_t &fulfilled, uint32_t item_start, uint32_t item_end,
uint32_t item_state, uint64_t item_version, uint64_t item_location, uint64_t journal_sector)
uint32_t item_state, uint64_t item_version, uint64_t item_location)
{
uint32_t cur_start = item_start;
if (cur_start < read_op->offset + read_op->len && item_end > read_op->offset)
@@ -72,7 +72,6 @@ int blockstore_impl_t::fulfill_read(blockstore_op_t *read_op, uint64_t &fulfille
fulfill_read_t el = {
.offset = cur_start,
.len = it == PRIV(read_op)->read_vec.end() || it->offset >= item_end ? item_end-cur_start : it->offset-cur_start,
.journal_sector = journal_sector,
};
it = PRIV(read_op)->read_vec.insert(it, el);
if (!fulfill_read_push(read_op,
@@ -98,15 +97,15 @@ endwhile:
uint8_t* blockstore_impl_t::get_clean_entry_bitmap(uint64_t block_loc, int offset)
{
uint8_t *clean_entry_bitmap;
uint64_t meta_loc = block_loc >> dsk.block_order;
uint64_t meta_loc = block_loc >> block_order;
if (inmemory_meta)
{
uint64_t sector = (meta_loc / (dsk.meta_block_size / dsk.clean_entry_size)) * dsk.meta_block_size;
uint64_t pos = (meta_loc % (dsk.meta_block_size / dsk.clean_entry_size));
clean_entry_bitmap = ((uint8_t*)metadata_buffer + sector + pos*dsk.clean_entry_size + sizeof(clean_disk_entry) + offset);
uint64_t sector = (meta_loc / (meta_block_size / clean_entry_size)) * meta_block_size;
uint64_t pos = (meta_loc % (meta_block_size / clean_entry_size));
clean_entry_bitmap = ((uint8_t*)metadata_buffer + sector + pos*clean_entry_size + sizeof(clean_disk_entry) + offset);
}
else
clean_entry_bitmap = (uint8_t*)(clean_bitmap + meta_loc*2*dsk.clean_entry_bitmap_size + offset);
clean_entry_bitmap = (uint8_t*)(clean_bitmap + meta_loc*2*clean_entry_bitmap_size + offset);
return clean_entry_bitmap;
}
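// Each data block carries 2*clean_entry_bitmap_size bytes of bitmaps back to
// back; callers above pass offset 0 or clean_entry_bitmap_size to select one
// of the two, and a byte location is converted to a block number with
// block_order. The cached-bitmaps branch reduces to this sketch (hypothetical
// helper name):
static uint8_t *bitmap_of(uint8_t *clean_bitmap, uint64_t block_loc,
    int block_order, uint64_t clean_entry_bitmap_size, int offset)
{
    uint64_t meta_loc = block_loc >> block_order; // byte location -> block number
    return clean_bitmap + meta_loc * 2 * clean_entry_bitmap_size + offset;
}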
@@ -139,7 +138,7 @@ int blockstore_impl_t::dequeue_read(blockstore_op_t *read_op)
while (dirty_it->first.oid == read_op->oid)
{
dirty_entry& dirty = dirty_it->second;
bool version_ok = !IS_IN_FLIGHT(dirty.state) && read_op->version >= dirty_it->first.version;
bool version_ok = read_op->version >= dirty_it->first.version;
if (IS_SYNCED(dirty.state))
{
if (!version_ok && read_op->version != 0)
@@ -153,14 +152,12 @@ int blockstore_impl_t::dequeue_read(blockstore_op_t *read_op)
result_version = dirty_it->first.version;
if (read_op->bitmap)
{
void *bmp_ptr = (dsk.clean_entry_bitmap_size > sizeof(void*) ? dirty_it->second.bitmap : &dirty_it->second.bitmap);
memcpy(read_op->bitmap, bmp_ptr, dsk.clean_entry_bitmap_size);
void *bmp_ptr = (clean_entry_bitmap_size > sizeof(void*) ? dirty_it->second.bitmap : &dirty_it->second.bitmap);
memcpy(read_op->bitmap, bmp_ptr, clean_entry_bitmap_size);
}
}
// If inmemory_journal is false, journal trim will have to wait until the read is completed
if (!fulfill_read(read_op, fulfilled, dirty.offset, dirty.offset + dirty.len,
dirty.state, dirty_it->first.version, dirty.location + (IS_JOURNAL(dirty.state) ? 0 : dirty.offset),
(IS_JOURNAL(dirty.state) ? dirty.journal_sector+1 : 0)))
dirty.state, dirty_it->first.version, dirty.location + (IS_JOURNAL(dirty.state) ? 0 : dirty.offset)))
{
// need to wait. undo added requests, don't dequeue op
PRIV(read_op)->read_vec.clear();
@@ -174,23 +171,22 @@ int blockstore_impl_t::dequeue_read(blockstore_op_t *read_op)
dirty_it--;
}
}
if (clean_found)
if (clean_it != clean_db.end())
{
if (!result_version)
{
result_version = clean_it->second.version;
if (read_op->bitmap)
{
void *bmp_ptr = get_clean_entry_bitmap(clean_it->second.location, dsk.clean_entry_bitmap_size);
memcpy(read_op->bitmap, bmp_ptr, dsk.clean_entry_bitmap_size);
void *bmp_ptr = get_clean_entry_bitmap(clean_it->second.location, clean_entry_bitmap_size);
memcpy(read_op->bitmap, bmp_ptr, clean_entry_bitmap_size);
}
}
if (fulfilled < read_op->len)
{
if (!dsk.clean_entry_bitmap_size)
if (!clean_entry_bitmap_size)
{
if (!fulfill_read(read_op, fulfilled, 0, dsk.data_block_size,
(BS_ST_BIG_WRITE | BS_ST_STABLE), 0, clean_it->second.location, 0))
if (!fulfill_read(read_op, fulfilled, 0, data_block_size, (BS_ST_BIG_WRITE | BS_ST_STABLE), 0, clean_it->second.location))
{
// need to wait. undo added requests, don't dequeue op
PRIV(read_op)->read_vec.clear();
@@ -200,7 +196,7 @@ int blockstore_impl_t::dequeue_read(blockstore_op_t *read_op)
else
{
uint8_t *clean_entry_bitmap = get_clean_entry_bitmap(clean_it->second.location, 0);
uint64_t bmp_start = 0, bmp_end = 0, bmp_size = dsk.data_block_size/dsk.bitmap_granularity;
uint64_t bmp_start = 0, bmp_end = 0, bmp_size = data_block_size/bitmap_granularity;
while (bmp_start < bmp_size)
{
while (!(clean_entry_bitmap[bmp_end >> 3] & (1 << (bmp_end & 0x7))) && bmp_end < bmp_size)
@@ -210,8 +206,8 @@ int blockstore_impl_t::dequeue_read(blockstore_op_t *read_op)
if (bmp_end > bmp_start)
{
// fill with zeroes
assert(fulfill_read(read_op, fulfilled, bmp_start * dsk.bitmap_granularity,
bmp_end * dsk.bitmap_granularity, (BS_ST_DELETE | BS_ST_STABLE), 0, 0, 0));
assert(fulfill_read(read_op, fulfilled, bmp_start * bitmap_granularity,
bmp_end * bitmap_granularity, (BS_ST_DELETE | BS_ST_STABLE), 0, 0));
}
bmp_start = bmp_end;
while (clean_entry_bitmap[bmp_end >> 3] & (1 << (bmp_end & 0x7)) && bmp_end < bmp_size)
@@ -220,9 +216,9 @@ int blockstore_impl_t::dequeue_read(blockstore_op_t *read_op)
}
if (bmp_end > bmp_start)
{
if (!fulfill_read(read_op, fulfilled, bmp_start * dsk.bitmap_granularity,
bmp_end * dsk.bitmap_granularity, (BS_ST_BIG_WRITE | BS_ST_STABLE), 0,
clean_it->second.location + bmp_start * dsk.bitmap_granularity, 0))
if (!fulfill_read(read_op, fulfilled, bmp_start * bitmap_granularity,
bmp_end * bitmap_granularity, (BS_ST_BIG_WRITE | BS_ST_STABLE), 0,
clean_it->second.location + bmp_start * bitmap_granularity))
{
// need to wait. undo added requests, don't dequeue op
PRIV(read_op)->read_vec.clear();
@@ -237,7 +233,7 @@ int blockstore_impl_t::dequeue_read(blockstore_op_t *read_op)
else if (fulfilled < read_op->len)
{
// fill remaining parts with zeroes
assert(fulfill_read(read_op, fulfilled, 0, dsk.data_block_size, (BS_ST_DELETE | BS_ST_STABLE), 0, 0, 0));
assert(fulfill_read(read_op, fulfilled, 0, data_block_size, (BS_ST_DELETE | BS_ST_STABLE), 0, 0));
}
assert(fulfilled == read_op->len);
read_op->version = result_version;
@@ -253,15 +249,6 @@ int blockstore_impl_t::dequeue_read(blockstore_op_t *read_op)
FINISH_OP(read_op);
return 2;
}
if (!journal.inmemory)
{
// Journal trim has to wait until the read is completed - record journal sector usage
for (auto & rv: PRIV(read_op)->read_vec)
{
if (rv.journal_sector)
journal.used_sectors[rv.journal_sector-1]++;
}
}
read_op->retval = 0;
return 2;
}
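// The clean-entry path above walks the allocation bitmap in runs: a clear bit
// means the corresponding bitmap_granularity bytes were never written (served
// as zeroes), a set bit means they exist in the clean data block (served by a
// read). A standalone sketch of that run-length scan (illustrative; bounds
// checks reordered for clarity):
static void scan_bitmap_runs(const uint8_t *bmp, uint64_t bmp_size /* in bits */)
{
    uint64_t s = 0, e = 0;
    while (s < bmp_size)
    {
        while (e < bmp_size && !(bmp[e >> 3] & (1 << (e & 7))))
            e++;
        if (e > s)
            printf("zero-fill bits [%lu, %lu)\n", s, e); // hole: return zeroes
        s = e;
        while (e < bmp_size && (bmp[e >> 3] & (1 << (e & 7))))
            e++;
        if (e > s)
            printf("read bits [%lu, %lu)\n", s, e); // data: read from the clean block
        s = e;
    }
}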
@@ -277,22 +264,6 @@ void blockstore_impl_t::handle_read_event(ring_data_t *data, blockstore_op_t *op
}
if (PRIV(op)->pending_ops == 0)
{
if (!journal.inmemory)
{
// Release journal sector usage
for (auto & rv: PRIV(op)->read_vec)
{
if (rv.journal_sector)
{
auto used = --journal.used_sectors[rv.journal_sector-1];
if (used == 0)
{
journal.used_sectors.erase(rv.journal_sector-1);
flusher->mark_trim_possible();
}
}
}
}
if (op->retval == 0)
op->retval = op->len;
FINISH_OP(op);
@@ -317,8 +288,8 @@ int blockstore_impl_t::read_bitmap(object_id oid, uint64_t target_version, void
*result_version = dirty_it->first.version;
if (bitmap)
{
void *bmp_ptr = (dsk.clean_entry_bitmap_size > sizeof(void*) ? dirty_it->second.bitmap : &dirty_it->second.bitmap);
memcpy(bitmap, bmp_ptr, dsk.clean_entry_bitmap_size);
void *bmp_ptr = (clean_entry_bitmap_size > sizeof(void*) ? dirty_it->second.bitmap : &dirty_it->second.bitmap);
memcpy(bitmap, bmp_ptr, clean_entry_bitmap_size);
}
return 0;
}
@@ -335,14 +306,14 @@ int blockstore_impl_t::read_bitmap(object_id oid, uint64_t target_version, void
*result_version = clean_it->second.version;
if (bitmap)
{
void *bmp_ptr = get_clean_entry_bitmap(clean_it->second.location, dsk.clean_entry_bitmap_size);
memcpy(bitmap, bmp_ptr, dsk.clean_entry_bitmap_size);
void *bmp_ptr = get_clean_entry_bitmap(clean_it->second.location, clean_entry_bitmap_size);
memcpy(bitmap, bmp_ptr, clean_entry_bitmap_size);
}
return 0;
}
if (result_version)
*result_version = 0;
if (bitmap)
memset(bitmap, 0, dsk.clean_entry_bitmap_size);
memset(bitmap, 0, clean_entry_bitmap_size);
return -ENOENT;
}

View File

@@ -112,7 +112,7 @@ resume_2:
if (!disable_journal_fsync)
{
BS_SUBMIT_GET_SQE(sqe, data);
my_uring_prep_fsync(sqe, dsk.journal_fd, IORING_FSYNC_DATASYNC);
my_uring_prep_fsync(sqe, journal.fd, IORING_FSYNC_DATASYNC);
data->iov = { 0 };
data->callback = [this, op](ring_data_t *data) { handle_write_event(data, op); };
PRIV(op)->min_flushed_journal_sector = PRIV(op)->max_flushed_journal_sector = 0;
@@ -127,6 +127,7 @@ resume_4:
{
mark_rolled_back(*v);
}
flusher->mark_trim_possible();
// Acknowledge op
op->retval = 0;
FINISH_OP(op);
@@ -216,12 +217,12 @@ void blockstore_impl_t::erase_dirty(blockstore_dirty_db_t::iterator dirty_start,
dirty_it->second.location != UINT64_MAX)
{
#ifdef BLOCKSTORE_DEBUG
printf("Free block %lu from %lx:%lx v%lu\n", dirty_it->second.location >> dsk.block_order,
printf("Free block %lu from %lx:%lx v%lu\n", dirty_it->second.location >> block_order,
dirty_it->first.oid.inode, dirty_it->first.oid.stripe, dirty_it->first.version);
#endif
data_alloc->set(dirty_it->second.location >> dsk.block_order, false);
data_alloc->set(dirty_it->second.location >> block_order, false);
}
auto used = --journal.used_sectors[dirty_it->second.journal_sector];
int used = --journal.used_sectors[dirty_it->second.journal_sector];
#ifdef BLOCKSTORE_DEBUG
printf(
"remove usage of journal offset %08lx by %lx:%lx v%lu (%d refs)\n", dirty_it->second.journal_sector,
@@ -231,9 +232,8 @@ void blockstore_impl_t::erase_dirty(blockstore_dirty_db_t::iterator dirty_start,
if (used == 0)
{
journal.used_sectors.erase(dirty_it->second.journal_sector);
flusher->mark_trim_possible();
}
if (dsk.clean_entry_bitmap_size > sizeof(void*))
if (clean_entry_bitmap_size > sizeof(void*))
{
free(dirty_it->second.bitmap);
dirty_it->second.bitmap = NULL;

View File

@@ -137,7 +137,7 @@ resume_2:
if (!disable_journal_fsync)
{
BS_SUBMIT_GET_SQE(sqe, data);
my_uring_prep_fsync(sqe, dsk.journal_fd, IORING_FSYNC_DATASYNC);
my_uring_prep_fsync(sqe, journal.fd, IORING_FSYNC_DATASYNC);
data->iov = { 0 };
data->callback = [this, op](ring_data_t *data) { handle_write_event(data, op); };
PRIV(op)->min_flushed_journal_sector = PRIV(op)->max_flushed_journal_sector = 0;
@@ -195,14 +195,14 @@ void blockstore_impl_t::mark_stable(const obj_ver_id & v, bool forget_dirty)
}
if (!exists)
{
inode_space_stats[dirty_it->first.oid.inode] += dsk.data_block_size;
inode_space_stats[dirty_it->first.oid.inode] += data_block_size;
}
}
else if (IS_DELETE(dirty_it->second.state))
{
auto & sp = inode_space_stats[dirty_it->first.oid.inode];
if (sp > dsk.data_block_size)
sp -= dsk.data_block_size;
if (sp > data_block_size)
sp -= data_block_size;
else
inode_space_stats.erase(dirty_it->first.oid.inode);
}

View File

@@ -60,7 +60,7 @@ int blockstore_impl_t::continue_sync(blockstore_op_t *op, bool queue_has_in_prog
if (!disable_data_fsync)
{
BS_SUBMIT_GET_SQE(sqe, data);
my_uring_prep_fsync(sqe, dsk.data_fd, IORING_FSYNC_DATASYNC);
my_uring_prep_fsync(sqe, data_fd, IORING_FSYNC_DATASYNC);
data->iov = { 0 };
data->callback = [this, op](ring_data_t *data) { handle_write_event(data, op); };
PRIV(op)->min_flushed_journal_sector = PRIV(op)->max_flushed_journal_sector = 0;
@@ -79,7 +79,7 @@ int blockstore_impl_t::continue_sync(blockstore_op_t *op, bool queue_has_in_prog
// Check space in the journal and journal memory buffers
blockstore_journal_check_t space_check(this);
if (!space_check.check_available(op, PRIV(op)->sync_big_writes.size(),
sizeof(journal_entry_big_write) + dsk.clean_entry_bitmap_size, JOURNAL_STABILIZE_RESERVATION))
sizeof(journal_entry_big_write) + clean_entry_bitmap_size, JOURNAL_STABILIZE_RESERVATION))
{
return 0;
}
@@ -90,7 +90,7 @@ int blockstore_impl_t::continue_sync(blockstore_op_t *op, bool queue_has_in_prog
int s = 0;
while (it != PRIV(op)->sync_big_writes.end())
{
if (!journal.entry_fits(sizeof(journal_entry_big_write) + dsk.clean_entry_bitmap_size) &&
if (!journal.entry_fits(sizeof(journal_entry_big_write) + clean_entry_bitmap_size) &&
journal.sector_info[journal.cur_sector].dirty)
{
prepare_journal_sector_write(journal.cur_sector, op);
@@ -99,7 +99,7 @@ int blockstore_impl_t::continue_sync(blockstore_op_t *op, bool queue_has_in_prog
auto & dirty_entry = dirty_db.at(*it);
journal_entry_big_write *je = (journal_entry_big_write*)prefill_single_journal_entry(
journal, (dirty_entry.state & BS_ST_INSTANT) ? JE_BIG_WRITE_INSTANT : JE_BIG_WRITE,
sizeof(journal_entry_big_write) + dsk.clean_entry_bitmap_size
sizeof(journal_entry_big_write) + clean_entry_bitmap_size
);
dirty_entry.journal_sector = journal.sector_info[journal.cur_sector].offset;
journal.used_sectors[journal.sector_info[journal.cur_sector].offset]++;
@@ -115,8 +115,8 @@ int blockstore_impl_t::continue_sync(blockstore_op_t *op, bool queue_has_in_prog
je->offset = dirty_entry.offset;
je->len = dirty_entry.len;
je->location = dirty_entry.location;
memcpy((void*)(je+1), (dsk.clean_entry_bitmap_size > sizeof(void*)
? dirty_entry.bitmap : &dirty_entry.bitmap), dsk.clean_entry_bitmap_size);
memcpy((void*)(je+1), (clean_entry_bitmap_size > sizeof(void*)
? dirty_entry.bitmap : &dirty_entry.bitmap), clean_entry_bitmap_size);
je->crc32 = je_crc32((journal_entry*)je);
journal.crc32_last = je->crc32;
it++;
@@ -132,7 +132,7 @@ int blockstore_impl_t::continue_sync(blockstore_op_t *op, bool queue_has_in_prog
if (!disable_journal_fsync)
{
BS_SUBMIT_GET_SQE(sqe, data);
my_uring_prep_fsync(sqe, dsk.journal_fd, IORING_FSYNC_DATASYNC);
my_uring_prep_fsync(sqe, journal.fd, IORING_FSYNC_DATASYNC);
data->iov = { 0 };
data->callback = [this, op](ring_data_t *data) { handle_write_event(data, op); };
PRIV(op)->min_flushed_journal_sector = PRIV(op)->max_flushed_journal_sector = 0;

View File

@@ -10,9 +10,9 @@ bool blockstore_impl_t::enqueue_write(blockstore_op_t *op)
bool wait_big = false, wait_del = false;
void *bmp = NULL;
uint64_t version = 1;
if (!is_del && dsk.clean_entry_bitmap_size > sizeof(void*))
if (!is_del && clean_entry_bitmap_size > sizeof(void*))
{
bmp = calloc_or_die(1, dsk.clean_entry_bitmap_size);
bmp = calloc_or_die(1, clean_entry_bitmap_size);
}
if (dirty_db.size() > 0)
{
@@ -32,8 +32,8 @@ bool blockstore_impl_t::enqueue_write(blockstore_op_t *op)
: ((dirty_it->second.state & BS_ST_WORKFLOW_MASK) == BS_ST_WAIT_BIG);
if (!is_del && !deleted)
{
if (dsk.clean_entry_bitmap_size > sizeof(void*))
memcpy(bmp, dirty_it->second.bitmap, dsk.clean_entry_bitmap_size);
if (clean_entry_bitmap_size > sizeof(void*))
memcpy(bmp, dirty_it->second.bitmap, clean_entry_bitmap_size);
else
bmp = dirty_it->second.bitmap;
}
@@ -48,8 +48,8 @@ bool blockstore_impl_t::enqueue_write(blockstore_op_t *op)
version = clean_it->second.version + 1;
if (!is_del)
{
void *bmp_ptr = get_clean_entry_bitmap(clean_it->second.location, dsk.clean_entry_bitmap_size);
memcpy((dsk.clean_entry_bitmap_size > sizeof(void*) ? bmp : &bmp), bmp_ptr, dsk.clean_entry_bitmap_size);
void *bmp_ptr = get_clean_entry_bitmap(clean_it->second.location, clean_entry_bitmap_size);
memcpy((clean_entry_bitmap_size > sizeof(void*) ? bmp : &bmp), bmp_ptr, clean_entry_bitmap_size);
}
}
else
@@ -89,18 +89,15 @@ bool blockstore_impl_t::enqueue_write(blockstore_op_t *op)
else
{
// Invalid version requested
#ifdef BLOCKSTORE_DEBUG
printf("Write %lx:%lx v%lu requested, but we already have v%lu\n", op->oid.inode, op->oid.stripe, op->version, version);
#endif
op->retval = -EEXIST;
if (!is_del && dsk.clean_entry_bitmap_size > sizeof(void*))
if (!is_del && clean_entry_bitmap_size > sizeof(void*))
{
free(bmp);
}
return false;
}
}
if (wait_big && !is_del && !deleted && op->len < dsk.data_block_size &&
if (wait_big && !is_del && !deleted && op->len < data_block_size &&
immediate_commit != IMMEDIATE_ALL)
{
// Issue an additional sync so that the previous big write can reach the journal
@@ -118,14 +115,14 @@ bool blockstore_impl_t::enqueue_write(blockstore_op_t *op)
else if (!wait_del)
printf("Write %lx:%lx v%lu offset=%u len=%u\n", op->oid.inode, op->oid.stripe, op->version, op->offset, op->len);
#endif
// No strict need to add it into dirty_db here except maybe for listings to return
// correct data when there are inflight operations in the queue
// FIXME No strict need to add it into dirty_db here, it's just left
// from the previous implementation where reads waited for writes
uint32_t state;
if (is_del)
state = BS_ST_DELETE | BS_ST_IN_FLIGHT;
else
{
state = (op->len == dsk.data_block_size || deleted ? BS_ST_BIG_WRITE : BS_ST_SMALL_WRITE);
state = (op->len == data_block_size || deleted ? BS_ST_BIG_WRITE : BS_ST_SMALL_WRITE);
if (state == BS_ST_SMALL_WRITE && throttle_small_writes)
clock_gettime(CLOCK_REALTIME, &PRIV(op)->tv_begin);
if (wait_del)
@@ -139,10 +136,10 @@ bool blockstore_impl_t::enqueue_write(blockstore_op_t *op)
if (op->bitmap)
{
// Only allow to overwrite part of the object bitmap respective to the write's offset/len
uint8_t *bmp_ptr = (uint8_t*)(dsk.clean_entry_bitmap_size > sizeof(void*) ? bmp : &bmp);
uint32_t bit = op->offset/dsk.bitmap_granularity;
uint32_t bits_left = op->len/dsk.bitmap_granularity;
while (!(bit % 8) && bits_left >= 8)
uint8_t *bmp_ptr = (uint8_t*)(clean_entry_bitmap_size > sizeof(void*) ? bmp : &bmp);
uint32_t bit = op->offset/bitmap_granularity;
uint32_t bits_left = op->len/bitmap_granularity;
while (!(bit % 8) && bits_left > 8)
{
// Copy bytes
bmp_ptr[bit/8] = ((uint8_t*)op->bitmap)[bit/8];
@@ -178,22 +175,15 @@ void blockstore_impl_t::cancel_all_writes(blockstore_op_t *op, blockstore_dirty_
{
while (dirty_it != dirty_db.end() && dirty_it->first.oid == op->oid)
{
if (dsk.clean_entry_bitmap_size > sizeof(void*))
if (clean_entry_bitmap_size > sizeof(void*))
free(dirty_it->second.bitmap);
dirty_db.erase(dirty_it++);
}
bool found = false;
for (auto other_op: submit_queue)
{
if (!other_op)
{
// operations freed during submission are zeroed in the queue
}
else if (other_op == op)
{
// <op> may be present in queue multiple times due to moving operations in submit_queue
if (!found && other_op == op)
found = true;
}
else if (found && other_op->oid == op->oid &&
(other_op->opcode == BS_OP_WRITE || other_op->opcode == BS_OP_WRITE_STABLE))
{
@@ -261,8 +251,7 @@ int blockstore_impl_t::dequeue_write(blockstore_op_t *op)
{
blockstore_journal_check_t space_check(this);
if (!space_check.check_available(op, unsynced_big_write_count + 1,
sizeof(journal_entry_big_write) + dsk.clean_entry_bitmap_size,
(dirty_it->second.state & BS_ST_INSTANT) ? JOURNAL_INSTANT_RESERVATION : JOURNAL_STABILIZE_RESERVATION))
sizeof(journal_entry_big_write) + clean_entry_bitmap_size, JOURNAL_STABILIZE_RESERVATION))
{
return 0;
}
@@ -271,28 +260,18 @@ int blockstore_impl_t::dequeue_write(blockstore_op_t *op)
if (loc == UINT64_MAX)
{
// no space
if (flusher->is_active())
{
// hope that some space will be available after flush
PRIV(op)->wait_for = WAIT_FREE;
return 0;
}
cancel_all_writes(op, dirty_it, -ENOSPC);
return 2;
}
if (inmemory_meta)
{
// Check once more that the metadata entry is zeroed (a non-zero entry indicates a bug or corruption)
uint64_t sector = (loc / (dsk.meta_block_size / dsk.clean_entry_size)) * dsk.meta_block_size;
uint64_t pos = (loc % (dsk.meta_block_size / dsk.clean_entry_size));
clean_disk_entry *entry = (clean_disk_entry*)((uint8_t*)metadata_buffer + sector + pos*dsk.clean_entry_size);
if (entry->oid.inode || entry->oid.stripe || entry->version)
{
printf(
"Fatal error (metadata corruption or bug): tried to write object %lx:%lx v%lu"
" over a non-zero metadata entry %lu with %lx:%lx v%lu\n", op->oid.inode,
op->oid.stripe, op->version, loc, entry->oid.inode, entry->oid.stripe, entry->version
);
exit(1);
}
}
BS_SUBMIT_GET_SQE(sqe, data);
write_iodepth++;
dirty_it->second.location = loc << dsk.block_order;
dirty_it->second.location = loc << block_order;
dirty_it->second.state = (dirty_it->second.state & ~BS_ST_WORKFLOW_MASK) | BS_ST_SUBMITTED;
#ifdef BLOCKSTORE_DEBUG
printf(
@@ -301,9 +280,9 @@ int blockstore_impl_t::dequeue_write(blockstore_op_t *op)
);
#endif
data_alloc->set(loc, true);
uint64_t stripe_offset = (op->offset % dsk.bitmap_granularity);
uint64_t stripe_end = (op->offset + op->len) % dsk.bitmap_granularity;
// Zero fill up to dsk.bitmap_granularity
uint64_t stripe_offset = (op->offset % bitmap_granularity);
uint64_t stripe_end = (op->offset + op->len) % bitmap_granularity;
// Zero fill up to bitmap_granularity
int vcnt = 0;
if (stripe_offset)
{
@@ -312,13 +291,13 @@ int blockstore_impl_t::dequeue_write(blockstore_op_t *op)
PRIV(op)->iov_zerofill[vcnt++] = (struct iovec){ op->buf, op->len };
if (stripe_end)
{
stripe_end = dsk.bitmap_granularity - stripe_end;
stripe_end = bitmap_granularity - stripe_end;
PRIV(op)->iov_zerofill[vcnt++] = (struct iovec){ zero_object, stripe_end };
}
data->iov.iov_len = op->len + stripe_offset + stripe_end; // to check it in the callback
data->callback = [this, op](ring_data_t *data) { handle_write_event(data, op); };
my_uring_prep_writev(
sqe, dsk.data_fd, PRIV(op)->iov_zerofill, vcnt, dsk.data_offset + (loc << dsk.block_order) + op->offset - stripe_offset
sqe, data_fd, PRIV(op)->iov_zerofill, vcnt, data_offset + (loc << block_order) + op->offset - stripe_offset
);
PRIV(op)->pending_ops = 1;
PRIV(op)->min_flushed_journal_sector = PRIV(op)->max_flushed_journal_sector = 0;
@@ -340,10 +319,9 @@ int blockstore_impl_t::dequeue_write(blockstore_op_t *op)
blockstore_journal_check_t space_check(this);
if (unsynced_big_write_count &&
!space_check.check_available(op, unsynced_big_write_count,
sizeof(journal_entry_big_write) + dsk.clean_entry_bitmap_size, 0)
sizeof(journal_entry_big_write) + clean_entry_bitmap_size, 0)
|| !space_check.check_available(op, 1,
sizeof(journal_entry_small_write) + dsk.clean_entry_bitmap_size,
op->len + ((dirty_it->second.state & BS_ST_INSTANT) ? JOURNAL_INSTANT_RESERVATION : JOURNAL_STABILIZE_RESERVATION)))
sizeof(journal_entry_small_write) + clean_entry_bitmap_size, op->len + JOURNAL_STABILIZE_RESERVATION))
{
return 0;
}
@@ -351,7 +329,7 @@ int blockstore_impl_t::dequeue_write(blockstore_op_t *op)
BS_SUBMIT_CHECK_SQES(
// Write current journal sector only if it's dirty and full, or in the immediate_commit mode
(immediate_commit != IMMEDIATE_NONE ||
!journal.entry_fits(sizeof(journal_entry_small_write) + dsk.clean_entry_bitmap_size) ? 1 : 0) +
!journal.entry_fits(sizeof(journal_entry_small_write) + clean_entry_bitmap_size) ? 1 : 0) +
(op->len > 0 ? 1 : 0)
);
write_iodepth++;
@@ -359,7 +337,7 @@ int blockstore_impl_t::dequeue_write(blockstore_op_t *op)
auto cb = [this, op](ring_data_t *data) { handle_write_event(data, op); };
if (immediate_commit == IMMEDIATE_NONE)
{
if (!journal.entry_fits(sizeof(journal_entry_small_write) + dsk.clean_entry_bitmap_size))
if (!journal.entry_fits(sizeof(journal_entry_small_write) + clean_entry_bitmap_size))
{
prepare_journal_sector_write(journal.cur_sector, op);
}
@@ -371,7 +349,7 @@ int blockstore_impl_t::dequeue_write(blockstore_op_t *op)
// Then pre-fill journal entry
journal_entry_small_write *je = (journal_entry_small_write*)prefill_single_journal_entry(
journal, op->opcode == BS_OP_WRITE_STABLE ? JE_SMALL_WRITE_INSTANT : JE_SMALL_WRITE,
sizeof(journal_entry_small_write) + dsk.clean_entry_bitmap_size
sizeof(journal_entry_small_write) + clean_entry_bitmap_size
);
dirty_it->second.journal_sector = journal.sector_info[journal.cur_sector].offset;
journal.used_sectors[journal.sector_info[journal.cur_sector].offset]++;
@@ -383,14 +361,14 @@ int blockstore_impl_t::dequeue_write(blockstore_op_t *op)
);
#endif
// Figure out where data will be
journal.next_free = (journal.next_free + op->len) <= journal.len ? journal.next_free : dsk.journal_block_size;
journal.next_free = (journal.next_free + op->len) <= journal.len ? journal.next_free : journal_block_size;
je->oid = op->oid;
je->version = op->version;
je->offset = op->offset;
je->len = op->len;
je->data_offset = journal.next_free;
je->crc32_data = crc32c(0, op->buf, op->len);
memcpy((void*)(je+1), (dsk.clean_entry_bitmap_size > sizeof(void*) ? dirty_it->second.bitmap : &dirty_it->second.bitmap), dsk.clean_entry_bitmap_size);
memcpy((void*)(je+1), (clean_entry_bitmap_size > sizeof(void*) ? dirty_it->second.bitmap : &dirty_it->second.bitmap), clean_entry_bitmap_size);
je->crc32 = je_crc32((journal_entry*)je);
journal.crc32_last = je->crc32;
if (immediate_commit != IMMEDIATE_NONE)
@@ -409,7 +387,7 @@ int blockstore_impl_t::dequeue_write(blockstore_op_t *op)
data2->iov = (struct iovec){ op->buf, op->len };
data2->callback = cb;
my_uring_prep_writev(
sqe2, dsk.journal_fd, &data2->iov, 1, journal.offset + journal.next_free
sqe2, journal.fd, &data2->iov, 1, journal.offset + journal.next_free
);
PRIV(op)->pending_ops++;
}
@@ -422,7 +400,7 @@ int blockstore_impl_t::dequeue_write(blockstore_op_t *op)
journal.next_free += op->len;
if (journal.next_free >= journal.len)
{
journal.next_free = dsk.journal_block_size;
journal.next_free = journal_block_size;
}
if (!PRIV(op)->pending_ops)
{
@@ -454,22 +432,15 @@ int blockstore_impl_t::continue_write(blockstore_op_t *op)
resume_2:
// Only for the immediate_commit mode: prepare and submit big_write journal entry
{
BS_SUBMIT_CHECK_SQES(1);
auto dirty_it = dirty_db.find((obj_ver_id){
.oid = op->oid,
.version = op->version,
});
assert(dirty_it != dirty_db.end());
blockstore_journal_check_t space_check(this);
if (!space_check.check_available(op, 1,
sizeof(journal_entry_big_write) + dsk.clean_entry_bitmap_size,
((dirty_it->second.state & BS_ST_INSTANT) ? JOURNAL_INSTANT_RESERVATION : JOURNAL_STABILIZE_RESERVATION)))
{
return 0;
}
BS_SUBMIT_CHECK_SQES(1);
journal_entry_big_write *je = (journal_entry_big_write*)prefill_single_journal_entry(
journal, op->opcode == BS_OP_WRITE_STABLE ? JE_BIG_WRITE_INSTANT : JE_BIG_WRITE,
sizeof(journal_entry_big_write) + dsk.clean_entry_bitmap_size
sizeof(journal_entry_big_write) + clean_entry_bitmap_size
);
dirty_it->second.journal_sector = journal.sector_info[journal.cur_sector].offset;
journal.used_sectors[journal.sector_info[journal.cur_sector].offset]++;
@@ -485,7 +456,7 @@ resume_2:
je->offset = op->offset;
je->len = op->len;
je->location = dirty_it->second.location;
memcpy((void*)(je+1), (dsk.clean_entry_bitmap_size > sizeof(void*) ? dirty_it->second.bitmap : &dirty_it->second.bitmap), dsk.clean_entry_bitmap_size);
memcpy((void*)(je+1), (clean_entry_bitmap_size > sizeof(void*) ? dirty_it->second.bitmap : &dirty_it->second.bitmap), clean_entry_bitmap_size);
je->crc32 = je_crc32((journal_entry*)je);
journal.crc32_last = je->crc32;
prepare_journal_sector_write(journal.cur_sector, op);
@@ -598,7 +569,10 @@ void blockstore_impl_t::handle_write_event(ring_data_t *data, blockstore_op_t *o
if (data->res != data->iov.iov_len)
{
// FIXME: our state becomes corrupted after a write error. maybe do something better than just die
disk_error_abort("data write", data->res, data->iov.iov_len);
throw std::runtime_error(
"write operation failed ("+std::to_string(data->res)+" != "+std::to_string(data->iov.iov_len)+
"). in-memory state is corrupted. AAAAAAAaaaaaaaaa!!!111"
);
}
PRIV(op)->pending_ops--;
assert(PRIV(op)->pending_ops >= 0);
@@ -653,14 +627,14 @@ int blockstore_impl_t::dequeue_del(blockstore_op_t *op)
});
assert(dirty_it != dirty_db.end());
blockstore_journal_check_t space_check(this);
if (!space_check.check_available(op, 1, sizeof(journal_entry_del), JOURNAL_INSTANT_RESERVATION))
if (!space_check.check_available(op, 1, sizeof(journal_entry_del), JOURNAL_STABILIZE_RESERVATION))
{
return 0;
}
// Write current journal sector only if it's dirty and full, or in the immediate_commit mode
BS_SUBMIT_CHECK_SQES(
(immediate_commit != IMMEDIATE_NONE ||
(dsk.journal_block_size - journal.in_sector_pos) < sizeof(journal_entry_del) &&
(journal_block_size - journal.in_sector_pos) < sizeof(journal_entry_del) &&
journal.sector_info[journal.cur_sector].dirty) ? 1 : 0
);
if (write_iodepth >= max_write_iodepth)
@@ -671,7 +645,7 @@ int blockstore_impl_t::dequeue_del(blockstore_op_t *op)
// Prepare journal sector write
if (immediate_commit == IMMEDIATE_NONE)
{
if ((dsk.journal_block_size - journal.in_sector_pos) < sizeof(journal_entry_del) &&
if ((journal_block_size - journal.in_sector_pos) < sizeof(journal_entry_del) &&
journal.sector_info[journal.cur_sector].dirty)
{
prepare_journal_sector_write(journal.cur_sector, op);

View File

@@ -12,87 +12,11 @@
#include "epoll_manager.h"
#include "cluster_client.h"
#include "pg_states.h"
#include "str_util.h"
#include "base64.h"
static const char *exe_name = NULL;
static const char* help_text =
"Vitastor command-line tool\n"
"(c) Vitaliy Filippov, 2019+ (VNPL-1.1)\n"
"\n"
"COMMANDS:\n"
"\n"
"vitastor-cli status\n"
" Show cluster status\n"
"\n"
"vitastor-cli df\n"
" Show pool space statistics\n"
"\n"
"vitastor-cli ls [-l] [-p POOL] [--sort FIELD] [-r] [-n N] [<glob> ...]\n"
" List images (only matching <glob> patterns if passed).\n"
" -p|--pool POOL Filter images by pool ID or name\n"
" -l|--long Also report allocated size and I/O statistics\n"
" --del Also include delete operation statistics\n"
" --sort FIELD Sort by specified field (name, size, used_size, <read|write|delete>_<iops|bps|lat|queue>)\n"
" -r|--reverse Sort in descending order\n"
" -n|--count N Only list first N items\n"
"\n"
"vitastor-cli create -s|--size <size> [-p|--pool <id|name>] [--parent <parent_name>[@<snapshot>]] <name>\n"
" Create an image. You may use K/M/G/T suffixes for <size>. If --parent is specified,\n"
" a copy-on-write image clone is created. Parent must be a snapshot (readonly image).\n"
" Pool must be specified if there is more than one pool.\n"
"\n"
"vitastor-cli create --snapshot <snapshot> [-p|--pool <id|name>] <image>\n"
"vitastor-cli snap-create [-p|--pool <id|name>] <image>@<snapshot>\n"
" Create a snapshot of image <name>. May be used live if only a single writer is active.\n"
"\n"
"vitastor-cli modify <name> [--rename <new-name>] [--resize <size>] [--readonly | --readwrite] [-f|--force]\n"
" Rename, resize image or change its readonly status. Images with children can't be made read-write.\n"
" If the new size is smaller than the old size, extra data will be purged.\n"
" You should resize file system in the image, if present, before shrinking it.\n"
" -f|--force Proceed with shrinking or setting readwrite flag even if the image has children.\n"
"\n"
"vitastor-cli rm <from> [<to>] [--writers-stopped]\n"
" Remove <from> or all layers between <from> and <to> (<to> must be a child of <from>),\n"
" rebasing all their children accordingly. --writers-stopped allows merging to be a bit\n"
" more effective in case of a single 'slim' read-write child and 'fat' removed parent:\n"
" the child is merged into parent and parent is renamed to child in that case.\n"
" In other cases parent layers are always merged into children.\n"
"\n"
"vitastor-cli flatten <layer>\n"
" Flatten a layer, i.e. merge data and detach it from parents.\n"
"\n"
"vitastor-cli rm-data --pool <pool> --inode <inode> [--wait-list] [--min-offset <offset>]\n"
" Remove inode data without changing metadata.\n"
" --wait-list Retrieve full objects listings before starting to remove objects.\n"
" Requires more memory, but allows to show correct removal progress.\n"
" --min-offset Purge only data starting with specified offset.\n"
"\n"
"vitastor-cli merge-data <from> <to> [--target <target>]\n"
" Merge layer data without changing metadata. Merge <from>..<to> to <target>.\n"
" <to> must be a child of <from> and <target> may be one of the layers between\n"
" <from> and <to>, including <from> and <to>.\n"
"\n"
"vitastor-cli alloc-osd\n"
" Allocate a new OSD number and reserve it by creating empty /osd/stats/<n> key.\n"
"\n"
"vitastor-cli rm-osd [--force] [--allow-data-loss] [--dry-run] <osd_id> [osd_id...]\n"
" Remove metadata and configuration for specified OSD(s) from etcd.\n"
" Refuses to remove OSDs with data without --force and --allow-data-loss.\n"
" With --dry-run only checks if deletion is possible without data loss and\n"
" redundancy degradation.\n"
"\n"
"Use vitastor-cli --help <command> for command details or vitastor-cli --help --all for all details.\n"
"\n"
"GLOBAL OPTIONS:\n"
" --etcd_address <etcd_address>\n"
" --iodepth N Send N operations in parallel to each OSD when possible (default 32)\n"
" --parallel_osds M Work with M osds in parallel when possible (default 4)\n"
" --progress 1|0 Report progress (default 1)\n"
" --cas 1|0 Use CAS writes for flatten, merge, rm (default is decide automatically)\n"
" --no-color Disable colored output\n"
" --json JSON output\n"
;
static void help();
static json11::Json::object parse_args(int narg, const char *args[])
{
@@ -101,47 +25,42 @@ static json11::Json::object parse_args(int narg, const char *args[])
cfg["progress"] = "1";
for (int i = 1; i < narg; i++)
{
if (args[i][0] == '-' && args[i][1] == 'h' && args[i][2] == 0)
if (!strcmp(args[i], "-h") || !strcmp(args[i], "--help"))
{
cfg["help"] = "1";
help();
}
else if (args[i][0] == '-' && args[i][1] == 'l' && args[i][2] == 0)
else if (args[i][0] == '-' && args[i][1] == 'l')
{
cfg["long"] = "1";
}
else if (args[i][0] == '-' && args[i][1] == 'n' && args[i][2] == 0)
else if (args[i][0] == '-' && args[i][1] == 'n')
{
cfg["count"] = args[++i];
}
else if (args[i][0] == '-' && args[i][1] == 'p' && args[i][2] == 0)
else if (args[i][0] == '-' && args[i][1] == 'p')
{
cfg["pool"] = args[++i];
}
else if (args[i][0] == '-' && args[i][1] == 's' && args[i][2] == 0)
else if (args[i][0] == '-' && args[i][1] == 's')
{
cfg["size"] = args[++i];
}
else if (args[i][0] == '-' && args[i][1] == 'r' && args[i][2] == 0)
else if (args[i][0] == '-' && args[i][1] == 'r')
{
cfg["reverse"] = "1";
}
else if (args[i][0] == '-' && args[i][1] == 'f' && args[i][2] == 0)
else if (args[i][0] == '-' && args[i][1] == 'f')
{
cfg["force"] = "1";
}
else if (args[i][0] == '-' && args[i][1] == '-')
{
const char *opt = args[i]+2;
cfg[opt] = i == narg-1 || !strcmp(opt, "json") ||
!strcmp(opt, "wait-list") || !strcmp(opt, "wait_list") ||
!strcmp(opt, "long") || !strcmp(opt, "del") ||
!strcmp(opt, "no-color") || !strcmp(opt, "no_color") ||
cfg[opt] = i == narg-1 || !strcmp(opt, "json") || !strcmp(opt, "wait-list") ||
!strcmp(opt, "long") || !strcmp(opt, "del") || !strcmp(opt, "no-color") ||
!strcmp(opt, "readonly") || !strcmp(opt, "readwrite") ||
!strcmp(opt, "force") || !strcmp(opt, "reverse") ||
!strcmp(opt, "allow-data-loss") || !strcmp(opt, "allow_data_loss") ||
!strcmp(opt, "dry-run") || !strcmp(opt, "dry_run") ||
!strcmp(opt, "help") || !strcmp(opt, "all") ||
(!strcmp(opt, "writers-stopped") || !strcmp(opt, "writers_stopped")) && strcmp("1", args[i+1]) != 0
!strcmp(opt, "writers-stopped") && strcmp("1", args[i+1]) != 0
? "1" : args[++i];
}
else
@@ -157,17 +76,98 @@ static json11::Json::object parse_args(int narg, const char *args[])
cmd.push_back("rm-data");
}
}
if (!cmd.size() || cfg["help"].bool_value())
{
print_help(help_text, "vitastor-cli", cmd.size() ? cmd[0].string_value() : "", cfg["all"].bool_value());
}
cfg["command"] = cmd;
return cfg;
}
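
The boolean-vs-value heuristic above is easy to misread in diff form, so here is a minimal standalone sketch (an illustration with a trimmed option set, not the actual vitastor code): an option consumes the following argv token as its value unless it is the last argument, belongs to the known boolean set, or is --writers-stopped followed by anything other than a literal "1".

#include <cstring>
#include <cstdio>
#include <map>
#include <set>
#include <string>

// Simplified sketch of the parse_args() option heuristic.
static std::map<std::string, std::string> parse_opts(int narg, char *args[])
{
    static const std::set<std::string> boolean_opts = {
        "json", "wait-list", "long", "del", "no-color",
        "readonly", "readwrite", "force", "reverse",
    };
    std::map<std::string, std::string> cfg;
    for (int i = 1; i < narg; i++)
    {
        if (!strncmp(args[i], "--", 2))
        {
            std::string opt = args[i]+2;
            // Boolean if it's the last token, is in the known set, or is
            // "writers-stopped" not followed by a literal "1"
            if (i == narg-1 || boolean_opts.count(opt) ||
                (opt == "writers-stopped" && strcmp(args[i+1], "1") != 0))
                cfg[opt] = "1";
            else
                cfg[opt] = args[++i];
        }
    }
    return cfg;
}

int main(int narg, char *args[])
{
    for (auto & kv: parse_opts(narg, args))
        printf("%s = %s\n", kv.first.c_str(), kv.second.c_str());
    return 0;
}
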
static void help()
{
printf(
"Vitastor command-line tool\n"
"(c) Vitaliy Filippov, 2019+ (VNPL-1.1)\n"
"\n"
"USAGE:\n"
"%s status\n"
" Show cluster status\n"
"\n"
"%s df\n"
" Show pool space statistics\n"
"\n"
"%s ls [-l] [-p POOL] [--sort FIELD] [-r] [-n N] [<glob> ...]\n"
" List images (only matching <glob> patterns if passed).\n"
" -p|--pool POOL Filter images by pool ID or name\n"
" -l|--long Also report allocated size and I/O statistics\n"
" --del Also include delete operation statistics\n"
" --sort FIELD Sort by specified field (name, size, used_size, <read|write|delete>_<iops|bps|lat|queue>)\n"
" -r|--reverse Sort in descending order\n"
" -n|--count N Only list first N items\n"
"\n"
"%s create -s|--size <size> [-p|--pool <id|name>] [--parent <parent_name>[@<snapshot>]] <name>\n"
" Create an image. You may use K/M/G/T suffixes for <size>. If --parent is specified,\n"
" a copy-on-write image clone is created. Parent must be a snapshot (readonly image).\n"
" Pool must be specified if there is more than one pool.\n"
"\n"
"%s create --snapshot <snapshot> [-p|--pool <id|name>] <image>\n"
"%s snap-create [-p|--pool <id|name>] <image>@<snapshot>\n"
" Create a snapshot of image <name>. May be used live if only a single writer is active.\n"
"\n"
"%s modify <name> [--rename <new-name>] [--resize <size>] [--readonly | --readwrite] [-f|--force]\n"
" Rename, resize image or change its readonly status. Images with children can't be made read-write.\n"
" If the new size is smaller than the old size, extra data will be purged.\n"
" You should resize file system in the image, if present, before shrinking it.\n"
" -f|--force Proceed with shrinking or setting readwrite flag even if the image has children.\n"
"\n"
"%s rm <from> [<to>] [--writers-stopped]\n"
" Remove <from> or all layers between <from> and <to> (<to> must be a child of <from>),\n"
" rebasing all their children accordingly. --writers-stopped allows merging to be a bit\n"
" more effective in case of a single 'slim' read-write child and 'fat' removed parent:\n"
" the child is merged into parent and parent is renamed to child in that case.\n"
" In other cases parent layers are always merged into children.\n"
"\n"
"%s flatten <layer>\n"
" Flatten a layer, i.e. merge data and detach it from parents.\n"
"\n"
"%s rm-data --pool <pool> --inode <inode> [--wait-list] [--min-offset <offset>]\n"
" Remove inode data without changing metadata.\n"
" --wait-list Retrieve full objects listings before starting to remove objects.\n"
" Requires more memory, but allows to show correct removal progress.\n"
" --min-offset Purge only data starting with specified offset.\n"
"\n"
"%s merge-data <from> <to> [--target <target>]\n"
" Merge layer data without changing metadata. Merge <from>..<to> to <target>.\n"
" <to> must be a child of <from> and <target> may be one of the layers between\n"
" <from> and <to>, including <from> and <to>.\n"
"\n"
"%s alloc-osd\n"
" Allocate a new OSD number and reserve it by creating empty /osd/stats/<n> key.\n"
"%s simple-offsets <device>\n"
" Calculate offsets for simple&stupid (no superblock) OSD deployment. Options:\n"
" --object_size 128k Set blockstore block size\n"
" --bitmap_granularity 4k Set bitmap granularity\n"
" --journal_size 16M Set journal size\n"
" --device_block_size 4k Set device block size\n"
" --journal_offset 0 Set journal offset\n"
" --device_size 0 Set device size\n"
" --format text Result format: json, options, env, or text\n"
"\n"
"GLOBAL OPTIONS:\n"
" --etcd_address <etcd_address>\n"
" --iodepth N Send N operations in parallel to each OSD when possible (default 32)\n"
" --parallel_osds M Work with M osds in parallel when possible (default 4)\n"
" --progress 1|0 Report progress (default 1)\n"
" --cas 1|0 Use CAS writes for flatten, merge, rm (default is decide automatically)\n"
" --no-color Disable colored output\n"
" --json JSON output\n"
,
exe_name, exe_name, exe_name, exe_name, exe_name, exe_name, exe_name,
exe_name, exe_name, exe_name, exe_name, exe_name, exe_name
);
exit(0);
}
static int run(cli_tool_t *p, json11::Json::object cfg)
{
cli_result_t result = {};
cli_result_t result;
p->parse_config(cfg);
json11::Json::array cmd = cfg["command"].array_items();
cfg.erase("command");
@@ -235,16 +235,6 @@ static int run(cli_tool_t *p, json11::Json::object cfg)
// Delete inode data
action_cb = p->start_rm_data(cfg);
}
else if (cmd[0] == "rm-osd")
{
// Delete OSD metadata from etcd
if (cmd.size() > 1)
{
cmd.erase(cmd.begin(), cmd.begin()+1);
cfg["osd_id"] = cmd;
}
action_cb = p->start_rm_osd(cfg);
}
else if (cmd[0] == "merge-data")
{
// Merge layer data without affecting metadata
@@ -281,6 +271,15 @@ static int run(cli_tool_t *p, json11::Json::object cfg)
// Allocate a new OSD number
action_cb = p->start_alloc_osd(cfg);
}
else if (cmd[0] == "simple-offsets")
{
// Calculate offsets for simple & stupid OSD deployment without superblock
if (cmd.size() > 1)
{
cfg["device"] = cmd[1];
}
action_cb = p->simple_offsets(cfg);
}
else
{
result = { .err = EINVAL, .text = "unknown command: "+cmd[0].string_value() };

View File

@@ -45,7 +45,7 @@ public:
cli_result_t etcd_err;
json11::Json etcd_result;
void parse_config(json11::Json::object & cfg);
void parse_config(json11::Json cfg);
void change_parent(inode_t cur, inode_t new_parent, cli_result_t *result);
inode_config_t* get_inode_cfg(const std::string & name);
@@ -64,8 +64,8 @@ public:
std::function<bool(cli_result_t &)> start_merge(json11::Json);
std::function<bool(cli_result_t &)> start_flatten(json11::Json);
std::function<bool(cli_result_t &)> start_rm(json11::Json);
std::function<bool(cli_result_t &)> start_rm_osd(json11::Json cfg);
std::function<bool(cli_result_t &)> start_alloc_osd(json11::Json cfg);
std::function<bool(cli_result_t &)> simple_offsets(json11::Json cfg);
// Should be called like loop_and_wait(start_status(), <completion callback>)
void loop_and_wait(std::function<bool(cli_result_t &)> loop_cb, std::function<void(const cli_result_t &)> complete_cb);
@@ -73,8 +73,12 @@ public:
void etcd_txn(json11::Json txn);
};
uint64_t parse_size(std::string size_str);
std::string print_table(json11::Json items, json11::Json header, bool use_esc);
std::string format_size(uint64_t size, bool nobytes = false);
std::string format_lat(uint64_t lat);
std::string format_q(double depth);

View File

@@ -4,7 +4,7 @@
#include <ctype.h>
#include "cli.h"
#include "cluster_client.h"
#include "str_util.h"
#include "base64.h"
#include <algorithm>

View File

@@ -1,7 +1,7 @@
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 (see README.md for details)
#include "str_util.h"
#include "base64.h"
#include "cluster_client.h"
#include "cli.h"
@@ -100,20 +100,9 @@ inode_config_t* cli_tool_t::get_inode_cfg(const std::string & name)
return NULL;
}
void cli_tool_t::parse_config(json11::Json::object & cfg)
void cli_tool_t::parse_config(json11::Json cfg)
{
for (auto kv_it = cfg.begin(); kv_it != cfg.end();)
{
// Translate all options with - to _
if (kv_it->first.find("-") != std::string::npos)
{
cfg[str_replace(kv_it->first, "-", "_")] = kv_it->second;
cfg.erase(kv_it++);
}
else
kv_it++;
}
color = !cfg["no_color"].bool_value();
color = !cfg["no-color"].bool_value();
json_output = cfg["json"].bool_value();
iodepth = cfg["iodepth"].uint64_value();
if (!iodepth)
@@ -123,7 +112,7 @@ void cli_tool_t::parse_config(json11::Json::object & cfg)
parallel_osds = 4;
log_level = cfg["log_level"].int64_value();
progress = cfg["progress"].uint64_value() ? true : false;
list_first = cfg["wait_list"].uint64_value() ? true : false;
list_first = cfg["wait-list"].uint64_value() ? true : false;
}
struct cli_result_looper_t

View File

@@ -4,7 +4,7 @@
#include <ctype.h>
#include "cli.h"
#include "cluster_client.h"
#include "str_util.h"
#include "base64.h"
// Create an image, snapshot or clone
//
@@ -507,6 +507,34 @@ resume_3:
}
};
uint64_t parse_size(std::string size_str)
{
if (!size_str.length())
{
return 0;
}
uint64_t mul = 1;
char type_char = tolower(size_str[size_str.length()-1]);
if (type_char == 'k' || type_char == 'm' || type_char == 'g' || type_char == 't')
{
if (type_char == 'k')
mul = (uint64_t)1<<10;
else if (type_char == 'm')
mul = (uint64_t)1<<20;
else if (type_char == 'g')
mul = (uint64_t)1<<30;
else /*if (type_char == 't')*/
mul = (uint64_t)1<<40;
size_str = size_str.substr(0, size_str.length()-1);
}
uint64_t size = json11::Json(size_str).uint64_value() * mul;
if (size == 0 && size_str != "0" && (size_str != "" || mul != 1))
{
return UINT64_MAX;
}
return size;
}
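
For orientation, a few input/output pairs for parse_size() as defined above (a sketch; it assumes vitastor's patched json11, where uint64_value() also parses numeric strings):

#include <cassert>
#include <cstdint>
#include <string>

uint64_t parse_size(std::string size_str); // as defined above

void check_parse_size()
{
    assert(parse_size("4096") == 4096);
    assert(parse_size("128k") == 128*1024ull);   // K/M/G/T are powers of two
    assert(parse_size("10G") == 10ull << 30);
    assert(parse_size("") == 0);                 // empty input means "unset"
    assert(parse_size("10X") == UINT64_MAX);     // UINT64_MAX signals a parse error
}
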
std::function<bool(cli_result_t &)> cli_tool_t::start_create(json11::Json cfg)
{
auto image_creator = new image_creator_t();
@@ -517,7 +545,7 @@ std::function<bool(cli_result_t &)> cli_tool_t::start_create(json11::Json cfg)
image_creator->force_size = cfg["force_size"].bool_value();
if (cfg["image_meta"].is_object())
{
image_creator->new_meta = cfg["image_meta"];
image_creator->new_meta = cfg["image-meta"];
}
if (cfg["snapshot"].string_value() != "")
{
@@ -526,9 +554,8 @@ std::function<bool(cli_result_t &)> cli_tool_t::start_create(json11::Json cfg)
image_creator->new_parent = cfg["parent"].string_value();
if (cfg["size"].string_value() != "")
{
bool ok;
image_creator->size = parse_size(cfg["size"].string_value(), &ok);
if (!ok)
image_creator->size = parse_size(cfg["size"].string_value());
if (image_creator->size == UINT64_MAX)
{
return [size = cfg["size"].string_value()](cli_result_t & result)
{

View File

@@ -3,7 +3,7 @@
#include "cli.h"
#include "cluster_client.h"
#include "str_util.h"
#include "base64.h"
// List pools with space statistics
struct pool_lister_t

View File

@@ -133,7 +133,7 @@ std::function<bool(cli_result_t &)> cli_tool_t::start_flatten(json11::Json cfg)
auto flattener = new snap_flattener_t();
flattener->parent = this;
flattener->target_name = cfg["image"].string_value();
flattener->fsync_interval = cfg["fsync_interval"].uint64_value();
flattener->fsync_interval = cfg["fsync-interval"].uint64_value();
if (!flattener->fsync_interval)
flattener->fsync_interval = 128;
if (!cfg["cas"].is_null())

View File

@@ -4,7 +4,7 @@
#include <algorithm>
#include "cli.h"
#include "cluster_client.h"
#include "str_util.h"
#include "base64.h"
// List existing images
//
@@ -446,6 +446,33 @@ std::string print_table(json11::Json items, json11::Json header, bool use_esc)
return str;
}
static uint64_t size_thresh[] = { (uint64_t)1024*1024*1024*1024, (uint64_t)1024*1024*1024, (uint64_t)1024*1024, 1024, 0 };
static uint64_t size_thresh_d[] = { (uint64_t)1000000000000, (uint64_t)1000000000, (uint64_t)1000000, (uint64_t)1000, 0 };
static const int size_thresh_n = sizeof(size_thresh)/sizeof(size_thresh[0]);
static const char *size_unit = "TGMKB";
std::string format_size(uint64_t size, bool nobytes)
{
uint64_t *thr = nobytes ? size_thresh_d : size_thresh;
char buf[256];
for (int i = 0; i < size_thresh_n; i++)
{
if (size >= thr[i] || i >= size_thresh_n-1)
{
double value = thr[i] ? (double)size/thr[i] : size;
int l = snprintf(buf, sizeof(buf), "%.1f", value);
assert(l < sizeof(buf)-2);
if (buf[l-1] == '0')
l -= 2;
buf[l] = i == size_thresh_n-1 && nobytes ? 0 : ' ';
buf[l+1] = i == size_thresh_n-1 && nobytes ? 0 : size_unit[i];
buf[l+2] = 0;
break;
}
}
return std::string(buf);
}
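
A few expected outputs of format_size() above may help when reading the threshold tables (values traced by hand against the code, so treat this as a sketch):

#include <cstdint>
#include <cstdio>
#include <string>

std::string format_size(uint64_t size, bool nobytes = false); // as defined above

void demo_format_size()
{
    printf("%s\n", format_size(1536).c_str());               // "1.5 K" (binary thresholds)
    printf("%s\n", format_size((uint64_t)1 << 30).c_str());  // "1 G"   (trailing ".0" is trimmed)
    printf("%s\n", format_size(1500, true).c_str());         // "1.5 K" (decimal thresholds)
    printf("%s\n", format_size(512, true).c_str());          // "512"   (no unit on the last threshold)
}
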
std::string format_lat(uint64_t lat)
{
char buf[256];

View File

@@ -47,7 +47,6 @@ struct snap_merger_t
int state = 0;
int lists_todo = 0;
uint64_t target_block_size = 0;
uint32_t target_bitmap_granularity = 0;
btree::safe_btree_set<uint64_t> merge_offsets;
btree::safe_btree_set<uint64_t>::iterator oit;
std::map<inode_t, std::vector<uint64_t>> layer_lists;
@@ -102,7 +101,7 @@ struct snap_merger_t
std::vector<inode_t> chain_list;
inode_config_t *cur = to_cfg;
chain_list.push_back(cur->num);
layer_block_size[cur->num] = get_block_size(cur->num, NULL);
layer_block_size[cur->num] = get_block_size(cur->num);
while (cur->parent_id != from_cfg->num &&
cur->parent_id != to_cfg->num &&
cur->parent_id != 0)
@@ -125,7 +124,7 @@ struct snap_merger_t
}
cur = &it->second;
chain_list.push_back(cur->num);
layer_block_size[cur->num] = get_block_size(cur->num, NULL);
layer_block_size[cur->num] = get_block_size(cur->num);
}
if (cur->parent_id != from_cfg->num)
{
@@ -134,7 +133,7 @@ struct snap_merger_t
return;
}
chain_list.push_back(from_cfg->num);
layer_block_size[from_cfg->num] = get_block_size(from_cfg->num, NULL);
layer_block_size[from_cfg->num] = get_block_size(from_cfg->num);
int i = chain_list.size()-1;
for (inode_t item: chain_list)
{
@@ -205,16 +204,14 @@ struct snap_merger_t
use_cas ? " online (with CAS)" : "", INODE_NO_POOL(target), INODE_POOL(target)
);
}
target_block_size = get_block_size(target, &target_bitmap_granularity);
target_block_size = get_block_size(target);
}
uint64_t get_block_size(inode_t inode, uint32_t *bitmap_granularity)
uint64_t get_block_size(inode_t inode)
{
auto & pool_cfg = parent->cli->st_cli.pool_config.at(INODE_POOL(inode));
uint64_t pg_data_size = (pool_cfg.scheme == POOL_SCHEME_REPLICATED ? 1 : pool_cfg.pg_size-pool_cfg.parity_chunks);
if (bitmap_granularity)
*bitmap_granularity = pool_cfg.bitmap_granularity;
return pool_cfg.data_block_size * pg_data_size;
return parent->cli->get_bs_block_size() * pg_data_size;
}
void continue_merge_reent()
@@ -412,7 +409,7 @@ struct snap_merger_t
}
else
{
uint64_t bitmap_bytes = target_block_size/target_bitmap_granularity/8;
uint64_t bitmap_bytes = target_block_size/parent->cli->get_bs_bitmap_granularity()/8;
int i;
for (i = 0; i < bitmap_bytes; i++)
{
@@ -472,7 +469,7 @@ struct snap_merger_t
{
// Write each non-empty range using an individual operation
// FIXME: Allow using a single write with "holes" (OSDs don't allow it yet)
uint32_t gran = target_bitmap_granularity;
uint32_t gran = parent->cli->get_bs_bitmap_granularity();
uint64_t bitmap_size = target_block_size / gran;
while (rwo->end < bitmap_size && !rwo->error_code)
{
@@ -631,8 +628,8 @@ std::function<bool(cli_result_t &)> cli_tool_t::start_merge(json11::Json cfg)
merger->from_name = cfg["from"].string_value();
merger->to_name = cfg["to"].string_value();
merger->target_name = cfg["target"].string_value();
merger->delete_source = cfg["delete_source"].string_value() != "";
merger->fsync_interval = cfg["fsync_interval"].uint64_value();
merger->delete_source = cfg["delete-source"].string_value() != "";
merger->fsync_interval = cfg["fsync-interval"].uint64_value();
if (!merger->fsync_interval)
merger->fsync_interval = 128;
if (!cfg["cas"].is_null())

View File

@@ -3,7 +3,7 @@
#include "cli.h"
#include "cluster_client.h"
#include "str_util.h"
#include "base64.h"
// Rename, resize image (and purge extra data on shrink) or change its readonly status
struct image_changer_t
@@ -236,7 +236,7 @@ std::function<bool(cli_result_t &)> cli_tool_t::start_modify(json11::Json cfg)
changer->force = cfg["force"].bool_value();
changer->set_readonly = cfg["readonly"].bool_value();
changer->set_readwrite = cfg["readwrite"].bool_value();
changer->fsync_interval = cfg["fsync_interval"].uint64_value();
changer->fsync_interval = cfg["fsync-interval"].uint64_value();
if (!changer->fsync_interval)
changer->fsync_interval = 128;
// FIXME Check that the image doesn't have children when shrinking

View File

@@ -4,7 +4,7 @@
#include <fcntl.h>
#include "cli.h"
#include "cluster_client.h"
#include "str_util.h"
#include "base64.h"
// Remove layer(s): similar to merge, but alters metadata and processes multiple merge targets
//
@@ -639,7 +639,7 @@ std::function<bool(cli_result_t &)> cli_tool_t::start_rm(json11::Json cfg)
snap_remover->parent = this;
snap_remover->from_name = cfg["from"].string_value();
snap_remover->to_name = cfg["to"].string_value();
snap_remover->fsync_interval = cfg["fsync_interval"].uint64_value();
snap_remover->fsync_interval = cfg["fsync-interval"].uint64_value();
if (!snap_remover->fsync_interval)
snap_remover->fsync_interval = 128;
if (!cfg["cas"].is_null())

View File

@@ -218,7 +218,7 @@ std::function<bool(cli_result_t &)> cli_tool_t::start_rm_data(json11::Json cfg)
remover->inode = (remover->inode & (((uint64_t)1 << (64-POOL_ID_BITS)) - 1)) | (((uint64_t)remover->pool_id) << (64-POOL_ID_BITS));
}
remover->pool_id = INODE_POOL(remover->inode);
remover->min_offset = cfg["min_offset"].uint64_value();
remover->min_offset = cfg["min-offset"].uint64_value();
return [remover](cli_result_t & result)
{
remover->loop();

View File

@@ -1,491 +0,0 @@
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 (see README.md for details)
#include <ctype.h>
#include "cli.h"
#include "cluster_client.h"
#include "str_util.h"
#include "epoll_manager.h"
#include <algorithm>
// Delete OSD metadata from etcd
struct rm_osd_t
{
cli_tool_t *parent;
bool dry_run, force_warning, force_dataloss;
uint64_t etcd_tx_retry_ms = 500;
uint64_t etcd_tx_retries = 10000;
std::vector<uint64_t> osd_ids;
int state = 0;
cli_result_t result;
std::set<uint64_t> to_remove;
std::set<uint64_t> to_restart;
json11::Json::array pool_effects;
json11::Json::array history_updates, history_checks;
json11::Json new_pgs, new_clean_pgs;
uint64_t new_pgs_mod_rev, new_clean_pgs_mod_rev;
uint64_t cur_retry = 0;
uint64_t retry_wait = 0;
bool is_warning, is_dataloss;
bool is_done()
{
return state == 100;
}
void loop()
{
if (state == 1)
goto resume_1;
else if (state == 2)
goto resume_2;
else if (state == 3)
goto resume_3;
else if (state == 4)
goto resume_4;
if (!osd_ids.size())
{
result = (cli_result_t){ .err = EINVAL, .text = "OSD numbers are not specified" };
state = 100;
return;
}
for (auto osd_id: osd_ids)
{
if (!osd_id)
{
result = (cli_result_t){ .err = EINVAL, .text = "OSD number can't be zero" };
state = 100;
return;
}
to_remove.insert(osd_id);
}
// Check if OSDs are still used in data distribution
is_warning = is_dataloss = false;
for (auto & pp: parent->cli->st_cli.pool_config)
{
// Will OSD deletion make pool incomplete / down / degraded?
bool pool_incomplete = false, pool_down = false, pool_degraded = false;
bool hist_incomplete = false, hist_degraded = false;
auto & pool_cfg = pp.second;
uint64_t pg_data_size = (pool_cfg.scheme == POOL_SCHEME_REPLICATED ? 1 : pool_cfg.pg_size-pool_cfg.parity_chunks);
for (auto & pgp: pool_cfg.pg_config)
{
auto & pg_cfg = pgp.second;
int pg_cursize = 0, pg_rm = 0;
for (auto pg_osd: pg_cfg.target_set)
{
if (pg_osd != 0)
{
pg_cursize++;
if (to_remove.find(pg_osd) != to_remove.end())
pg_rm++;
}
}
for (auto & hist_item: pg_cfg.target_history)
{
int hist_size = 0, hist_rm = 0;
for (auto & old_osd: hist_item)
{
if (old_osd != 0)
{
hist_size++;
if (to_remove.find(old_osd) != to_remove.end())
hist_rm++;
}
}
if (hist_rm > 0)
{
hist_degraded = true;
if (hist_size-hist_rm == 0)
pool_incomplete = true;
else if (hist_size-hist_rm < pg_data_size)
hist_incomplete = true;
}
}
if (pg_rm > 0)
{
pool_degraded = true;
if (pg_cursize-pg_rm < pg_data_size)
pool_incomplete = true;
else if (pg_cursize-pg_rm < pool_cfg.pg_minsize)
pool_down = true;
}
}
if (pool_incomplete || pool_down || pool_degraded || hist_incomplete || hist_degraded)
{
pool_effects.push_back(json11::Json::object {
{ "pool_id", (uint64_t)pool_cfg.id },
{ "pool_name", pool_cfg.name },
{ "effect", (pool_incomplete
? "incomplete"
: (hist_incomplete
? "has_incomplete"
: (pool_down
? "offline"
: (pool_degraded
? "degraded"
: (hist_degraded ? "has_degraded" : "?")
)
)
)
) },
});
is_warning = true;
if (pool_incomplete || hist_incomplete)
is_dataloss = true;
}
}
result.data = json11::Json::object {
{ "osd_ids", osd_ids },
{ "pool_errors", pool_effects },
};
if (is_dataloss || is_warning || dry_run)
{
std::string error;
for (auto & e: pool_effects)
{
error += "Pool "+e["pool_name"].string_value()+" (ID "+e["pool_id"].as_string()+") will have "+(
e["effect"] == "has_incomplete"
? std::string("INCOMPLETE objects (DATA LOSS)")
: (e["effect"] == "incomplete"
? std::string("INCOMPLETE PGs (DATA LOSS)")
: (e["effect"] == "has_degraded"
? std::string("DEGRADED objects")
: strtoupper(e["effect"].string_value())+" PGs"))
)+" after deleting OSD(s).\n";
}
if (is_dataloss && !force_dataloss && !dry_run)
error += "OSDs not deleted. Please move data to other OSDs or bypass this check with --allow-data-loss if you know what you are doing.\n";
else if (is_warning && !force_warning && !dry_run)
error += "OSDs not deleted. Please move data to other OSDs or bypass this check with --force if you know what you are doing.\n";
else if (!is_dataloss && !is_warning && dry_run)
error += "OSDs can be deleted without data loss.\n";
result.text = error;
if (dry_run || is_dataloss && !force_dataloss || is_warning && !force_warning)
{
result.err = is_dataloss && !force_dataloss || is_warning && !force_warning ? EBUSY : 0;
state = 100;
return;
}
}
parent->etcd_txn(json11::Json::object { { "success", json11::Json::array {
json11::Json::object {
{ "request_range", json11::Json::object {
{ "key", base64_encode(
parent->cli->st_cli.etcd_prefix+"/config/pgs"
) },
} },
},
json11::Json::object {
{ "request_range", json11::Json::object {
{ "key", base64_encode(
parent->cli->st_cli.etcd_prefix+"/history/last_clean_pgs"
) },
} },
},
} } });
resume_4:
state = 4;
if (parent->waiting > 0)
return;
if (parent->etcd_err.err)
{
result = parent->etcd_err;
state = 100;
return;
}
{
auto kv = parent->cli->st_cli.parse_etcd_kv(parent->etcd_result["responses"][0]["response_range"]["kvs"][0]);
new_pgs = remove_osds_from_pgs(kv);
new_pgs_mod_rev = kv.mod_revision;
kv = parent->cli->st_cli.parse_etcd_kv(parent->etcd_result["responses"][1]["response_range"]["kvs"][0]);
new_clean_pgs = remove_osds_from_pgs(kv);
new_clean_pgs_mod_rev = kv.mod_revision;
}
// Remove keys from etcd
{
json11::Json::array rm_items, rm_checks;
for (auto osd_id: osd_ids)
{
rm_items.push_back("/config/osd/"+std::to_string(osd_id));
rm_items.push_back("/osd/stats/"+std::to_string(osd_id));
rm_items.push_back("/osd/state/"+std::to_string(osd_id));
rm_items.push_back("/osd/inodestats/"+std::to_string(osd_id));
rm_items.push_back("/osd/space/"+std::to_string(osd_id));
}
for (int i = 0; i < rm_items.size(); i++)
{
rm_items[i] = json11::Json::object {
{ "request_delete_range", json11::Json::object {
{ "key", base64_encode(
parent->cli->st_cli.etcd_prefix+rm_items[i].string_value()
) },
} },
};
}
if (!new_pgs.is_null())
{
auto pgs_key = base64_encode(parent->cli->st_cli.etcd_prefix+"/config/pgs");
rm_items.push_back(json11::Json::object {
{ "request_put", json11::Json::object {
{ "key", pgs_key },
{ "value", base64_encode(new_pgs.dump()) },
} },
});
rm_checks.push_back(json11::Json::object {
{ "target", "MOD" },
{ "key", pgs_key },
{ "result", "LESS" },
{ "mod_revision", new_pgs_mod_rev+1 },
});
}
if (!new_clean_pgs.is_null())
{
auto pgs_key = base64_encode(parent->cli->st_cli.etcd_prefix+"/history/last_clean_pgs");
rm_items.push_back(json11::Json::object {
{ "request_put", json11::Json::object {
{ "key", pgs_key },
{ "value", base64_encode(new_clean_pgs.dump()) },
} },
});
rm_checks.push_back(json11::Json::object {
{ "target", "MOD" },
{ "key", pgs_key },
{ "result", "LESS" },
{ "mod_revision", new_clean_pgs_mod_rev+1 },
});
}
parent->etcd_txn(json11::Json::object { { "success", rm_items }, { "checks", rm_checks } });
}
resume_1:
state = 1;
if (parent->waiting > 0)
return;
if (parent->etcd_err.err)
{
result = parent->etcd_err;
state = 100;
return;
}
// Remove old OSDs from PG all_peers to prevent left_on_dead, and from
// target_history to prevent INCOMPLETE PGs if --allow-data-loss is specified
for (auto & rsp: parent->etcd_result["responses"].array_items())
{
if (rsp["response_delete_range"]["deleted"].uint64_value() > 0)
{
// Wait for mon_change_timeout before updating PG history, or the monitor's change will likely interfere with ours
retry_wait = parent->cli->merged_config["mon_change_timeout"].uint64_value();
if (!retry_wait)
retry_wait = 1000;
retry_wait += etcd_tx_retry_ms;
}
}
while (1)
{
resume_2:
if (!remove_osds_from_history(2))
return;
resume_3:
state = 3;
if (parent->waiting > 0)
return;
if (parent->etcd_err.err)
{
result = parent->etcd_err;
state = 100;
return;
}
if (parent->etcd_result["succeeded"].bool_value())
break;
if ((++cur_retry) >= etcd_tx_retries)
{
result.err = EAGAIN;
result.text += "Failed to remove OSDs from PG history due to update conflicts."
" Some PGs may remain left_on_dead or incomplete. Please retry later\n";
state = 100;
return;
}
retry_wait = etcd_tx_retry_ms;
}
std::string ids = "";
for (auto osd_id: osd_ids)
{
ids += (ids.size() ? ", " : "")+std::to_string(osd_id);
}
ids = (osd_ids.size() > 1 ? "OSDs " : "OSD ")+ids+(osd_ids.size() > 1 ? " are" : " is")+" removed from etcd";
state = 100;
result.text = (result.text != "" ? ids+"\n"+result.text : ids);
result.err = 0;
}
json11::Json remove_osds_from_pgs(const etcd_kv_t & kv)
{
if (kv.value.is_null())
{
return kv.value;
}
json11::Json::object new_pgs;
for (auto & pp: kv.value["items"].object_items())
{
if (pp.second.is_object())
{
json11::Json::object new_pool;
for (auto & pgp: pp.second.object_items())
{
json11::Json::array osd_set;
for (auto & osd_json: pgp.second["osd_set"].array_items())
{
uint64_t osd_num = osd_json.uint64_value();
osd_set.push_back(osd_num == 0 || to_remove.find(osd_num) != to_remove.end() ? 0 : osd_num);
}
json11::Json::object new_pg = pgp.second.object_items();
new_pg["osd_set"] = osd_set;
new_pool[pgp.first] = new_pg;
}
new_pgs[pp.first] = new_pool;
}
else
new_pgs[pp.first] = pp.second;
}
auto res = kv.value.object_items();
res["items"] = new_pgs;
return res;
}
bool remove_osds_from_history(int base_state)
{
if (state == base_state+0)
goto resume_0;
history_updates.clear();
history_checks.clear();
for (auto & pp: parent->cli->st_cli.pool_config)
{
bool update_pg_history = false;
auto & pool_cfg = pp.second;
for (auto & pgp: pool_cfg.pg_config)
{
auto pg_num = pgp.first;
auto & pg_cfg = pgp.second;
for (int i = 0; i < pg_cfg.all_peers.size(); i++)
{
if (to_remove.find(pg_cfg.all_peers[i]) != to_remove.end())
{
update_pg_history = true;
pg_cfg.all_peers.erase(pg_cfg.all_peers.begin()+i, pg_cfg.all_peers.begin()+i+1);
i--;
}
}
for (int i = 0; i < pg_cfg.target_history.size(); i++)
{
int hist_size = 0, hist_rm = 0;
for (auto & old_osd: pg_cfg.target_history[i])
{
if (old_osd != 0)
{
hist_size++;
if (to_remove.find(old_osd) != to_remove.end())
{
hist_rm++;
old_osd = 0;
}
}
}
if (hist_rm > 0)
{
if (hist_size-hist_rm == 0)
{
pg_cfg.target_history.erase(pg_cfg.target_history.begin()+i, pg_cfg.target_history.begin()+i+1);
i--;
}
update_pg_history = true;
}
}
if (update_pg_history)
{
std::string history_key = base64_encode(
parent->cli->st_cli.etcd_prefix+"/pg/history/"+
std::to_string(pool_cfg.id)+"/"+std::to_string(pg_num)
);
history_updates.push_back(json11::Json::object {
{ "request_put", json11::Json::object {
{ "key", history_key },
{ "value", base64_encode(json11::Json(json11::Json::object {
{ "epoch", pg_cfg.epoch },
{ "all_peers", pg_cfg.all_peers },
{ "osd_sets", pg_cfg.target_history },
}).dump()) },
} },
});
history_checks.push_back(json11::Json::object {
{ "target", "MOD" },
{ "key", history_key },
{ "result", "LESS" },
{ "mod_revision", parent->cli->st_cli.etcd_watch_revision+1 },
});
}
}
}
if (history_updates.size())
{
if (retry_wait)
{
parent->waiting++;
parent->epmgr->tfd->set_timer(retry_wait, false, [this](int timer_id)
{
parent->waiting--;
parent->ringloop->wakeup();
});
resume_0:
state = base_state+0;
if (parent->waiting > 0)
return false;
}
parent->etcd_txn(json11::Json::object {
{ "success", history_updates },
{ "compare", history_checks },
});
}
else
parent->etcd_result = json11::Json::object{ { "succeeded", true } };
return true;
}
};
std::function<bool(cli_result_t &)> cli_tool_t::start_rm_osd(json11::Json cfg)
{
auto rm_osd = new rm_osd_t();
rm_osd->parent = this;
rm_osd->dry_run = cfg["dry_run"].bool_value();
rm_osd->force_dataloss = cfg["allow_data_loss"].bool_value();
rm_osd->force_warning = rm_osd->force_dataloss || cfg["force"].bool_value();
if (!cfg["etcd_tx_retries"].is_null())
rm_osd->etcd_tx_retries = cfg["etcd_tx_retries"].uint64_value();
if (!cfg["etcd_tx_retry_ms"].is_null())
{
rm_osd->etcd_tx_retry_ms = cfg["etcd_tx_retry_ms"].uint64_value();
if (rm_osd->etcd_tx_retry_ms < 100)
rm_osd->etcd_tx_retry_ms = 100;
}
if (cfg["osd_id"].is_number() || cfg["osd_id"].is_string())
rm_osd->osd_ids.push_back(cfg["osd_id"].uint64_value());
else
{
for (auto & id: cfg["osd_id"].array_items())
rm_osd->osd_ids.push_back(id.uint64_value());
}
return [rm_osd](cli_result_t & result)
{
rm_osd->loop();
if (rm_osd->is_done())
{
result = rm_osd->result;
delete rm_osd;
return true;
}
return false;
};
}
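
The pool-effect classification in rm_osd_t::loop() above boils down to chunk counting; the following standalone sketch (a hypothetical helper, not part of the diff) restates the per-PG rule:

#include <cstdint>
#include <string>

// Classify what happens to one PG if pg_rm of its pg_cursize current
// target OSDs are removed (mirrors the thresholds used in rm_osd_t;
// assumes pg_rm <= pg_cursize).
std::string classify_pg_after_removal(bool replicated, uint64_t pg_size,
    uint64_t parity_chunks, uint64_t pg_minsize, uint64_t pg_cursize, uint64_t pg_rm)
{
    uint64_t pg_data_size = replicated ? 1 : pg_size - parity_chunks;
    if (!pg_rm)
        return "clean";
    if (pg_cursize - pg_rm < pg_data_size)
        return "incomplete";  // too few chunks left to reconstruct data => data loss
    if (pg_cursize - pg_rm < pg_minsize)
        return "offline";     // PG stops serving I/O, but data is still recoverable
    return "degraded";
}

// e.g. an EC 3+2 pool (pg_size=5, parity_chunks=2, pg_minsize=4) with all
// 5 OSDs in place: removing 1 OSD => "degraded", 2 => "offline", 3 => "incomplete".
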

View File

@@ -5,14 +5,13 @@
#include <sys/ioctl.h>
#include <ctype.h>
#include <unistd.h>
#include "cli.h"
#include "cluster_client.h"
#include "base64.h"
#include <sys/stat.h>
#include "json11/json11.hpp"
#include "str_util.h"
#include "blockstore.h"
// Calculate offsets for a block device and print OSD command line parameters
void disk_tool_simple_offsets(json11::Json cfg, bool json_output)
std::function<bool(cli_result_t &)> cli_tool_t::simple_offsets(json11::Json cfg)
{
std::string device = cfg["device"].string_value();
if (device == "")
@@ -30,7 +29,7 @@ void disk_tool_simple_offsets(json11::Json cfg, bool json_output)
if (json_output)
format = "json";
if (!object_size)
object_size = 1 << DEFAULT_DATA_BLOCK_ORDER;
object_size = DEFAULT_BLOCK_SIZE;
if (!bitmap_granularity)
bitmap_granularity = DEFAULT_BITMAP_GRANULARITY;
if (!journal_size)
@@ -146,4 +145,5 @@ void disk_tool_simple_offsets(json11::Json cfg, bool json_output)
device.c_str(), journal_offset, meta_offset, data_offset
);
}
return NULL;
}

View File

@@ -3,12 +3,10 @@
#include "cli.h"
#include "cluster_client.h"
#include "str_util.h"
#include "base64.h"
#include "pg_states.h"
#include "http_client.h"
static const char *obj_states[] = { "clean", "misplaced", "degraded", "incomplete" };
// Print cluster status:
// etcd, mon, osd states
// raw/used space, object states, pool states, pg states
@@ -198,57 +196,21 @@ resume_2:
}
pgs_by_state_str += std::to_string(kv.second)+" "+kv.first;
}
uint64_t object_size = parent->cli->get_bs_block_size();
std::string more_states;
uint64_t obj_n;
obj_n = agg_stats["object_counts"]["misplaced"].uint64_value();
if (obj_n > 0)
more_states += ", "+format_size(obj_n*object_size)+" misplaced";
obj_n = agg_stats["object_counts"]["degraded"].uint64_value();
if (obj_n > 0)
more_states += ", "+format_size(obj_n*object_size)+" degraded";
obj_n = agg_stats["object_counts"]["incomplete"].uint64_value();
if (obj_n > 0)
more_states += ", "+format_size(obj_n*object_size)+" incomplete";
bool readonly = json_is_true(parent->cli->merged_config["readonly"]);
bool no_recovery = json_is_true(parent->cli->merged_config["no_recovery"]);
bool no_rebalance = json_is_true(parent->cli->merged_config["no_rebalance"]);
if (parent->json_output)
{
// JSON output
auto json_status = json11::Json::object {
{ "etcd_alive", etcd_alive },
{ "etcd_count", (uint64_t)etcd_states.size() },
{ "etcd_db_size", etcd_db_size },
{ "mon_count", mon_count },
{ "mon_master", mon_master },
{ "osd_up", osd_up },
{ "osd_count", osd_count },
{ "total_raw", total_raw },
{ "free_raw", free_raw },
{ "down_raw", down_raw },
{ "free_down_raw", free_down_raw },
{ "readonly", readonly },
{ "no_recovery", no_recovery },
{ "no_rebalance", no_rebalance },
{ "pool_count", pool_count },
{ "active_pool_count", pools_active },
{ "pg_states", pgs_by_state },
{ "op_stats", agg_stats["op_stats"] },
{ "recovery_stats", agg_stats["recovery_stats"] },
{ "object_counts", agg_stats["object_counts"] },
};
for (int i = 0; i < sizeof(obj_states)/sizeof(obj_states[0]); i++)
{
std::string str(obj_states[i]);
uint64_t obj_n = agg_stats["object_bytes"][str].uint64_value();
if (!obj_n)
obj_n = agg_stats["object_counts"][str].uint64_value() * parent->cli->st_cli.global_block_size;
json_status[str+"_data"] = obj_n;
}
printf("%s\n", json11::Json(json_status).dump().c_str());
state = 100;
return;
}
std::string more_states;
for (int i = 0; i < sizeof(obj_states)/sizeof(obj_states[0]); i++)
{
std::string str(obj_states[i]);
uint64_t obj_n = agg_stats["object_bytes"][str].uint64_value();
if (!obj_n)
obj_n = agg_stats["object_counts"][str].uint64_value() * parent->cli->st_cli.global_block_size;
if (!i || obj_n > 0)
more_states += format_size(obj_n)+" "+str+", ";
}
more_states.resize(more_states.size()-2);
std::string recovery_io;
{
uint64_t deg_bps = agg_stats["recovery_stats"]["degraded"]["bps"].uint64_value();
@@ -270,6 +232,38 @@ resume_2:
else if (no_rebalance)
recovery_io += " rebalance: disabled\n";
}
if (parent->json_output)
{
// JSON output
printf("%s\n", json11::Json(json11::Json::object {
{ "etcd_alive", etcd_alive },
{ "etcd_count", (uint64_t)etcd_states.size() },
{ "etcd_db_size", etcd_db_size },
{ "mon_count", mon_count },
{ "mon_master", mon_master },
{ "osd_up", osd_up },
{ "osd_count", osd_count },
{ "total_raw", total_raw },
{ "free_raw", free_raw },
{ "down_raw", down_raw },
{ "free_down_raw", free_down_raw },
{ "readonly", readonly },
{ "no_recovery", no_recovery },
{ "no_rebalance", no_rebalance },
{ "clean_data", agg_stats["object_counts"]["clean"].uint64_value() * object_size },
{ "misplaced_data", agg_stats["object_counts"]["misplaced"].uint64_value() * object_size },
{ "degraded_data", agg_stats["object_counts"]["degraded"].uint64_value() * object_size },
{ "incomplete_data", agg_stats["object_counts"]["incomplete"].uint64_value() * object_size },
{ "pool_count", pool_count },
{ "active_pool_count", pools_active },
{ "pg_states", pgs_by_state },
{ "op_stats", agg_stats["op_stats"] },
{ "recovery_stats", agg_stats["recovery_stats"] },
{ "object_counts", agg_stats["object_counts"] },
}).dump().c_str());
state = 100;
return;
}
printf(
" cluster:\n"
" etcd: %d / %ld up, %s database size\n"
@@ -278,7 +272,7 @@ resume_2:
" \n"
" data:\n"
" raw: %s used, %s / %s available%s\n"
" state: %s\n"
" state: %s clean%s\n"
" pools: %d / %d active\n"
" pgs: %s\n"
" \n"
@@ -292,7 +286,7 @@ resume_2:
format_size(free_raw-free_down_raw).c_str(),
format_size(total_raw-down_raw).c_str(),
(down_raw > 0 ? (", "+format_size(down_raw)+" down").c_str() : ""),
more_states.c_str(),
format_size(agg_stats["object_counts"]["clean"].uint64_value() * object_size).c_str(), more_states.c_str(),
pools_active, pool_count,
pgs_by_state_str.c_str(),
readonly ? " (read-only mode)" : "",

View File

@@ -14,7 +14,6 @@
#define CACHE_FLUSHING 2
#define CACHE_REPEATING 3
#define OP_FLUSH_BUFFER 0x02
#define OP_IMMEDIATE_COMMIT 0x04
cluster_client_t::cluster_client_t(ring_loop_t *ringloop, timerfd_manager_t *tfd, json11::Json & config)
{
@@ -128,26 +127,26 @@ void cluster_client_t::calc_wait(cluster_op_t *op)
op->prev_wait++;
}
}
if (!op->prev_wait)
if (!op->prev_wait && pgs_loaded)
continue_rw(op);
}
else if (op->opcode == OSD_OP_SYNC)
{
for (auto prev = op->prev; prev; prev = prev->prev)
{
if (prev->opcode == OSD_OP_SYNC || prev->opcode == OSD_OP_WRITE && !(prev->flags & OP_IMMEDIATE_COMMIT))
if (prev->opcode == OSD_OP_SYNC || prev->opcode == OSD_OP_WRITE)
{
op->prev_wait++;
}
}
if (!op->prev_wait)
if (!op->prev_wait && pgs_loaded)
continue_sync(op);
}
else /* if (op->opcode == OSD_OP_READ || op->opcode == OSD_OP_READ_BITMAP) */
{
for (auto prev = op_queue_head; prev && prev != op; prev = prev->next)
{
if (prev->opcode == OSD_OP_WRITE && (prev->flags & OP_FLUSH_BUFFER))
if (prev->opcode == OSD_OP_WRITE && prev->flags & OP_FLUSH_BUFFER)
{
op->prev_wait++;
}
@@ -157,7 +156,7 @@ void cluster_client_t::calc_wait(cluster_op_t *op)
break;
}
}
if (!op->prev_wait)
if (!op->prev_wait && pgs_loaded)
continue_rw(op);
}
}
@@ -169,7 +168,7 @@ void cluster_client_t::inc_wait(uint64_t opcode, uint64_t flags, cluster_op_t *n
while (next)
{
auto n2 = next->next;
if (next->opcode == OSD_OP_SYNC && !(flags & OP_IMMEDIATE_COMMIT) ||
if (next->opcode == OSD_OP_SYNC ||
next->opcode == OSD_OP_WRITE && (flags & OP_FLUSH_BUFFER) && !(next->flags & OP_FLUSH_BUFFER) ||
(next->opcode == OSD_OP_READ || next->opcode == OSD_OP_READ_BITMAP) && (flags & OP_FLUSH_BUFFER))
{
@@ -221,11 +220,9 @@ void cluster_client_t::erase_op(cluster_op_t *op)
if (op_queue_tail == op)
op_queue_tail = op->prev;
op->next = op->prev = NULL;
if (!(flags & OP_IMMEDIATE_COMMIT))
inc_wait(opcode, flags, next, -1);
// Call callback at the end to avoid inconsistencies in prev_wait
// if the callback adds more operations itself
std::function<void(cluster_op_t*)>(op->callback)(op);
if (!immediate_commit)
inc_wait(opcode, flags, next, -1);
}
void cluster_client_t::continue_ops(bool up_retry)
@@ -265,6 +262,21 @@ restart:
continuing_ops = 0;
}
static uint32_t is_power_of_two(uint64_t value)
{
uint32_t l = 0;
while (value > 1)
{
if (value & 1)
{
return 64;
}
value = value >> 1;
l++;
}
return l;
}
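// Return-value convention sketch (a note, not part of the diff):
// is_power_of_two() yields log2(value) for exact powers of two and the
// sentinel 64 otherwise, so the caller below can validate the block size
// and reuse the computed order in one call:
//   is_power_of_two(1)      == 0
//   is_power_of_two(131072) == 17   // the 128 KiB default block size
//   is_power_of_two(12345)  == 64   // rejected with "Bad block size"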
void cluster_client_t::on_load_config_hook(json11::Json::object & config)
{
this->merged_config = config;
@@ -272,6 +284,24 @@ void cluster_client_t::on_load_config_hook(json11::Json::object & config)
{
this->merged_config[kv.first] = kv.second;
}
bs_block_size = config["block_size"].uint64_value();
bs_bitmap_granularity = config["bitmap_granularity"].uint64_value();
if (!bs_block_size)
{
bs_block_size = DEFAULT_BLOCK_SIZE;
}
if (!bs_bitmap_granularity)
{
bs_bitmap_granularity = DEFAULT_BITMAP_GRANULARITY;
}
bs_bitmap_size = bs_block_size / bs_bitmap_granularity / 8;
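// Worked example (assuming the 128 KiB block / 4 KiB granularity defaults):
// bs_bitmap_size = 131072 / 4096 / 8 = 4 bytes of bitmap per data block.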
uint32_t block_order;
if ((block_order = is_power_of_two(bs_block_size)) >= 64 || bs_block_size < MIN_DATA_BLOCK_SIZE || bs_block_size >= MAX_DATA_BLOCK_SIZE)
{
throw std::runtime_error("Bad block size");
}
// Cluster-wide immediate_commit mode
immediate_commit = (config["immediate_commit"] == "all");
if (config.find("client_max_dirty_bytes") != config.end())
{
client_max_dirty_bytes = config["client_max_dirty_bytes"].uint64_value();
@@ -349,15 +379,9 @@ void cluster_client_t::on_change_hook(std::map<std::string, etcd_kv_t> & changes
continue_ops();
}
bool cluster_client_t::get_immediate_commit(uint64_t inode)
bool cluster_client_t::get_immediate_commit()
{
pool_id_t pool_id = INODE_POOL(inode);
if (!pool_id)
return true;
auto pool_it = st_cli.pool_config.find(pool_id);
if (pool_it == st_cli.pool_config.end())
return true;
return pool_it->second.immediate_commit == IMMEDIATE_ALL;
return immediate_commit;
}
void cluster_client_t::on_change_osd_state_hook(uint64_t peer_osd)
@@ -415,45 +439,9 @@ void cluster_client_t::execute(cluster_op_t *op)
std::function<void(cluster_op_t*)>(op->callback)(op);
return;
}
if (!pgs_loaded)
{
offline_ops.push_back(op);
return;
}
op->cur_inode = op->inode;
op->retval = 0;
op->flags = op->flags & OSD_OP_IGNORE_READONLY; // single allowed flag
if (op->opcode != OSD_OP_SYNC)
{
pool_id_t pool_id = INODE_POOL(op->cur_inode);
if (!pool_id)
{
op->retval = -EINVAL;
std::function<void(cluster_op_t*)>(op->callback)(op);
return;
}
auto pool_it = st_cli.pool_config.find(pool_id);
if (pool_it == st_cli.pool_config.end() || pool_it->second.real_pg_count == 0)
{
// Pools are loaded, but this one is unknown
op->retval = -EINVAL;
std::function<void(cluster_op_t*)>(op->callback)(op);
return;
}
// Check alignment
if ((op->opcode == OSD_OP_READ || op->opcode == OSD_OP_WRITE) && !op->len ||
op->offset % pool_it->second.bitmap_granularity || op->len % pool_it->second.bitmap_granularity)
{
op->retval = -EINVAL;
std::function<void(cluster_op_t*)>(op->callback)(op);
return;
}
if (pool_it->second.immediate_commit == IMMEDIATE_ALL)
{
op->flags |= OP_IMMEDIATE_COMMIT;
}
}
if (op->opcode == OSD_OP_WRITE && !(op->flags & OP_IMMEDIATE_COMMIT))
if (op->opcode == OSD_OP_WRITE && !immediate_commit)
{
if (dirty_bytes >= client_max_dirty_bytes || dirty_ops >= client_max_dirty_ops)
{
@@ -492,9 +480,9 @@ void cluster_client_t::execute(cluster_op_t *op)
}
else
op_queue_tail = op_queue_head = op;
if (!(op->flags & OP_IMMEDIATE_COMMIT))
if (!immediate_commit)
calc_wait(op);
else
else if (pgs_loaded)
{
if (op->opcode == OSD_OP_SYNC)
continue_sync(op);
@@ -622,6 +610,28 @@ int cluster_client_t::continue_rw(cluster_op_t *op)
else if (op->state == 3)
goto resume_3;
resume_0:
if ((op->opcode == OSD_OP_READ || op->opcode == OSD_OP_WRITE) && !op->len ||
op->offset % bs_bitmap_granularity || op->len % bs_bitmap_granularity)
{
op->retval = -EINVAL;
erase_op(op);
return 1;
}
{
pool_id_t pool_id = INODE_POOL(op->cur_inode);
if (!pool_id)
{
op->retval = -EINVAL;
erase_op(op);
return 1;
}
if (st_cli.pool_config.find(pool_id) == st_cli.pool_config.end() ||
st_cli.pool_config[pool_id].real_pg_count == 0)
{
// Postpone operations to unknown pools
return 0;
}
}
if (op->opcode == OSD_OP_WRITE || op->opcode == OSD_OP_DELETE)
{
if (!(op->flags & OSD_OP_IGNORE_READONLY))
@@ -634,7 +644,7 @@ resume_0:
return 1;
}
}
if (op->opcode == OSD_OP_WRITE && !(op->flags & OP_IMMEDIATE_COMMIT) && !(op->flags & OP_FLUSH_BUFFER))
if (op->opcode == OSD_OP_WRITE && !immediate_commit && !(op->flags & OP_FLUSH_BUFFER))
{
copy_write(op, dirty_buffers);
}
@@ -804,7 +814,7 @@ void cluster_client_t::slice_rw(cluster_op_t *op)
// Primary OSDs still operate individual stripes, but their size is multiplied by PG minsize in case of EC
auto & pool_cfg = st_cli.pool_config.at(INODE_POOL(op->cur_inode));
uint32_t pg_data_size = (pool_cfg.scheme == POOL_SCHEME_REPLICATED ? 1 : pool_cfg.pg_size-pool_cfg.parity_chunks);
uint64_t pg_block_size = pool_cfg.data_block_size * pg_data_size;
uint64_t pg_block_size = bs_block_size * pg_data_size;
uint64_t first_stripe = (op->offset / pg_block_size) * pg_block_size;
uint64_t last_stripe = op->len > 0 ? ((op->offset + op->len - 1) / pg_block_size) * pg_block_size : first_stripe;
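// Worked example (assumption: EC 3+2, 128 KiB blocks): pg_block_size =
// 3*131072 = 384 KiB, so a 1 MiB request at offset 0 covers stripes at
// 0, 384 KiB and 768 KiB and is sliced into three per-PG parts.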
op->retval = 0;
@@ -812,9 +822,9 @@ void cluster_client_t::slice_rw(cluster_op_t *op)
if (op->opcode == OSD_OP_READ || op->opcode == OSD_OP_READ_BITMAP)
{
// Allocate memory for the bitmap
unsigned object_bitmap_size = (((op->opcode == OSD_OP_READ_BITMAP ? pg_block_size : op->len) / pool_cfg.bitmap_granularity + 7) / 8);
unsigned object_bitmap_size = (((op->opcode == OSD_OP_READ_BITMAP ? pg_block_size : op->len) / bs_bitmap_granularity + 7) / 8);
object_bitmap_size = (object_bitmap_size < 8 ? 8 : object_bitmap_size);
unsigned bitmap_mem = object_bitmap_size + (pool_cfg.data_block_size / pool_cfg.bitmap_granularity / 8 * pg_data_size) * op->parts.size();
unsigned bitmap_mem = object_bitmap_size + (bs_bitmap_size * pg_data_size) * op->parts.size();
if (op->bitmap_buf_size < bitmap_mem)
{
op->bitmap_buf = realloc_or_die(op->bitmap_buf, bitmap_mem);
@@ -844,7 +854,7 @@ void cluster_client_t::slice_rw(cluster_op_t *op)
bool skip_prev = true;
while (cur < end)
{
unsigned bmp_loc = (cur - op->offset)/pool_cfg.bitmap_granularity;
unsigned bmp_loc = (cur - op->offset)/bs_bitmap_granularity;
bool skip = (((*((uint8_t*)op->bitmap_buf + bmp_loc/8)) >> (bmp_loc%8)) & 0x1);
if (skip_prev != skip)
{
@@ -862,7 +872,7 @@ void cluster_client_t::slice_rw(cluster_op_t *op)
skip_prev = skip;
prev = cur;
}
cur += pool_cfg.bitmap_granularity;
cur += bs_bitmap_granularity;
}
assert(cur > prev);
if (skip_prev)
@@ -894,7 +904,7 @@ bool cluster_client_t::affects_osd(uint64_t inode, uint64_t offset, uint64_t len
{
auto & pool_cfg = st_cli.pool_config.at(INODE_POOL(inode));
uint32_t pg_data_size = (pool_cfg.scheme == POOL_SCHEME_REPLICATED ? 1 : pool_cfg.pg_size-pool_cfg.parity_chunks);
uint64_t pg_block_size = pool_cfg.data_block_size * pg_data_size;
uint64_t pg_block_size = bs_block_size * pg_data_size;
uint64_t first_stripe = (offset / pg_block_size) * pg_block_size;
uint64_t last_stripe = len > 0 ? ((offset + len - 1) / pg_block_size) * pg_block_size : first_stripe;
for (uint64_t stripe = first_stripe; stripe <= last_stripe; stripe += pg_block_size)
@@ -925,7 +935,7 @@ bool cluster_client_t::try_send(cluster_op_t *op, int i)
part->osd_num = primary_osd;
part->flags |= PART_SENT;
op->inflight_count++;
uint64_t pg_bitmap_size = (pool_cfg.data_block_size / pool_cfg.bitmap_granularity / 8) * (
uint64_t pg_bitmap_size = bs_bitmap_size * (
pool_cfg.scheme == POOL_SCHEME_REPLICATED ? 1 : pool_cfg.pg_size-pool_cfg.parity_chunks
);
uint64_t meta_rev = 0;
@@ -941,7 +951,7 @@ bool cluster_client_t::try_send(cluster_op_t *op, int i)
.req = { .rw = {
.header = {
.magic = SECONDARY_OSD_OP_MAGIC,
.id = next_op_id(),
.id = op_id++,
.opcode = op->opcode == OSD_OP_READ_BITMAP ? OSD_OP_READ : op->opcode,
},
.inode = op->cur_inode,
@@ -973,7 +983,7 @@ int cluster_client_t::continue_sync(cluster_op_t *op)
{
if (op->state == 1)
goto resume_1;
if (!dirty_osds.size())
if (immediate_commit || !dirty_osds.size())
{
// Sync is not required in the immediate_commit mode or if there are no dirty_osds
op->retval = 0;
@@ -1069,7 +1079,7 @@ void cluster_client_t::send_sync(cluster_op_t *op, cluster_op_part_t *part)
.req = {
.hdr = {
.magic = SECONDARY_OSD_OP_MAGIC,
.id = next_op_id(),
.id = op_id++,
.opcode = OSD_OP_SYNC,
},
},
@@ -1130,8 +1140,7 @@ void cluster_client_t::handle_op_part(cluster_op_part_t *part)
else
{
// OK
if (!(op->flags & OP_IMMEDIATE_COMMIT))
dirty_osds.insert(part->osd_num);
dirty_osds.insert(part->osd_num);
part->flags |= PART_DONE;
op->done_count++;
if (op->opcode == OSD_OP_READ || op->opcode == OSD_OP_READ_BITMAP)
@@ -1153,12 +1162,12 @@ void cluster_client_t::copy_part_bitmap(cluster_op_t *op, cluster_op_part_t *par
{
// Copy (OR) bitmap
auto & pool_cfg = st_cli.pool_config.at(INODE_POOL(op->cur_inode));
uint32_t pg_block_size = pool_cfg.data_block_size * (
uint32_t pg_block_size = bs_block_size * (
pool_cfg.scheme == POOL_SCHEME_REPLICATED ? 1 : pool_cfg.pg_size-pool_cfg.parity_chunks
);
uint32_t object_offset = (part->op.req.rw.offset - op->offset) / pool_cfg.bitmap_granularity;
uint32_t part_offset = (part->op.req.rw.offset % pg_block_size) / pool_cfg.bitmap_granularity;
uint32_t part_len = (op->opcode == OSD_OP_READ_BITMAP ? pg_block_size : part->op.req.rw.len) / pool_cfg.bitmap_granularity;
uint32_t object_offset = (part->op.req.rw.offset - op->offset) / bs_bitmap_granularity;
uint32_t part_offset = (part->op.req.rw.offset % pg_block_size) / bs_bitmap_granularity;
uint32_t part_len = (op->opcode == OSD_OP_READ_BITMAP ? pg_block_size : part->op.req.rw.len) / bs_bitmap_granularity;
if (!(object_offset & 0x7) && !(part_offset & 0x7) && (part_len >= 8))
{
// Copy bytes
@@ -1181,5 +1190,5 @@ void cluster_client_t::copy_part_bitmap(cluster_op_t *op, cluster_op_part_t *par
uint64_t cluster_client_t::next_op_id()
{
return msgr.next_subop_id++;
return op_id++;
}
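
To make the calc_wait()/inc_wait() changes above easier to follow, here is a heavily simplified standalone model (an illustration only: it ignores immediate_commit, sync-after-sync and write-after-flush edges) of the prev_wait dependency counting:

#include <cstddef>
#include <cstdio>
#include <vector>

enum opcode_t { OP_READ, OP_WRITE, OP_SYNC };

struct op_t
{
    opcode_t opcode;
    bool flush_buffer; // write generated to flush a dirty buffer
};

// Count how many earlier queue entries <idx> must wait for:
// a SYNC waits for every earlier WRITE, a READ only for flush-buffer WRITEs.
static int calc_prev_wait(const std::vector<op_t> & queue, size_t idx)
{
    int prev_wait = 0;
    for (size_t i = 0; i < idx; i++)
    {
        if (queue[idx].opcode == OP_SYNC && queue[i].opcode == OP_WRITE)
            prev_wait++;
        else if (queue[idx].opcode == OP_READ &&
            queue[i].opcode == OP_WRITE && queue[i].flush_buffer)
            prev_wait++;
    }
    return prev_wait;
}

int main()
{
    std::vector<op_t> q = {
        { OP_WRITE, false }, { OP_WRITE, true }, { OP_READ, false }, { OP_SYNC, false },
    };
    for (size_t i = 0; i < q.size(); i++)
        printf("op %zu: prev_wait = %d\n", i, calc_prev_wait(q, i));
    // prints 0, 0, 1, 2: the read waits for the flush write,
    // the sync waits for both writes
    return 0;
}
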

View File

@@ -6,6 +6,8 @@
#include "messenger.h"
#include "etcd_state_client.h"
#define MIN_DATA_BLOCK_SIZE 4*1024
#define MAX_DATA_BLOCK_SIZE 128*1024*1024
#define DEFAULT_CLIENT_MAX_DIRTY_BYTES 32*1024*1024
#define DEFAULT_CLIENT_MAX_DIRTY_OPS 1024
#define INODE_LIST_DONE 1
@@ -77,7 +79,11 @@ class cluster_client_t
timerfd_manager_t *tfd;
ring_loop_t *ringloop;
uint64_t bs_block_size = 0;
uint32_t bs_bitmap_granularity = 0, bs_bitmap_size = 0;
std::map<pool_id_t, uint64_t> pg_counts;
// WARNING: initially true so execute() doesn't create fake sync
bool immediate_commit = true;
// FIXME: Implement inmemory_commit mode. Note that it requires to return overlapping reads from memory.
uint64_t client_max_dirty_bytes = 0;
uint64_t client_max_dirty_ops = 0;
@@ -85,6 +91,7 @@ class cluster_client_t
int up_wait_retry_interval = 500; // ms
int retry_timeout_id = 0;
uint64_t op_id = 1;
std::vector<cluster_op_t*> offline_ops;
cluster_op_t *op_queue_head = NULL, *op_queue_tail = NULL;
std::map<object_id, cluster_buffer_t> dirty_buffers;
@@ -112,7 +119,7 @@ public:
bool is_ready();
void on_ready(std::function<void(void)> fn);
bool get_immediate_commit(uint64_t inode);
bool get_immediate_commit();
static void copy_write(cluster_op_t *op, std::map<object_id, cluster_buffer_t> & dirty_buffers);
void continue_ops(bool up_retry = false);
@@ -120,8 +127,8 @@ public:
std::function<void(inode_list_t* lst, std::set<object_id>&& objects, pg_num_t pg_num, osd_num_t primary_osd, int status)> callback);
int list_pg_count(inode_list_t *lst);
void list_inode_next(inode_list_t *lst, int next_pgs);
//inline uint32_t get_bs_bitmap_granularity() { return st_cli.global_bitmap_granularity; }
//inline uint64_t get_bs_block_size() { return st_cli.global_block_size; }
inline uint32_t get_bs_bitmap_granularity() { return bs_bitmap_granularity; }
inline uint64_t get_bs_block_size() { return bs_block_size; }
uint64_t next_op_id();
protected:

Some files were not shown because too many files have changed in this diff.