Compare commits


No commits in common. "master" and "v1.11.0" have entirely different histories.

163 changed files with 1016 additions and 4309 deletions

View File

@@ -2,6 +2,6 @@ cmake_minimum_required(VERSION 2.8.12)
 project(vitastor)
-set(VITASTOR_VERSION "2.1.0")
+set(VITASTOR_VERSION "1.11.0")
 add_subdirectory(src)

View File

@@ -6,7 +6,7 @@
 Make Clustered Block Storage Fast Again!
-Vitastor is a distributed block, file and object SDS (software-defined storage), a direct analogue of Ceph RBD, CephFS and RGW,
+Vitastor is a distributed block and file SDS (software-defined storage), a direct analogue of Ceph RBD and CephFS,
 as well as of the internal SDS's of popular cloud providers. However, in contrast to them, Vitastor
 is fast and simple at the same time. The only thing is it's slightly young :-).
@@ -46,7 +46,6 @@ Vitastor supports a QEMU driver, the NBD and
 - [OpenNebula](docs/installation/opennebula.ru.md)
 - [OpenStack](docs/installation/openstack.ru.md)
 - [Kubernetes CSI](docs/installation/kubernetes.ru.md)
-- [S3](docs/installation/s3.ru.md)
 - [Building from Source](docs/installation/source.ru.md)
 - Configuration
 - [Overview](docs/config.ru.md)

View File

@@ -6,7 +6,7 @@
 Make Clustered Block Storage Fast Again.
-Vitastor is a distributed block, file and object SDS, direct replacement of Ceph RBD, CephFS and RGW,
+Vitastor is a distributed block and file SDS, direct replacement of Ceph RBD and CephFS,
 and also internal SDS's of public clouds. However, in contrast to them, Vitastor is fast
 and simple at the same time. The only thing is it's slightly young :-).
@@ -46,7 +46,6 @@ Read more details in the documentation. You can start from here: [Quick Start](d
 - [OpenNebula](docs/installation/opennebula.en.md)
 - [OpenStack](docs/installation/openstack.en.md)
 - [Kubernetes CSI](docs/installation/kubernetes.en.md)
-- [S3](docs/installation/s3.en.md)
 - [Building from Source](docs/installation/source.en.md)
 - Configuration
 - [Overview](docs/config.en.md)

View File

@@ -37,8 +37,8 @@ RUN (echo deb http://vitastor.io/debian bookworm main > /etc/apt/sources.list.d/
     wget -q -O /etc/apt/trusted.gpg.d/vitastor.gpg https://vitastor.io/debian/pubkey.gpg && \
     apt-get update && \
     apt-get install -y vitastor-client && \
-    wget https://vitastor.io/archive/qemu/qemu-bookworm-9.2.2%2Bds-1%2Bvitastor4/qemu-utils_9.2.2%2Bds-1%2Bvitastor4_amd64.deb && \
-    wget https://vitastor.io/archive/qemu/qemu-bookworm-9.2.2%2Bds-1%2Bvitastor4/qemu-block-extra_9.2.2%2Bds-1%2Bvitastor4_amd64.deb && \
+    wget https://vitastor.io/archive/qemu/qemu-bookworm-8.1.2%2Bds-1%2Bvitastor1/qemu-utils_8.1.2%2Bds-1%2Bvitastor1_amd64.deb && \
+    wget https://vitastor.io/archive/qemu/qemu-bookworm-8.1.2%2Bds-1%2Bvitastor1/qemu-block-extra_8.1.2%2Bds-1%2Bvitastor1_amd64.deb && \
     dpkg -x qemu-utils*.deb tmp1 && \
     dpkg -x qemu-block-extra*.deb tmp1 && \
     cp -a tmp1/usr/bin/qemu-storage-daemon /usr/bin/ && \
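For a quick smoke test of the result, something like the following could be run inside the image (a sketch; it assumes the rest of this RUN also installs the extracted block driver module, that /etc/vitastor/vitastor.conf points at a reachable cluster, and `testimg` is a hypothetical image name):

```
qemu-storage-daemon --version
# the vitastor: protocol is provided by the extracted qemu-block-extra module:
qemu-img info 'vitastor:image=testimg'
```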

View File

@@ -1,4 +1,4 @@
-VITASTOR_VERSION ?= v2.1.0
+VITASTOR_VERSION ?= v1.11.0
 all: build push

View File

@@ -49,7 +49,7 @@ spec:
       capabilities:
         add: ["SYS_ADMIN"]
       allowPrivilegeEscalation: true
-      image: vitalif/vitastor-csi:v2.1.0
+      image: vitalif/vitastor-csi:v1.11.0
       args:
         - "--node=$(NODE_ID)"
         - "--endpoint=$(CSI_ENDPOINT)"

View File

@@ -121,7 +121,7 @@ spec:
       privileged: true
       capabilities:
         add: ["SYS_ADMIN"]
-      image: vitalif/vitastor-csi:v2.1.0
+      image: vitalif/vitastor-csi:v1.11.0
       args:
         - "--node=$(NODE_ID)"
        - "--endpoint=$(CSI_ENDPOINT)"

View File

@@ -5,7 +5,7 @@ package vitastor
 const (
     vitastorCSIDriverName = "csi.vitastor.io"
-    vitastorCSIDriverVersion = "2.1.0"
+    vitastorCSIDriverVersion = "1.11.0"
 )
 // Config struct fills the parameters of request or user input

debian/changelog vendored
View File

@@ -1,4 +1,4 @@
-vitastor (2.1.0-1) unstable; urgency=medium
+vitastor (1.11.0-1) unstable; urgency=medium
 * Bugfixes

debian/control vendored
View File

@@ -2,10 +2,7 @@ Source: vitastor
 Section: admin
 Priority: optional
 Maintainer: Vitaliy Filippov <vitalif@yourcmc.ru>
-Build-Depends: debhelper, liburing-dev (>= 0.6), g++ (>= 8), libstdc++6 (>= 8),
- linux-libc-dev, libgoogle-perftools-dev, libjerasure-dev, libgf-complete-dev,
- libibverbs-dev, libisal-dev, cmake, pkg-config, libnl-3-dev, libnl-genl-3-dev,
- node-bindings <!nocheck>, node-gyp, node-nan
+Build-Depends: debhelper, liburing-dev (>= 0.6), g++ (>= 8), libstdc++6 (>= 8), linux-libc-dev, libgoogle-perftools-dev, libjerasure-dev, libgf-complete-dev, libibverbs-dev, libisal-dev, cmake, pkg-config, libnl-3-dev, libnl-genl-3-dev
 Standards-Version: 4.5.0
 Homepage: https://vitastor.io/
 Rules-Requires-Root: no
@@ -62,9 +59,3 @@ Architecture: amd64
 Depends: ${shlibs:Depends}, ${misc:Depends}, vitastor-client, patch, python3, jq
 Description: Vitastor OpenNebula storage plugin
  Vitastor storage plugin for OpenNebula.
-
-Package: node-vitastor
-Architecture: amd64
-Depends: ${shlibs:Depends}, ${misc:Depends}, node-bindings
-Description: Node.js bindings for Vitastor client
- Node.js native bindings for the Vitastor client library (vitastor-client).

View File

@@ -1 +0,0 @@
-usr/lib/x86_64-linux-gnu/nodejs/vitastor
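The dropped `node-vitastor` package installed the bindings at exactly this path; with the master-branch packages, a smoke test could look like this (a sketch; assumes Node.js and the package are installed):

```
apt-get install -y node-vitastor
# index.js and the compiled addon live under the path from the .install file:
node -e 'const v = require("/usr/lib/x86_64-linux-gnu/nodejs/vitastor"); console.log(Object.keys(v))'
```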

View File

@@ -10,14 +10,10 @@ ARG REL=
 WORKDIR /root
 RUN if [ "$REL" = "buster" -o "$REL" = "bullseye" -o "$REL" = "bookworm" ]; then \
-        if [ "$REL" = "buster" ]; then \
-            echo "deb http://archive.debian.org/debian $REL-backports main" >> /etc/apt/sources.list; \
-        else \
-            echo "deb http://deb.debian.org/debian $REL-backports main" >> /etc/apt/sources.list; \
-        fi; \
+        echo "deb http://deb.debian.org/debian $REL-backports main" >> /etc/apt/sources.list; \
         echo >> /etc/apt/preferences; \
         echo 'Package: *' >> /etc/apt/preferences; \
-        echo "Pin: release n=$REL-backports" >> /etc/apt/preferences; \
+        echo "Pin: release a=$REL-backports" >> /etc/apt/preferences; \
         echo 'Pin-Priority: 500' >> /etc/apt/preferences; \
     fi; \
     grep '^deb ' /etc/apt/sources.list | perl -pe 's/^deb/deb-src/' >> /etc/apt/sources.list; \
@@ -60,7 +56,7 @@ RUN set -e; \
     quilt add block/vitastor.c; \
     cp /root/qemu_driver.c block/vitastor.c; \
     quilt refresh; \
-    V=$(head -n1 debian/changelog | perl -pe 's/5\.2\+dfsg-9/5.2+dfsg-11/; s/^.*\((.*?)(\+deb\d+u\d+)?(~bpo[\d\+]*)?\).*$/$1/')+vitastor5; \
+    V=$(head -n1 debian/changelog | perl -pe 's/5\.2\+dfsg-9/5.2+dfsg-11/; s/^.*\((.*?)(~bpo[\d\+]*)?\).*$/$1/')+vitastor4; \
     if [ "$REL" = bullseye ]; then V=${V}bullseye; fi; \
     DEBEMAIL="Vitaliy Filippov <vitalif@yourcmc.ru>" dch -D $REL -v $V 'Plug Vitastor block driver'; \
     DEB_BUILD_OPTIONS=nocheck dpkg-buildpackage --jobs=auto -sa; \
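For reference, both variants of the echo block generate an apt pin like the sketch below (shown for REL=bookworm). The master branch switched the match from `a=` (archive/suite name) to `n=` (codename) and sources buster's backports from archive.debian.org, where the archived release files no longer match the `a=` pin:

```
# /etc/apt/preferences produced by the master-branch echo lines:
Package: *
Pin: release n=bookworm-backports
Pin-Priority: 500
```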

debian/rules vendored
View File

@@ -4,14 +4,6 @@ export DH_VERBOSE = 1
 %:
 	dh $@
-override_dh_install:
-	perl -pe 's!prefix=/usr!prefix='`pwd`'/debian/tmp/usr!' < obj-x86_64-linux-gnu/src/client/vitastor.pc > node-binding/vitastor.pc
-	cd node-binding && PKG_CONFIG_PATH=./ PKG_CONFIG_ALLOW_SYSTEM_CFLAGS=1 npm install --unsafe-perm || exit 1
-	mkdir -p debian/tmp/usr/lib/x86_64-linux-gnu/nodejs/vitastor/build/Release
-	cp -v node-binding/package.json node-binding/index.js node-binding/addon.cc node-binding/addon.h node-binding/client.cc node-binding/client.h debian/tmp/usr/lib/x86_64-linux-gnu/nodejs/vitastor
-	cp -v node-binding/build/Release/addon.node debian/tmp/usr/lib/x86_64-linux-gnu/nodejs/vitastor/build/Release
-	dh_install
 override_dh_installdeb:
 	cat debian/fio_version >> debian/vitastor-fio.substvars
 	[ -f debian/qemu_version ] && (cat debian/qemu_version >> debian/vitastor-qemu.substvars) || true
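In the removed override_dh_install, the perl one-liner only rewrites the `prefix=` line of the generated vitastor.pc so that `npm install` (node-gyp) compiles and links against the in-tree build rather than an installed copy. The effect can be inspected like this (a sketch; the printed flags are illustrative):

```
cd node-binding
# vitastor.pc now carries prefix=<srcdir>/debian/tmp/usr instead of prefix=/usr
PKG_CONFIG_PATH=./ pkg-config --cflags --libs vitastor
```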

View File

@@ -22,8 +22,7 @@ RUN set -e -x; \
 	echo 'APT::Install-Suggests false;' >> /etc/apt/apt.conf
 RUN apt-get update && \
-    apt-get -y install fio liburing-dev libgoogle-perftools-dev devscripts libjerasure-dev cmake \
-        libibverbs-dev librdmacm-dev libisal-dev libnl-3-dev libnl-genl-3-dev curl nodejs npm node-nan node-bindings && \
+    apt-get -y install fio liburing-dev libgoogle-perftools-dev devscripts libjerasure-dev cmake libibverbs-dev librdmacm-dev libisal-dev libnl-3-dev libnl-genl-3-dev curl && \
     apt-get -y build-dep fio && \
     apt-get --download-only source fio

View File

@@ -3,7 +3,7 @@
 FROM debian:bookworm
 ADD etc/apt /etc/apt/
-RUN apt-get update && apt-get -y install vitastor udev systemd qemu-system-x86 qemu-system-common qemu-block-extra qemu-utils jq nfs-common && apt-get clean
+RUN apt-get update && apt-get -y install vitastor qemu-system-x86 qemu-system-common qemu-block-extra qemu-utils jq nfs-common && apt-get clean
 ADD sleep.sh /usr/bin/
 ADD install.sh /usr/bin/
 ADD scripts /opt/scripts/

View File

@@ -1,9 +1,9 @@
-VITASTOR_VERSION ?= v2.1.0
+VITASTOR_VERSION ?= v1.11.0
 all: build push
 build:
-	@docker build --no-cache --rm -t vitalif/vitastor:$(VITASTOR_VERSION) .
+	@docker build --rm -t vitalif/vitastor:$(VITASTOR_VERSION) .
 push:
 	@docker push vitalif/vitastor:$(VITASTOR_VERSION)

View File

@@ -1,2 +1 @@
 deb http://vitastor.io/debian bookworm main
-deb http://http.debian.net/debian/ bookworm-backports main

View File

@@ -7,8 +7,8 @@ PartOf=vitastor.target
 [Service]
 Restart=always
 EnvironmentFile=/etc/vitastor/docker.conf
-ExecStart=bash -c 'docker run --rm -i -v /etc/vitastor:/etc/vitastor -v /dev:/dev -v /run:/run \
-    --security-opt seccomp=unconfined --privileged --pid=host --log-driver none --network host --name vitastor vitastor:$VITASTOR_VERSION \
+ExecStart=bash -c 'docker run --rm -i -v /etc/vitastor:/etc/vitastor -v /dev:/dev \
+    --privileged --log-driver none --network host --name vitastor vitastor:$VITASTOR_VERSION \
     sleep.sh'
 ExecStartPost=udevadm trigger
 ExecStop=docker stop vitastor
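Because the unit takes its container version from the EnvironmentFile, upgrading a containerized node is an edit-and-restart affair (a sketch using the names from this diff):

```
sed -i 's/^VITASTOR_VERSION=.*/VITASTOR_VERSION=v2.1.0/' /etc/vitastor/docker.conf
systemctl restart vitastor.service
# ExecStartPost=udevadm trigger re-creates the /dev/vitastor/* symlinks
systemctl status vitastor.service --no-pager
```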

View File

@@ -12,8 +12,7 @@ EnvironmentFile=/etc/vitastor/docker.conf
 SyslogIdentifier=vitastor-osd%i
 ExecStart=bash -c 'docker run --rm -i -v /etc/vitastor:/etc/vitastor -v /dev:/dev \
     $(for i in $(ls /dev/vitastor/osd%i-*); do echo --device $i:$i; done) \
-    --log-driver none --network host --ulimit nofile=1048576 --ulimit memlock=-1 \
-    --security-opt seccomp=unconfined $CONTAINER_OPTIONS --name vitastor-osd%i \
+    --log-driver none --network host --ulimit nofile=1048576 --ulimit memlock=-1 $CONTAINER_OPTIONS --name vitastor-osd%i \
     vitastor:$VITASTOR_VERSION vitastor-disk exec-osd /dev/vitastor/osd%i-data'
 ExecStartPre=+docker exec vitastor vitastor-disk pre-exec /dev/vitastor/osd%i-data
 ExecStop=docker stop vitastor-etcd%i
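The OSD unit is a systemd template: one instance per OSD number, with the embedded `for` loop mapping every matching `/dev/vitastor/osd<N>-*` device into the container. Typical usage (a sketch):

```
# start OSD 3; ExecStart runs vitastor-disk exec-osd on /dev/vitastor/osd3-data
systemctl enable --now vitastor-osd@3
journalctl -t vitastor-osd3 -f   # -t matches the SyslogIdentifier from the unit
```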

View File

@@ -4,7 +4,7 @@
 #
 # Desired Vitastor version
-VITASTOR_VERSION=v2.1.0
+VITASTOR_VERSION=1.11.0
 # Additional arguments for all containers
 # For example, you may want to specify a custom logging driver here
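A filled-in /etc/vitastor/docker.conf could look like the sketch below; `CONTAINER_OPTIONS` is the hook the OSD unit above expands, and journald is just one example of the "custom logging driver" the comment refers to:

```
# Desired Vitastor version
VITASTOR_VERSION=v2.1.0
# Additional arguments for all containers
CONTAINER_OPTIONS="--log-driver journald"
```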

View File

@@ -13,7 +13,6 @@ affect their interaction with the cluster.
 - [client_retry_interval](#client_retry_interval)
 - [client_eio_retry_interval](#client_eio_retry_interval)
 - [client_retry_enospc](#client_retry_enospc)
-- [client_wait_up_timeout](#client_wait_up_timeout)
 - [client_max_dirty_bytes](#client_max_dirty_bytes)
 - [client_max_dirty_ops](#client_max_dirty_ops)
 - [client_enable_writeback](#client_enable_writeback)
@@ -71,19 +70,6 @@ and clients are not blocked and just get EIO error code instead.
 Retry writes on out of space errors to wait until some space is freed on
 OSDs.
-
-## client_wait_up_timeout
-
-- Type: seconds
-- Default: 16
-- Can be changed online: yes
-
-Wait for this number of seconds until PGs are up when doing operations
-which require all PGs to be up. Currently only used by object listings
-in delete and merge-based commands ([vitastor-cli rm](../usage/cli.en.md#rm), merge and so on).
-
-The default value is calculated as `1 + OSD lease timeout`, which is
-`1 + etcd_report_interval + max_etcd_attempts*2*etcd_quick_timeout`.
 
 ## client_max_dirty_bytes
 
 - Type: integer
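The removed default of 16 is consistent with the quoted formula under the usual etcd timeouts (as I recall them: etcd_report_interval = 5 s, max_etcd_attempts = 5, etcd_quick_timeout = 1000 ms — verify against your version): 1 + 5 + 5*2*1 = 16 seconds. Being online-changeable, the option lives in the global config in etcd (a sketch; merge into the existing JSON rather than overwriting it):

```
# /vitastor/config/global holds one JSON object with all global options:
etcdctl --endpoints=http://10.0.0.1:2379 get /vitastor/config/global
etcdctl --endpoints=http://10.0.0.1:2379 put /vitastor/config/global \
    '{"client_wait_up_timeout": 30}'   # caution: replaces the whole object
```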

View File

@@ -13,7 +13,6 @@
 - [client_retry_interval](#client_retry_interval)
 - [client_eio_retry_interval](#client_eio_retry_interval)
 - [client_retry_enospc](#client_retry_enospc)
-- [client_wait_up_timeout](#client_wait_up_timeout)
 - [client_max_dirty_bytes](#client_max_dirty_bytes)
 - [client_max_dirty_ops](#client_max_dirty_ops)
 - [client_enable_writeback](#client_enable_writeback)
@@ -73,19 +72,6 @@ RDMA and want to increase peak performance
 Retry write requests that finished with out-of-space errors, i.e.
 wait until space is freed on the OSDs.
-
-## client_wait_up_timeout
-
-- Type: seconds
-- Default: 16
-- Can be changed online: yes
-
-Time to wait for PGs to come up when performing operations that require all
-PGs to be up. Currently only used by object listings in commands based on
-deletion and merging ([vitastor-cli rm](../usage/cli.ru.md#rm), merge and the like).
-
-The default value is calculated as `1 + the OSD lease time`, equal to
-`1 + etcd_report_interval + max_etcd_attempts*2*etcd_quick_timeout`.
 
 ## client_max_dirty_bytes
 
 - Type: integer

View File

@@ -74,13 +74,13 @@ Grafana dashboard suitable for this exporter is here: [Vitastor-Grafana-6+.json]
 - Type: integer
 - Default: 8060
 
-HTTP port for monitors to listen to (including metrics exporter)
+HTTP port for monitors to listen on (including metrics exporter)
 
 ## mon_http_ip
 
 - Type: string
 
-IP address for monitors to listen to (all addresses by default)
+IP address for monitors to listen on (all addresses by default)
 
 ## mon_https_cert

View File

@@ -9,11 +9,9 @@
 These parameters apply to clients and OSDs and affect network connection logic
 between clients, OSDs and etcd.
 
-- [osd_network](#osd_network)
-- [osd_cluster_network](#osd_cluster_network)
+- [tcp_header_buffer_size](#tcp_header_buffer_size)
+- [use_sync_send_recv](#use_sync_send_recv)
 - [use_rdma](#use_rdma)
-- [use_rdmacm](#use_rdmacm)
-- [disable_tcp](#disable_tcp)
 - [rdma_device](#rdma_device)
 - [rdma_port_num](#rdma_port_num)
 - [rdma_gid_index](#rdma_gid_index)
@@ -32,63 +30,38 @@ between clients, OSDs and etcd.
 - [etcd_slow_timeout](#etcd_slow_timeout)
 - [etcd_keepalive_timeout](#etcd_keepalive_timeout)
 - [etcd_ws_keepalive_interval](#etcd_ws_keepalive_interval)
-- [etcd_min_reload_interval](#etcd_min_reload_interval)
-- [tcp_header_buffer_size](#tcp_header_buffer_size)
-- [min_zerocopy_send_size](#min_zerocopy_send_size)
-- [use_sync_send_recv](#use_sync_send_recv)
 
-## osd_network
+## tcp_header_buffer_size
 
-- Type: string or array of strings
+- Type: integer
+- Default: 65536
 
-Network mask of public OSD network(s) (IPv4 or IPv6). Each OSD listens to all
-addresses of UP + RUNNING interfaces matching one of these networks, on the
-same port. Port is auto-selected except if [bind_port](osd.en.md#bind_port) is
-explicitly specified. Bind address(es) may also be overridden manually by
-specifying [bind_address](osd.en.md#bind_address). If OSD networks are not specified
-at all, OSD just listens to a wildcard address (0.0.0.0).
+Size of the buffer used to read data using an additional copy. Vitastor
+packet headers are 128 bytes, payload is always at least 4 KB, so it is
+usually beneficial to try to read multiple packets at once even though
+it requires to copy the data an additional time. The rest of each packet
+is received without an additional copy. You can try to play with this
+parameter and see how it affects random iops and linear bandwidth if you
+want.
 
-## osd_cluster_network
+## use_sync_send_recv
 
-- Type: string or array of strings
+- Type: boolean
+- Default: false
 
-Network mask of separate network(s) (IPv4 or IPv6) to use for OSD
-cluster connections. I.e. OSDs will always attempt to use these networks
-to connect to other OSDs, while clients will attempt to use networks from
-[osd_network](#osd_network).
+If true, synchronous send/recv syscalls are used instead of io_uring for
+socket communication. Useless for OSDs because they require io_uring anyway,
+but may be required for clients with old kernel versions.
 
 ## use_rdma
 
 - Type: boolean
 - Default: true
 
-Try to use RDMA through libibverbs for communication if it's available.
-Disable if you don't want Vitastor to use RDMA. TCP-only clients can also
-talk to an RDMA-enabled cluster, so disabling RDMA may be needed if clients
-have RDMA devices, but they are not connected to the cluster.
-
-`use_rdma` works with RoCEv1/RoCEv2 networks, but not with iWARP and,
-maybe, with some Infiniband configurations which require RDMA-CM.
-Consider `use_rdmacm` for such networks.
-
-## use_rdmacm
-
-- Type: boolean
-- Default: true
-
-Use an alternative implementation of RDMA through RDMA-CM (Connection
-Manager). Works with all RDMA networks: Infiniband, iWARP and
-RoCEv1/RoCEv2, and even allows to disable TCP and run only with RDMA.
-OSDs always use random port numbers for RDMA-CM listeners, different
-from their TCP ports. `use_rdma` is automatically disabled when
-`use_rdmacm` is enabled.
-
-## disable_tcp
-
-- Type: boolean
-- Default: true
-
-Fully disable TCP and only use RDMA-CM for OSD communication.
+Try to use RDMA for communication if it's available. Disable if you don't
+want Vitastor to use RDMA. TCP-only clients can also talk to an RDMA-enabled
+cluster, so disabling RDMA may be needed if clients have RDMA devices,
+but they are not connected to the cluster.
 
 ## rdma_device
 
@@ -119,13 +92,12 @@ PFC (Priority Flow Control) and ECN (Explicit Congestion Notification).
 ## rdma_port_num
 
 - Type: integer
+- Default: 1
 
 RDMA device port number to use. Only for devices that have more than 1 port.
 See `phys_port_cnt` in `ibv_devinfo -v` output to determine how many ports
 your device has.
 
-Not relevant for RDMA-CM (use_rdmacm).
-
 ## rdma_gid_index
 
 - Type: integer
@@ -141,14 +113,13 @@ GID auto-selection is unsupported with libibverbs < v32.
 A correct rdma_gid_index for RoCEv2 is usually 1 (IPv6) or 3 (IPv4).
 
-Not relevant for RDMA-CM (use_rdmacm).
-
 ## rdma_mtu
 
 - Type: integer
+- Default: 4096
 
-RDMA Path MTU to use. Must be 1024, 2048 or 4096. Default is to use the
-RDMA device's MTU.
+RDMA Path MTU to use. Must be 1024, 2048 or 4096. There is usually no
+sense to change it from the default 4096.
 
 ## rdma_max_sge
 
@@ -290,63 +261,3 @@ etcd_report_interval to guarantee that keepalive actually works.
 etcd websocket ping interval required to keep the connection alive and
 detect disconnections quickly.
-
-## etcd_min_reload_interval
-
-- Type: milliseconds
-- Default: 1000
-- Can be changed online: yes
-
-Minimum interval for full etcd state reload. Introduced to prevent
-excessive load on etcd during outages when etcd can't keep up with event
-streams and cancels them.
-
-## tcp_header_buffer_size
-
-- Type: integer
-- Default: 65536
-
-Size of the buffer used to read data using an additional copy. Vitastor
-packet headers are 128 bytes, payload is always at least 4 KB, so it is
-usually beneficial to try to read multiple packets at once even though
-it requires to copy the data an additional time. The rest of each packet
-is received without an additional copy. You can try to play with this
-parameter and see how it affects random iops and linear bandwidth if you
-want.
-
-## min_zerocopy_send_size
-
-- Type: integer
-- Default: 32768
-
-OSDs and clients will attempt to use io_uring-based zero-copy TCP send
-for buffers larger than this number of bytes. Zero-copy send with io_uring is
-supported since Linux kernel version 6.1. Support is auto-detected and disabled
-automatically when not available. It can also be disabled explicitly by setting
-this parameter to a negative value.
-
-⚠️ Warning! Zero-copy send performance may vary greatly from CPU to CPU and from
-one kernel version to another. Generally, it tends to only make benefit with larger
-messages. With smaller messages (say, 4 KB), it may actually be slower. 32 KB is
-enough for almost all CPUs, but even smaller values are optimal for some of them.
-For example, 4 KB is OK for EPYC Milan/Genoa and 12 KB is OK for Xeon Ice Lake
-(but verify it yourself please).
-
-Verification instructions:
-
-1. Add `iommu=pt` into your Linux kernel command line and reboot.
-2. Upgrade your kernel. For example, it's very important to use 6.11+ with recent AMD EPYCs.
-3. Run some tests with the [send-zerocopy liburing example](https://github.com/axboe/liburing/blob/master/examples/send-zerocopy.c)
-   to find the minimal message size for which zero-copy is optimal.
-   Use `./send-zerocopy tcp -4 -R` at the server side and
-   `time ./send-zerocopy tcp -4 -b 0 -s BUFFER_SIZE -D SERVER_IP` at the client side with
-   `-z 0` (no zero-copy) and `-z 1` (zero-copy), and compare MB/s and used CPU time
-   (user+system).
-
-## use_sync_send_recv
-
-- Type: boolean
-- Default: false
-
-If true, synchronous send/recv syscalls are used instead of io_uring for
-socket communication. Useless for OSDs because they require io_uring anyway,
-but may be required for clients with old kernel versions.
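Tying the master-side parameters together: a cluster with split client/cluster traffic running purely over RDMA-CM could be configured roughly like this (a sketch of /etc/vitastor/vitastor.conf; all addresses and subnets are illustrative):

```
cat > /etc/vitastor/vitastor.conf <<'EOF'
{
  "etcd_address": ["10.0.0.1:2379", "10.0.0.2:2379"],
  "osd_network": ["10.0.0.0/24"],
  "osd_cluster_network": ["10.0.1.0/24"],
  "use_rdmacm": true,
  "disable_tcp": true,
  "min_zerocopy_send_size": 32768
}
EOF
```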

View File

@@ -9,11 +9,9 @@
 These parameters are used by clients and OSDs and affect the logic of network
 interaction between clients, OSDs and etcd.
 
-- [osd_network](#osd_network)
-- [osd_cluster_network](#osd_cluster_network)
+- [tcp_header_buffer_size](#tcp_header_buffer_size)
+- [use_sync_send_recv](#use_sync_send_recv)
 - [use_rdma](#use_rdma)
-- [use_rdmacm](#use_rdmacm)
-- [disable_tcp](#disable_tcp)
 - [rdma_device](#rdma_device)
 - [rdma_port_num](#rdma_port_num)
 - [rdma_gid_index](#rdma_gid_index)
@@ -32,63 +30,41 @@
 - [etcd_slow_timeout](#etcd_slow_timeout)
 - [etcd_keepalive_timeout](#etcd_keepalive_timeout)
 - [etcd_ws_keepalive_interval](#etcd_ws_keepalive_interval)
-- [etcd_min_reload_interval](#etcd_min_reload_interval)
-- [tcp_header_buffer_size](#tcp_header_buffer_size)
-- [min_zerocopy_send_size](#min_zerocopy_send_size)
-- [use_sync_send_recv](#use_sync_send_recv)
 
-## osd_network
+## tcp_header_buffer_size
 
-- Type: string or array of strings
+- Type: integer
+- Default: 65536
 
-Subnet masks (IPv4 or IPv6) of the public OSD network(s). Each OSD listens on
-the same port on all addresses of up (UP + RUNNING) network interfaces matching
-one of the given networks. The port is chosen automatically unless
-[bind_port](osd.ru.md#bind_port) is set explicitly. Connection addresses can also
-be overridden explicitly by setting [bind_address](osd.ru.md#bind_address). If no
-OSD networks are set at all, the OSD listens on all addresses (0.0.0.0).
+Size of the buffer for reading data with an additional copy. Vitastor packets
+contain 128-byte headers followed by data of at least 4 KB, and for small I/O
+operations it is usually beneficial to read several packets at once in a single
+call, even though this requires copying the data one extra time. The part of
+each packet beyond the value of this parameter is read without an additional
+copy. You can try changing this parameter and see how it affects random and
+linear access performance.
 
-## osd_cluster_network
+## use_sync_send_recv
 
-- Type: string or array of strings
+- Type: boolean
+- Default: false
 
-Subnet masks (IPv4 or IPv6) of separate cluster network(s) for OSDs.
-That is, OSDs will always try to use these networks for connections to other
-OSDs, while clients will try to use the networks from [osd_network](#osd_network).
+If set to true, ordinary synchronous send/recv syscalls will be used for
+network communication instead of io_uring. For OSDs this is pointless because
+an OSD needs io_uring anyway, but in principle it can be used by clients with
+old kernel versions.
 
 ## use_rdma
 
 - Type: boolean
 - Default: true
 
-Try to use RDMA through libibverbs for communication if devices are
-available. Disable if you don't want Vitastor to use RDMA.
-TCP clients can also talk to an RDMA cluster, so disabling RDMA may only
-be needed if clients have RDMA devices but no connection to the Vitastor
-cluster through them.
-
-`use_rdma` works with RoCEv1/RoCEv2 networks, but does not work with iWARP
-and may not work with some Infiniband configurations that require RDMA-CM.
-Consider enabling `use_rdmacm` for such networks.
-
-## use_rdmacm
-
-- Type: boolean
-- Default: true
-
-Use an alternative RDMA implementation based on RDMA-CM (Connection
-Manager). Works with all types of RDMA networks: Infiniband, iWARP and
-RoCEv1/RoCEv2, and even allows to disable TCP completely and work over
-RDMA only. OSDs use random port numbers for listening for RDMA-CM
-connections, different from their TCP ports. Also, enabling `use_rdmacm`
-automatically disables the `use_rdma` option.
-
-## disable_tcp
-
-- Type: boolean
-- Default: true
-
-Fully disable TCP and use only RDMA-CM for connections to OSDs.
+Try to use RDMA for communication if devices are available.
+Disable if you don't want Vitastor to use RDMA.
+TCP clients can also talk to an RDMA cluster, so disabling RDMA may only
+be needed if clients have RDMA devices but no connection to the Vitastor
+cluster through them.
 
 ## rdma_device
 
@@ -120,14 +96,13 @@ Control) and ECN (Explicit Congestion Notification).
 ## rdma_port_num
 
 - Type: integer
+- Default: 1
 
 The RDMA device port number to use. Only makes sense for devices that
 have more than 1 port. To find out how many ports your adapter has,
 look at `phys_port_cnt` in the output of `ibv_devinfo -v`.
 
-This option is not relevant for RDMA-CM (use_rdmacm).
-
 ## rdma_gid_index
 
 - Type: integer
@@ -144,14 +119,13 @@ libibverbs < v32.
 The correct rdma_gid_index for RoCEv2 is usually 1 (IPv6) or 3 (IPv4).
 
-This option is not relevant for RDMA-CM (use_rdmacm).
-
 ## rdma_mtu
 
 - Type: integer
+- Default: 4096
 
-The maximum transmission unit (Path MTU) for RDMA. Must be 1024, 2048
-or 4096. By default, the RDMA device's MTU value is used.
+The maximum transmission unit (Path MTU) for RDMA. Must be 1024, 2048
+or 4096. There is usually no point in changing the default value of 4096.
 
 ## rdma_max_sge
 
@@ -297,65 +271,3 @@ etcd_report_interval so that keepalive is guaranteed to work.
 The liveness check interval for websocket connections to etcd.
-
-## etcd_min_reload_interval
-
-- Type: milliseconds
-- Default: 1000
-- Can be changed online: yes
-
-The minimum interval for a full reload of the state from etcd. Added to
-prevent excessive load on etcd during outages, when etcd cannot keep up
-with the event streams and cancels them.
-
-## tcp_header_buffer_size
-
-- Type: integer
-- Default: 65536
-
-Size of the buffer for reading data with an additional copy. Vitastor packets
-contain 128-byte headers followed by data of at least 4 KB, and for small I/O
-operations it is usually beneficial to read several packets at once in a single
-call, even though this requires copying the data one extra time. The part of
-each packet beyond the value of this parameter is read without an additional
-copy. You can try changing this parameter and see how it affects random and
-linear access performance.
-
-## min_zerocopy_send_size
-
-- Type: integer
-- Default: 32768
-
-OSDs and clients will try to use io_uring-based zero-copy TCP send for
-buffers larger than this number of bytes. Zero-copy send is supported in
-io_uring since Linux kernel version 6.1. Support is detected automatically,
-and zero-copy is disabled when it is unavailable. It can also be disabled
-explicitly by setting this parameter to a negative value.
-
-⚠️ Warning! The performance of this feature may differ greatly on different
-CPUs and different Linux kernel versions. In general, zero-copy is usually
-faster with large messages, while with small ones (e.g. 4 KB) it may even be
-slower. 32 KB is enough for almost all CPUs, but for some even smaller values
-can be used. For example, 4 KB is suitable for EPYC Milan/Genoa and 12 KB for
-Xeon Ice Lake (but please double-check this yourself).
-
-Verification instructions:
-
-1. Add `iommu=pt` to your Linux kernel boot command line and reboot.
-2. Update the kernel. For example, for AMD EPYC it is very important to use version 6.11+.
-3. Run tests with [send-zerocopy from the liburing examples](https://github.com/axboe/liburing/blob/master/examples/send-zerocopy.c)
-   to find the minimal message size for which zero-copy send is optimal.
-   Run `./send-zerocopy tcp -4 -R` on the server side and
-   `time ./send-zerocopy tcp -4 -b 0 -s BUFFER_SIZE -D SERVER_IP` on the client side
-   with `-z 0` (ordinary send) and `-z 1` (zero-copy send), and compare the
-   speed in MB/s and the consumed CPU time (user+system).
-
-## use_sync_send_recv
-
-- Type: boolean
-- Default: false
-
-If set to true, ordinary synchronous send/recv syscalls will be used for
-network communication instead of io_uring. For OSDs this is pointless because
-an OSD needs io_uring anyway, but in principle it can be used by clients with
-old kernel versions.

View File

@@ -7,15 +7,16 @@
 # Runtime OSD Parameters
 
 These parameters only apply to OSDs, are not fixed at the moment of OSD drive
-initialization and can be changed - in /etc/vitastor/vitastor.conf or [vitastor-disk update-sb](../usage/disk.en.md#update-sb)
-with an OSD restart or, for some of them, even without restarting by updating configuration in etcd.
+initialization and can be changed - either with an OSD restart or, for some of
+them, even without restarting by updating configuration in etcd.
 
-- [bind_address](#bind_address)
-- [bind_port](#bind_port)
 - [osd_iothread_count](#osd_iothread_count)
 - [etcd_report_interval](#etcd_report_interval)
 - [etcd_stats_interval](#etcd_stats_interval)
 - [run_primary](#run_primary)
+- [osd_network](#osd_network)
+- [bind_address](#bind_address)
+- [bind_port](#bind_port)
 - [autosync_interval](#autosync_interval)
 - [autosync_writes](#autosync_writes)
 - [recovery_queue_depth](#recovery_queue_depth)
@@ -60,26 +61,6 @@ with an OSD restart or, for some of them, even without restarting by updating co
 - [recovery_tune_agg_interval](#recovery_tune_agg_interval)
 - [recovery_tune_sleep_min_us](#recovery_tune_sleep_min_us)
 - [recovery_tune_sleep_cutoff_us](#recovery_tune_sleep_cutoff_us)
-- [discard_on_start](#discard_on_start)
-- [min_discard_size](#min_discard_size)
-- [allow_net_split](#allow_net_split)
-
-## bind_address
-
-- Type: string or array of strings
-
-Instead of the network masks ([osd_network](network.en.md#osd_network) and
-[osd_cluster_network](network.en.md#osd_cluster_network)), you can also set
-OSD listen addresses explicitly using this parameter. May be useful if you
-want to start OSDs on interfaces that are not UP + RUNNING.
-
-## bind_port
-
-- Type: integer
-
-By default, OSDs pick random ports to use for incoming connections
-automatically. With this option you can set a specific port for a specific
-OSD by hand.
 
 ## osd_iothread_count
 
@@ -123,6 +104,34 @@ debugging purposes. It's possible to implement additional feature for the
 monitor which may allow to separate primary and secondary OSDs, but it's
 unclear why anyone could need it, so it's not implemented.
 
+## osd_network
+
+- Type: string or array of strings
+
+Network mask of the network (IPv4 or IPv6) to use for OSDs. Note that
+although it's possible to specify multiple networks here, this does not
+mean that OSDs will create multiple listening sockets - they'll only
+pick the first matching address of an UP + RUNNING interface. Separate
+networks for cluster and client connections are also not implemented, but
+they are mostly useless anyway, so it's not a big deal.
+
+## bind_address
+
+- Type: string
+- Default: 0.0.0.0
+
+Instead of the network mask, you can also set OSD listen address explicitly
+using this parameter. May be useful if you want to start OSDs on interfaces
+that are not UP + RUNNING.
+
+## bind_port
+
+- Type: integer
+
+By default, OSDs pick random ports to use for incoming connections
+automatically. With this option you can set a specific port for a specific
+OSD by hand.
+
 ## autosync_interval
 
 - Type: seconds
@@ -307,7 +316,7 @@ for hot data and slower disks - HDDs and maybe SATA SSDs - but will slightly
 decrease write performance for fast disks because page cache is an overhead
 itself.
 
-Choose "directsync" to use [immediate_commit](layout-cluster.en.md#immediate_commit)
+Choose "directsync" to use [immediate_commit](layout-cluster.ru.md#immediate_commit)
 (which requires disable_data_fsync) with drives having write-back cache
 which can't be turned off, for example, Intel Optane. Also note that *some*
 desktop SSDs (for example, HP EX950) may ignore O_SYNC thus making
@@ -620,30 +629,3 @@ are changed to 0.
 Maximum possible value for auto-tuned recovery_sleep_us. Higher values
 are treated as outliers and ignored in aggregation.
-
-## discard_on_start
-
-- Type: boolean
-
-Discard (SSD TRIM) unused data device blocks on every OSD startup.
-
-## min_discard_size
-
-- Type: integer
-- Default: 1048576
-
-Minimum consecutive block size to TRIM it.
-
-## allow_net_split
-
-- Type: boolean
-- Default: false
-
-Allow "safe" cases of network splits/partitions - allow to start PGs without
-connections to some OSDs currently registered as alive in etcd, if the number
-of actually connected PG OSDs is at least pg_minsize. That is, allow some OSDs to lose
-connectivity with some other OSDs as long as it doesn't break pg_minsize guarantees.
-The downside is that it increases the probability of writing data into just pg_minsize
-OSDs during failover which can lead to PGs becoming incomplete after additional outages.
-The old behaviour in versions up to 2.0.0 was equal to enabled allow_net_split.
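Of the parameters removed above, allow_net_split, discard_on_start and min_discard_size are cluster-wide and can go into the global etcd config, while bind_address/bind_port are naturally per-OSD overrides (a sketch; the same merge caveat applies as for any /vitastor/config/global edit):

```
# opt in to "safe" net-split handling and startup TRIM cluster-wide:
etcdctl put /vitastor/config/global \
    '{"allow_net_split": true, "discard_on_start": true, "min_discard_size": 1048576}'
```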

View File

@@ -8,15 +8,16 @@
 These parameters are used only by OSDs but, unlike the disk parameters,
 are not fixed at the moment of OSD disk initialization and can be changed at any
-moment with an OSD restart - in /etc/vitastor/vitastor.conf or via [vitastor-disk update-sb](../usage/disk.ru.md#update-sb) -
-and some of them even without a restart, by changing the configuration in etcd.
+moment with an OSD restart, and some of them even without a restart,
+by changing the configuration in etcd.
 
-- [bind_address](#bind_address)
-- [bind_port](#bind_port)
 - [osd_iothread_count](#osd_iothread_count)
 - [etcd_report_interval](#etcd_report_interval)
 - [etcd_stats_interval](#etcd_stats_interval)
 - [run_primary](#run_primary)
+- [osd_network](#osd_network)
+- [bind_address](#bind_address)
+- [bind_port](#bind_port)
 - [autosync_interval](#autosync_interval)
 - [autosync_writes](#autosync_writes)
 - [recovery_queue_depth](#recovery_queue_depth)
@@ -61,26 +62,6 @@
 - [recovery_tune_agg_interval](#recovery_tune_agg_interval)
 - [recovery_tune_sleep_min_us](#recovery_tune_sleep_min_us)
 - [recovery_tune_sleep_cutoff_us](#recovery_tune_sleep_cutoff_us)
-- [discard_on_start](#discard_on_start)
-- [min_discard_size](#min_discard_size)
-- [allow_net_split](#allow_net_split)
-
-## bind_address
-
-- Type: string or array of strings
-
-Instead of using the subnet masks ([osd_network](network.ru.md#osd_network) and
-[osd_cluster_network](network.ru.md#osd_cluster_network)), you can also explicitly
-set the address(es) the OSDs will listen on with this parameter. This may be
-useful, for example, to start OSDs on interfaces that are down (not UP + RUNNING).
-
-## bind_port
-
-- Type: integer
-
-By default, OSDs pick random ports for incoming connections themselves.
-With this option you can set the port for an individual OSD by hand.
 
 ## osd_iothread_count
 
@@ -126,6 +107,34 @@ max_etcd_attempts * etcd_quick_timeout.
 primary OSDs from secondary ones, but it is not yet clear why anyone would
 need that, so it is not implemented.
 
+## osd_network
+
+- Type: string or array of strings
+
+Subnet mask (IPv4 or IPv6) to use for connections to OSDs.
+Keep in mind that although it is currently possible to pass several subnets
+in this parameter, it does not mean that OSDs will create several listening
+sockets - they will only pick the first address of an up (UP + RUNNING)
+interface matching the given mask. Separation of the OSD cluster and public
+networks is also not implemented. However, it is usually of rather little
+benefit anyway, so this is not a big problem.
+
+## bind_address
+
+- Type: string
+- Default: 0.0.0.0
+
+With this parameter you can explicitly set the address the OSD will listen on
+(instead of using a subnet mask). This may be useful, for example, to start
+OSDs on interfaces that are down (not UP + RUNNING).
+
+## bind_port
+
+- Type: integer
+
+By default, OSDs pick random ports for incoming connections themselves.
+With this option you can set the port for an individual OSD by hand.
+
 ## autosync_interval
 
 - Type: seconds
@@ -651,31 +660,3 @@ EC (error correction codes) with more than 1 disk
 The maximum possible value of the auto-tuned recovery_sleep_us.
 Larger values are considered random outliers and are ignored in
 the averaging.
-
-## discard_on_start
-
-- Type: boolean
-
-Release (SSD TRIM) unused data device blocks on every OSD startup.
-
-## min_discard_size
-
-- Type: integer
-- Default: 1048576
-
-The minimum size of a contiguous data block for releasing it via TRIM.
-
-## allow_net_split
-
-- Type: boolean
-- Default: false
-
-Allow "safe" cases of network splits - allow to activate PGs without
-connections to some OSDs marked as alive in etcd, if the total number of
-alive OSDs in a PG is at least pg_minsize. That is, allow some OSDs to lose
-connections to some other OSDs, as long as this does not violate the pg_minsize
-guarantees. The downside of allowing this is that it increases the probability
-of writing data to exactly pg_minsize OSDs during failover, which can then lead
-to PGs becoming incomplete if some more OSDs fail.
-The old behaviour in versions before 2.0.0 was identical to allow_net_split being enabled.

View File

@@ -43,7 +43,7 @@ Parameters:
 - [osd_tags](#osd_tags)
 - [primary_affinity_tags](#primary_affinity_tags)
 - [scrub_interval](#scrub_interval)
-- [used_for_app](#used_for_app)
+- [used_for_fs](#used_for_fs)
 
 Examples:
 
@@ -189,9 +189,6 @@ So, pg_minsize regulates the number of failures that a pool can tolerate
 without temporary downtime for [osd_out_time](monitor.en.md#osd_out_time),
 but at a cost of slightly reduced storage reliability.
 
-See also [allow_net_split](osd.en.md#allow_net_split) and
-[PG state descriptions](../usage/admin.en.md#pg-states).
-
 FIXME: pg_minsize behaviour may be changed in the future to only make PGs
 read-only instead of deactivating them.
 
@@ -380,37 +377,24 @@ of the OSDs containing a data chunk for a PG.
 Automatic scrubbing interval for this pool. Overrides
 [global scrub_interval setting](osd.en.md#scrub_interval).
 
-## used_for_app
+## used_for_fs
 
 - Type: string
 
-If non-empty, the pool is marked as used for a separate application, for example,
-VitastorFS or S3, which allocates Vitastor volume IDs by itself and does not use
-image/inode metadata in etcd.
+If non-empty, the pool is marked as used for VitastorFS with metadata stored
+in block image (regular Vitastor volume) named as the value of this pool parameter.
 
-When a pool is marked as used for such app, regular block volume creation in it
+When a pool is marked as used for VitastorFS, regular block volume creation in it
 is disabled (vitastor-cli refuses to create images without --force) to protect
-the user from block volume and FS/S3 volume ID collisions and data loss.
+the user from block volume and FS file ID collisions and data loss.
 
-Also such pools do not calculate per-inode space usage statistics in etcd because
-using it for an external application implies that it may contain a very large
-number of volumes and their statistics may take too much space in etcd.
-
-Setting used_for_app to `fs:<name>` tells Vitastor that the pool is used for VitastorFS
-with VitastorKV metadata base stored in a block image (regular Vitastor volume) named
-`<name>`.
-
-[vitastor-nfs](../usage/nfs.en.md), in its turn, refuses to use pools not marked
+[vitastor-nfs](../usage/nfs.ru.md), in its turn, refuses to use pools not marked
 for the corresponding FS when starting. This also implies that you can use one
 pool only for one VitastorFS.
 
-If you plan to use the pool for S3, set its used_for_app to `s3:<name>`. `<name>` may
-be basically anything you want (for example, `s3:standard`) - it's not validated
-by Vitastor S3 components in any way.
-
-All other values except prefixed with `fs:` or `s3:` may be used freely and don't
-mean anything special for Vitastor core components. For now, you can use them as
-you wish.
+The second thing that is disabled for VitastorFS pools is reporting per-inode space
+usage statistics in etcd because a FS pool may store a very large number of files
+and statistics for them all would take a lot of space in etcd.
 
 # Examples
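To make the master-side semantics concrete: a FS pool and an S3 pool would carry `used_for_app` values like `fs:myfs` and `s3:standard` in their pool configuration, after which plain image creation is refused (a sketch; the pool ID and image name are illustrative):

```
# pool 2 is configured with "used_for_app": "fs:myfs";
# its VitastorKV metadata lives in the block image "myfs"
vitastor-cli create -s 10G --pool 2 testimg           # refused without --force
vitastor-cli create -s 10G --pool 2 testimg --force   # override at your own risk
```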

View File

@@ -42,7 +42,7 @@
 - [osd_tags](#osd_tags)
 - [primary_affinity_tags](#primary_affinity_tags)
 - [scrub_interval](#scrub_interval)
-- [used_for_app](#used_for_app)
+- [used_for_fs](#used_for_fs)
 
 Examples:
 
@@ -256,7 +256,7 @@ PGs in Vitastor are ephemeral, which means you can change
 ## raw_placement
 
-- Тип: строка
+- Type: string
 
 Low-level PG generation rules in the form of a DSL (domain-specific language).
 Use it only if you really know why you need it :)
 
@@ -383,42 +383,26 @@ OSDs with "all".
 The scrub interval, i.e. the interval of automatic background data checks for this pool.
 Overrides the [global scrub_interval setting](osd.ru.md#scrub_interval).
 
-## used_for_app
+## used_for_fs
 
-- Тип: строка
+- Type: string
 
-If non-empty, the pool is marked as used for a separate application, for example
-VitastorFS or S3, which allocates image IDs in the pool by itself and does not
-use image/inode metadata in etcd.
+If non-empty, the pool is marked as used for the VitastorFS file system with
+metadata stored in a Vitastor block image whose name equals the value of this
+parameter.
 
-When a pool is marked as used by such an application, creation of regular block
-images in it is forbidden (vitastor-cli refuses to create images without --force)
-to protect the user from collisions between the IDs of block images and FS/S3
-volumes and, thus, from data loss.
+When a pool is marked as used for VitastorFS, creation of regular block
+images in it is disabled (vitastor-cli refuses to create images without --force)
+to protect the user from collisions between file and block image IDs and,
+thus, from data loss.
 
-Also, reporting of per-inode statistics to etcd is disabled for such pools,
-because use by an external application implies that the pool may contain very
-many volumes and their statistics could take too much space in etcd.
-
-Setting used_for_app to `fs:<name>` tells Vitastor that the pool is used for
-VitastorFS with a VitastorKV metadata base stored in a block image named
-`<name>`.
-
 [vitastor-nfs](../usage/nfs.ru.md), in its turn, on start refuses to use
-pools that are not marked as used for the given FS. This also
-means that one pool can be used for only one VitastorFS.
+pools not allocated for it. This also means that one
+pool can be used for only one VitastorFS.
 
-If you plan to use a pool for S3 data, set its used_for_app
-to `s3:<name>`, where `<name>` is any name of your choosing
-(for example, `s3:standard`) - the exact contents of `<name>` are not checked
-in any way by the Vitastor S3 components.
-
-See also [allow_net_split](osd.ru.md#allow_net_split) and
-[the PG state documentation](../usage/admin.ru.md#состояния-pg).
-
-All other values of used_for_app except those starting with `fs:` or `s3:` do
-not mean anything special for the core Vitastor components, so for now you can
-use them freely in any way you like.
+Also, reporting of per-inode statistics to etcd is disabled for FS pools,
+because an FS pool may contain a very large number of files and statistics
+for all of them would take a lot of space in etcd.
 
 # Examples

View File

@@ -75,11 +75,11 @@
 - name: mon_http_port
   type: int
   default: 8060
-  info: HTTP port for monitors to listen to (including metrics exporter)
+  info: HTTP port for monitors to listen on (including metrics exporter)
   info_ru: The port monitors accept HTTP connections on (including for serving metrics)
 - name: mon_http_ip
   type: string
-  info: IP address for monitors to listen to (all addresses by default)
+  info: IP address for monitors to listen on (all addresses by default)
   info_ru: The IP address monitors accept HTTP connections on (all addresses by default)
 - name: mon_https_cert
   type: string

View File

@ -1,78 +1,49 @@
- name: osd_network
type: string or array of strings
type_ru: строка или массив строк
info: |
Network mask of public OSD network(s) (IPv4 or IPv6). Each OSD listens to all
addresses of UP + RUNNING interfaces matching one of these networks, on the
same port. Port is auto-selected except if [bind_port](osd.en.md#bind_port) is
explicitly specified. Bind address(es) may also be overridden manually by
specifying [bind_address](osd.en.md#bind_address). If OSD networks are not specified
at all, OSD just listens to a wildcard address (0.0.0.0).
info_ru: |
Маски подсетей (IPv4 или IPv6) публичной сети или сетей OSD. Каждый OSD слушает
один и тот же порт на всех адресах поднятых (UP + RUNNING) сетевых интерфейсов,
соответствующих одной из указанных сетей. Порт выбирается автоматически, если
только [bind_port](osd.ru.md#bind_port) не задан явно. Адреса для подключений можно
также переопределить явно, задав [bind_address](osd.ru.md#bind_address). Если сети OSD
не заданы вообще, OSD слушает все адреса (0.0.0.0).
- name: osd_cluster_network
type: string or array of strings
type_ru: строка или массив строк
info: |
Network mask of separate network(s) (IPv4 or IPv6) to use for OSD
cluster connections. I.e. OSDs will always attempt to use these networks
to connect to other OSDs, while clients will attempt to use networks from
[osd_network](#osd_network).
info_ru: |
Маски подсетей (IPv4 или IPv6) отдельной кластерной сети или сетей OSD.
То есть, OSD будут всегда стараться использовать эти сети для соединений
с другими OSD, а клиенты будут стараться использовать сети из [osd_network](#osd_network).
- name: use_rdma
type: bool
default: true
info: |
Try to use RDMA through libibverbs for communication if it's available.
Disable if you don't want Vitastor to use RDMA. TCP-only clients can also
talk to an RDMA-enabled cluster, so disabling RDMA may be needed if clients
have RDMA devices, but they are not connected to the cluster.
`use_rdma` works with RoCEv1/RoCEv2 networks, but not with iWARP and,
maybe, with some Infiniband configurations which require RDMA-CM.
Consider `use_rdmacm` for such networks.
info_ru: |
Попробовать использовать RDMA через libibverbs для связи при наличии
доступных устройств. Отключите, если вы не хотите, чтобы Vitastor
использовал RDMA. TCP-клиенты также могут работать с RDMA-кластером,
так что отключать RDMA может быть нужно, только если у клиентов есть
RDMA-устройства, но они не имеют соединения с кластером Vitastor.
`use_rdma` работает с RoCEv1/RoCEv2 сетями, но не работает с iWARP и
может не работать с частью конфигураций Infiniband, требующих RDMA-CM.
Рассмотрите включение `use_rdmacm` для таких сетей.
- name: use_rdmacm
type: bool
default: true
info: |
Use an alternative implementation of RDMA through RDMA-CM (Connection
Manager). Works with all RDMA networks: Infiniband, iWARP and
RoCEv1/RoCEv2, and even allows to disable TCP and run only with RDMA.
OSDs always use random port numbers for RDMA-CM listeners, different
from their TCP ports. `use_rdma` is automatically disabled when
`use_rdmacm` is enabled.
info_ru: |
Использовать альтернативную реализацию RDMA на основе RDMA-CM (Connection
Manager). Работает со всеми типами RDMA-сетей: Infiniband, iWARP и
RoCEv1/RoCEv2, и даже позволяет полностью отключить TCP и работать
только на RDMA. OSD используют случайные номера портов для ожидания
соединений через RDMA-CM, отличающиеся от их TCP-портов. Также при
включении `use_rdmacm` автоматически отключается опция `use_rdma`.
- name: disable_tcp
type: bool
default: true
info: |
Fully disable TCP and only use RDMA-CM for OSD communication.
info_ru: |
Полностью отключить TCP и использовать только RDMA-CM для соединений с OSD.
- name: rdma_device
type: string
info: |
@ -122,19 +93,16 @@
Control) и ECN (Explicit Congestion Notification).
- name: rdma_port_num
type: int
default: 1
info: |
RDMA device port number to use. Only for devices that have more than 1 port.
See `phys_port_cnt` in `ibv_devinfo -v` output to determine how many ports
your device has.
Not relevant for RDMA-CM (use_rdmacm).
info_ru: |
Номер порта RDMA-устройства, который следует использовать. Имеет смысл
только для устройств, у которых более 1 порта. Чтобы узнать, сколько портов
у вашего адаптера, посмотрите `phys_port_cnt` в выводе команды
`ibv_devinfo -v`.
Опция неприменима к RDMA-CM (use_rdmacm).
- name: rdma_gid_index
type: int
info: |
@ -148,8 +116,6 @@
GID auto-selection is unsupported with libibverbs < v32.
A correct rdma_gid_index for RoCEv2 is usually 1 (IPv6) or 3 (IPv4).
Not relevant for RDMA-CM (use_rdmacm).
info_ru: |
Номер глобального идентификатора адреса RDMA-устройства, который следует
использовать. Разным gid_index могут соответствовать разные протоколы связи:
@ -162,16 +128,15 @@
libibverbs < v32.
Правильный rdma_gid_index для RoCEv2, как правило, 1 (IPv6) или 3 (IPv4).
Опция неприменима к RDMA-CM (use_rdmacm).
- name: rdma_mtu
type: int
info: |
RDMA Path MTU to use. Must be 1024, 2048 or 4096. Default is to use the
RDMA device's MTU.
info_ru: |
Максимальная единица передачи (Path MTU) для RDMA. Должно быть равно 1024,
2048 или 4096. По умолчанию используется значение MTU RDMA-устройства.
- name: rdma_max_sge
type: int
default: 128
@ -341,96 +306,3 @@
detect disconnections quickly.
info_ru: |
Интервал проверки живости вебсокет-подключений к etcd.
- name: etcd_min_reload_interval
type: ms
default: 1000
online: true
info: |
Minimum interval for full etcd state reload. Introduced to prevent
excessive load on etcd during outages when etcd can't keep up with event
streams and cancels them.
info_ru: |
Минимальный интервал полной перезагрузки состояния из etcd. Добавлено для
предотвращения избыточной нагрузки на etcd во время отказов, когда etcd не
успевает рассылать потоки событий и отменяет их.
- name: tcp_header_buffer_size
type: int
default: 65536
info: |
Size of the buffer used to read data using an additional copy. Vitastor
packet headers are 128 bytes, payload is always at least 4 KB, so it is
usually beneficial to try to read multiple packets at once even though
it requires copying the data an additional time. The rest of each packet
is received without an additional copy. You can try to play with this
parameter and see how it affects random iops and linear bandwidth if you
want.
info_ru: |
Размер буфера для чтения данных с дополнительным копированием. Пакеты
Vitastor содержат 128-байтные заголовки, за которыми следуют данные размером
от 4 КБ и для мелких операций ввода-вывода обычно выгодно за 1 вызов читать
сразу несколько пакетов, даже не смотря на то, что это требует лишний раз
скопировать данные. Часть каждого пакета за пределами значения данного
параметра читается без дополнительного копирования. Вы можете попробовать
поменять этот параметр и посмотреть, как он влияет на производительность
случайного и линейного доступа.
- name: min_zerocopy_send_size
type: int
default: 32768
info: |
OSDs and clients will attempt to use io_uring-based zero-copy TCP send
for buffers larger than this number of bytes. Zero-copy send with io_uring is
supported since Linux kernel version 6.1. Support is auto-detected and disabled
automatically when not available. It can also be disabled explicitly by setting
this parameter to a negative value.
⚠️ Warning! Zero-copy send performance may vary greatly from CPU to CPU and from
one kernel version to another. Generally, it tends to be beneficial only with larger
messages. With smaller messages (say, 4 KB), it may actually be slower. 32 KB is
enough for almost all CPUs, but even smaller values are optimal for some of them.
For example, 4 KB is OK for EPYC Milan/Genoa and 12 KB is OK for Xeon Ice Lake
(but verify it yourself please).
Verification instructions:
1. Add `iommu=pt` into your Linux kernel command line and reboot.
2. Upgrade your kernel. For example, it's very important to use 6.11+ with recent AMD EPYCs.
3. Run some tests with the [send-zerocopy liburing example](https://github.com/axboe/liburing/blob/master/examples/send-zerocopy.c)
to find the minimal message size for which zero-copy is optimal.
Use `./send-zerocopy tcp -4 -R` at the server side and
`time ./send-zerocopy tcp -4 -b 0 -s BUFFER_SIZE -D SERVER_IP` at the client side with
`-z 0` (no zero-copy) and `-z 1` (zero-copy), and compare MB/s and used CPU time
(user+system).
info_ru: |
OSD и клиенты будут пробовать использовать TCP-отправку без копирования (zero-copy) на
основе io_uring для буферов, больших, чем это число байт. Отправка без копирования
поддерживается в io_uring, начиная с версии ядра Linux 6.1. Наличие поддержки
проверяется автоматически и zero-copy отключается, когда поддержки нет. Также
её можно отключить явно, установив данный параметр в отрицательное значение.
⚠️ Внимание! Производительность данной функции может сильно отличаться на разных
процессорах и на разных версиях ядра Linux. В целом, zero-copy обычно быстрее с
большими сообщениями, а с мелкими (например, 4 КБ) zero-copy может быть даже
медленнее. 32 КБ достаточно почти для всех процессоров, но для каких-то можно
использовать даже меньшие значения. Например, для EPYC Milan/Genoa подходит 4 КБ,
а для Xeon Ice Lake - 12 КБ (но, пожалуйста, перепроверьте это сами).
Инструкция по проверке:
1. Добавьте `iommu=pt` в командную строку загрузки вашего ядра Linux и перезагрузитесь.
2. Обновите ядро. Например, для AMD EPYC очень важно использовать версию 6.11+.
3. Позапускайте тесты с помощью [send-zerocopy из примеров liburing](https://github.com/axboe/liburing/blob/master/examples/send-zerocopy.c),
чтобы найти минимальный размер сообщения, для которого zero-copy отправка оптимальна.
Запускайте `./send-zerocopy tcp -4 -R` на стороне сервера и
`time ./send-zerocopy tcp -4 -b 0 -s РАЗМЕР_БУФЕРА -D АДРЕС_СЕРВЕРА` на стороне клиента
с опцией `-z 0` (обычная отправка) и `-z 1` (отправка без копирования), и сравнивайте
скорость в МБ/с и занятое процессорное время (user+system).
- name: use_sync_send_recv
type: bool
default: false
info: |
If true, synchronous send/recv syscalls are used instead of io_uring for
socket communication. Useless for OSDs because they require io_uring anyway,
but may be required for clients with old kernel versions.
info_ru: |
Если установлено в истину, то вместо io_uring для передачи данных по сети
будут использоваться обычные синхронные системные вызовы send/recv. Для OSD
это бессмысленно, так как OSD в любом случае нуждается в io_uring, но, в
принципе, это может применяться для клиентов со старыми версиями ядра.
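As an illustration, the options described in this file can be combined in `/etc/vitastor/vitastor.conf` roughly like this (a sketch only: the subnets and the zero-copy threshold are example values, and everything shown has usable defaults):
```
{
  "osd_network": "10.200.1.0/24",
  "osd_cluster_network": "10.201.1.0/24",
  "use_rdma": true,
  "min_zerocopy_send_size": 32768
}
```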


@ -1,5 +1,5 @@
# Runtime OSD Parameters
These parameters only apply to OSDs, are not fixed at the moment of OSD drive
initialization and can be changed - in /etc/vitastor/vitastor.conf or [vitastor-disk update-sb](../usage/disk.en.md#update-sb)
with an OSD restart or, for some of them, even without restarting by updating configuration in etcd.


@ -2,5 +2,5 @@
Данные параметры используются только OSD, но, в отличие от дисковых параметров,
не фиксируются в момент инициализации дисков OSD и могут быть изменены в любой
момент с перезапуском OSD в /etc/vitastor/vitastor.conf или [vitastor-disk update-sb](../usage/disk.ru.md#update-sb),
а некоторые и без перезапуска, с помощью изменения конфигурации в etcd.


@ -1,26 +1,3 @@
- name: bind_address
type: string or array of strings
type_ru: строка или массив строк
info: |
Instead of the network masks ([osd_network](network.en.md#osd_network) and
[osd_cluster_network](network.en.md#osd_cluster_network)), you can also set
OSD listen addresses explicitly using this parameter. May be useful if you
want to start OSDs on interfaces that are not UP + RUNNING.
info_ru: |
Вместо использования масок подсети ([osd_network](network.ru.md#osd_network) и
[osd_cluster_network](network.ru.md#osd_cluster_network)), вы также можете явно
задать адрес(а), на которых будут ожидать соединений OSD, с помощью данного
параметра. Это может быть полезно, например, чтобы запускать OSD на неподнятых
интерфейсах (не UP + RUNNING).
- name: bind_port
type: int
info: |
By default, OSDs pick random ports to use for incoming connections
automatically. With this option you can set a specific port for a specific
OSD by hand.
info_ru: |
По умолчанию OSD сами выбирают случайные порты для входящих подключений.
С помощью данной опции вы можете задать порт для отдельного OSD вручную.
- name: osd_iothread_count
type: int
default: 0
@ -79,6 +56,44 @@
реализовать дополнительный режим для монитора, который позволит отделять
первичные OSD от вторичных, но пока не понятно, зачем это может кому-то
понадобиться, поэтому это не реализовано.
- name: osd_network
type: string or array of strings
type_ru: строка или массив строк
info: |
Network mask of the network (IPv4 or IPv6) to use for OSDs. Note that
although it's possible to specify multiple networks here, this does not
mean that OSDs will create multiple listening sockets - they'll only
pick the first matching address of an UP + RUNNING interface. Separate
networks for cluster and client connections are also not implemented, but
they are mostly useless anyway, so it's not a big deal.
info_ru: |
Маска подсети (IPv4 или IPv6) для использования для соединений с OSD.
Имейте в виду, что хотя сейчас и можно передать в этот параметр несколько
подсетей, это не означает, что OSD будут создавать несколько слушающих
сокетов - они лишь будут выбирать адрес первого поднятого (состояние UP +
RUNNING), подходящий под заданную маску. Также не реализовано разделение
кластерной и публичной сетей OSD. Правда, от него обычно всё равно довольно
мало толку, так что особенной проблемы в этом нет.
- name: bind_address
type: string
default: "0.0.0.0"
info: |
Instead of the network mask, you can also set OSD listen address explicitly
using this parameter. May be useful if you want to start OSDs on interfaces
that are not UP + RUNNING.
info_ru: |
Этим параметром можно явным образом задать адрес, на котором будет ожидать
соединений OSD (вместо использования маски подсети). Может быть полезно,
например, чтобы запускать OSD на неподнятых интерфейсах (не UP + RUNNING).
- name: bind_port
type: int
info: |
By default, OSDs pick random ports to use for incoming connections
automatically. With this option you can set a specific port for a specific
OSD by hand.
info_ru: |
По умолчанию OSD сами выбирают случайные порты для входящих подключений.
С помощью данной опции вы можете задать порт для отдельного OSD вручную.
- name: autosync_interval
type: sec
default: 5
@ -300,7 +315,7 @@
decrease write performance for fast disks because page cache is an overhead
itself.
Choose "directsync" to use [immediate_commit](layout-cluster.en.md#immediate_commit)
(which requires disable_data_fsync) with drives having write-back cache
which can't be turned off, for example, Intel Optane. Also note that *some*
desktop SSDs (for example, HP EX950) may ignore O_SYNC thus making
@ -750,34 +765,3 @@
Максимальное возможное значение авто-подстроенного recovery_sleep_us.
Большие значения считаются случайными выбросами и игнорируются в
усреднении.
- name: discard_on_start
type: bool
info: Discard (SSD TRIM) unused data device blocks on every OSD startup.
info_ru: Освобождать (SSD TRIM) неиспользуемые блоки диска данных при каждом запуске OSD.
- name: min_discard_size
type: int
default: 1048576
info: Minimum consecutive block size to TRIM it.
info_ru: Минимальный размер последовательного блока данных, чтобы освобождать его через TRIM.
- name: allow_net_split
type: bool
default: false
info: |
Allow "safe" cases of network splits/partitions - allow to start PGs without
connections to some OSDs currently registered as alive in etcd, if the number
of actually connected PG OSDs is at least pg_minsize. That is, allow some OSDs to lose
connectivity with some other OSDs as long as it doesn't break pg_minsize guarantees.
The downside is that it increases the probability of writing data into just pg_minsize
OSDs during failover which can lead to PGs becoming incomplete after additional outages.
The old behaviour in versions up to 2.0.0 was equal to enabled allow_net_split.
info_ru: |
Разрешить "безопасные" случаи разделений сети - разрешить активировать PG без
соединений к некоторым OSD, помеченным активными в etcd, если общее число активных
OSD в PG составляет как минимум pg_minsize. То есть, разрешать некоторым OSD терять
соединения с некоторыми другими OSD, если это не нарушает гарантий pg_minsize.
Минус такого разрешения в том, что оно повышает вероятность записи данных ровно в
pg_minsize OSD во время переключений, что может потом привести к тому, что PG станут
неполными (incomplete), если упадут ещё какие-то OSD.
Старое поведение в версиях до 2.0.0 было идентично включённому allow_net_split.
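For example, a minimal sketch of setting these OSD options cluster-wide in `/etc/vitastor/vitastor.conf` (illustrative values, to be merged into your existing configuration):
```
{
  "discard_on_start": true,
  "min_discard_size": 1048576,
  "allow_net_split": false
}
```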


@ -26,9 +26,9 @@ at Vitastor Kubernetes operator: https://github.com/Antilles7227/vitastor-operat
The instruction is very simple.
1. Download a Docker image of the desired version: \
`docker pull vitastor:v2.1.0`
2. Install scripts to the host system: \
`docker run --rm -it -v /etc:/host-etc -v /usr/bin:/host-bin vitastor:v2.1.0 install.sh`
3. Reload udev rules: \
`udevadm control --reload-rules`


@ -25,9 +25,9 @@ Vitastor можно установить в Docker/Podman. При этом etcd,
Инструкция по установке максимально простая.
1. Скачайте Docker-образ желаемой версии: \
`docker pull vitastor:v2.1.0`
2. Установите скрипты в хост-систему командой: \
`docker run --rm -it -v /etc:/host-etc -v /usr/bin:/host-bin vitastor:v2.1.0 install.sh`
3. Перезагрузите правила udev: \
`udevadm control --reload-rules`


@ -1,191 +0,0 @@
[Documentation](../../README.md#documentation) → Installation → S3 for Vitastor
-----
[Читать на русском](s3.ru.md)
# S3 for Vitastor
The moment has come - Vitastor S3 implementation based on Zenko CloudServer is released.
## Highlights
- Zenko CloudServer is implemented in node.js.
- Object metadata is stored in MongoDB.
- A modified Zenko CloudServer version is used for Vitastor. It differs slightly from
the original: the build is optimised and unneeded dependencies are stripped off.
- Object data is stored in Vitastor block volumes, but the volume metadata is stored in
the same MongoDB, not in Vitastor etcd.
- Objects are written to volumes sequentially one after another. The space is allocated
with rounding to the sector size (4 KB), so each object takes at least 4 KB - see the
sketch after this list.
- An important property of such a storage scheme is that small objects aren't chunked into
parts in Vitastor EC N+K pools and thus don't require reads from all N disks when
downloading.
- Deleted objects are marked as deleted, but the space is only actually freed during
the asynchronously executed "defragmentation" process. Defragmentation runs automatically
in the background when a volume reaches a configured amount of "garbage" (20% by default).
Defragmentation copies actual objects to new volume(s) and then removes the old volume.
Defragmentation can be configured in locationConfig.json.
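To illustrate the rounding rule mentioned in the list above, here is a tiny sketch (an illustration of the described behaviour, not code from the actual backend):
```
# Each object occupies whole 4 KB sectors, so even a 1-byte object takes 4 KB.
def allocated_size(object_size: int, sector: int = 4096) -> int:
    sectors = max(1, -(-object_size // sector))  # ceiling division
    return sectors * sector

assert allocated_size(1) == 4096     # minimum allocation
assert allocated_size(5000) == 8192  # 5000 bytes -> two sectors
```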
## Plans for future development
- User account storage in the DB instead of a static file. Original Zenko uses
a separate closed-source "Scality Vault" service for it, that's why we use
a static file for now.
- More detailed documentation.
- Support for other (and faster) key-value DBMS for object metadata storage.
- Other performance optimisations, for example, related to the used hash function -
MD5 used for Amazon compatibility purposes is relatively slow.
- Object Lifecycle support. There is a Lifecycle implementation for Zenko called
[Backbeat](https://github.com/scality/backbeat) but it's not adapted for Vitastor yet.
- Quota support. Original Zenko uses a separate "SCUBA" service for quotas, but
it's also proprietary and not available publicly.
## Installation
In a few words:
- Install MongoDB, create a user for S3 metadata DB.
- Create a Vitastor pool for S3 data.
- Download and setup the Docker container `vitalif/vitastor-zenko`.
### Setup MongoDB
You can setup MongoDB yourself, following the [MongoDB manual](https://www.mongodb.com/docs/manual/installation/).
Or you can follow the instructions below - it describes a simple example of MongoDB setup
in Docker (through docker-compose) with 3 replicas.
1. On each host, create a file `docker-compose.yml` with the content listed below.
Replace `<YOUR_PASSWORD>` with your future mongodb administrator password, and optionally
replace `0.0.0.0` with `localhost,<server_IP>`. It's recommended to either use a private IP
or [set up TLS](https://www.mongodb.com/docs/manual/tutorial/configure-ssl/) afterwards.
```
version: '3.1'
services:
  mongo:
    container_name: mongo
    image: mongo:7-jammy
    restart: always
    environment:
      MONGO_INITDB_ROOT_USERNAME: root
      MONGO_INITDB_ROOT_PASSWORD: <YOUR_PASSWORD>
    network_mode: host
    volumes:
      - ./keyfile:/opt/keyfile
      - ./mongo-data/db:/data/db
      - ./mongo-data/configdb:/data/configdb
    entrypoint: /bin/bash -c
    command: [ "chown mongodb /opt/keyfile && chmod 600 /opt/keyfile && . /usr/local/bin/docker-entrypoint.sh mongod --replSet rs0 --keyFile /opt/keyfile --bind_ip 0.0.0.0" ]
```
2. Generate a shared cluster key using `openssl rand -base64 756 > ./keyfile` and copy
that `keyfile` to all hosts.
3. Start MongoDB on all hosts with `docker compose up -d mongo`.
4. Enter Mongo Shell with `docker exec -it mongo mongosh -u root -p <YOUR_PASSWORD> localhost/admin`
and execute the following command (replace IP addresses `10.10.10.{1,2,3}` with your host IPs):
`rs.initiate({ _id: 'rs0', members: [
{ _id: 1, host: '10.10.10.1:27017' },
{ _id: 2, host: '10.10.10.2:27017' },
{ _id: 3, host: '10.10.10.3:27017' }
] })`
5. Stay in Mongo Shell and create a user for the future S3 database:
`db.createUser({ user: 's3', pwd: '<YOUR_S3_PASSWORD>', roles: [
{ role: 'readWrite', db: 's3' },
{ role: 'dbAdmin', db: 's3' },
{ role: 'readWrite', db: 'vitastor' },
{ role: 'dbAdmin', db: 'vitastor' }
] })`
### Setup Vitastor
Create a pool in Vitastor for S3 object data, for example:
`vitastor-cli create-pool --ec 2+1 -n 512 s3-data --used_for_app s3:standard`
The `--used_for_app` option works as a fool-proofing measure: it prevents you from
accidentally creating a regular block volume in the S3 pool and overwriting some S3 data.
It also hides inode space statistics of S3 volumes from Vitastor etcd.
Retrieve the ID of your pool with `vitastor-cli ls-pools s3-data --detail`.
### Setup Vitastor S3
1. Add the following lines to `docker-compose.yml` (instead of `network_mode: host`,
you can use `ports: [ "8000:8000", "8002:8002" ]`):
```
  zenko:
    container_name: zenko
    image: vitalif/vitastor-zenko
    restart: always
    security_opt:
      - seccomp:unconfined
    ulimits:
      memlock: -1
    network_mode: host
    volumes:
      - /etc/vitastor:/etc/vitastor
      - /etc/vitastor/s3:/conf
```
2. Download Docker image: `docker pull vitalif/vitastor-zenko`
3. Extract configuration file examples from the Docker image:
```
docker run --rm -it -v /etc/vitastor:/etc/vitastor -v /etc/vitastor/s3:/conf vitalif/vitastor-zenko configure.sh
```
4. Edit configuration files in `/etc/vitastor/s3/`:
- `config.json` - common settings.
- `authdata.json` - user accounts and access keys.
- `locationConfig.json` - S3 storage class list with placement settings.
Note: it actually contains storage classes (like STANDARD, COLD, etc)
instead of "locations" (zones like us-east-1) as in the original Zenko CloudServer.
- Put your MongoDB connection data into `config.json` and `locationConfig.json`.
- Put your Vitastor pool ID into `locationConfig.json`.
- For now, the complete list of Vitastor backend settings is only available [in the code](https://git.yourcmc.ru/vitalif/zenko-arsenal/src/branch/master/lib/storage/data/vitastor/VitastorBackend.ts#L94).
### Start Zenko
Start the S3 server with:
```
docker run --restart always --security-opt seccomp:unconfined --ulimit memlock=-1 --network=host \
-v /etc/vitastor:/etc/vitastor -v /etc/vitastor/s3:/conf --name zenko vitalif/vitastor-zenko
```
If you use default settings, Zenko CloudServer starts on port 8000.
The default access key is `accessKey1` with a secret key of `verySecretKey1`.
Now you can access your S3 with, for example, [s3cmd](https://s3tools.org/s3cmd):
```
s3cmd --access_key=accessKey1 --secret_key=verySecretKey1 --host=http://localhost:8000 mb s3://testbucket
```
Or even mount it with [GeeseFS](https://github.com/yandex-cloud/geesefs):
```
AWS_ACCESS_KEY_ID=accessKey1 \
AWS_SECRET_ACCESS_KEY=verySecretKey1 \
geesefs --endpoint http://localhost:8000 testbucket mountdir
```
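Basic uploads and downloads then work as with any other S3 storage, for example with standard s3cmd commands (`put`/`get` shown for illustration):
```
s3cmd --access_key=accessKey1 --secret_key=verySecretKey1 --host=http://localhost:8000 \
    --host-bucket= put ./myfile s3://testbucket/myfile
s3cmd --access_key=accessKey1 --secret_key=verySecretKey1 --host=http://localhost:8000 \
    --host-bucket= get s3://testbucket/myfile ./myfile.copy
```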
## Author & License
- [Zenko CloudServer](https://s3-server.readthedocs.io/en/latest/) author is Scality,
licensed under [Apache License, version 2.0](https://www.apache.org/licenses/LICENSE-2.0)
- [Vitastor](https://git.yourcmc.ru/vitalif/vitastor/) and Zenko Vitastor backend author is
Vitaliy Filippov, licensed under [VNPL-1.1](https://git.yourcmc.ru/vitalif/vitastor/src/branch/master/VNPL-1.1.txt)
(a "network copyleft" license based on AGPL/SSPL, but worded in a better way)
- Vitastor S3 repository: https://git.yourcmc.ru/vitalif/zenko-cloudserver-vitastor
- Vitastor S3 backend code: https://git.yourcmc.ru/vitalif/zenko-arsenal/src/branch/master/lib/storage/data/vitastor/VitastorBackend.ts


@ -1,171 +0,0 @@
[Документация](../../README-ru.md#документация) → Установка → S3 на базе Vitastor
-----
[Read in English](s3.en.md)
# S3 на базе Vitastor
Итак, свершилось - реализация Vitastor S3 на базе Zenko CloudServer достигла
состояния готовности к публикации и использованию.
## Ключевые особенности
- Zenko CloudServer реализован на node.js.
- Метаданные объектов хранятся в MongoDB.
- Поставляется модифицированная версия Zenko CloudServer, отвязанная от лишних зависимостей,
с оптимизированной сборкой и немного отличающаяся от оригинала.
- Данные объектов хранятся в блочных томах Vitastor, однако информация о самих томах
сохраняется не в etcd Vitastor, а тоже в БД на основе MongoDB.
- Объекты записываются в тома последовательно друг за другом. Место выделяется с округлением
до размера сектора (до 4 килобайт), поэтому каждый объект занимает как минимум 4 КБ.
- Благодаря такой схеме записи объектов мелкие объекты не нарезаются на части и поэтому не
требуют чтения с N дисков данных в EC N+K пулах Vitastor.
- При удалении объекты помечаются удалёнными, но место освобождается не сразу, а при
запускаемой асинхронно "дефрагментации". Дефрагментация запускается автоматически в фоне
при достижении заданного объёма "мусора" в томе (по умолчанию 20%), копирует актуальные
объекты в новые тома, после чего очищает старый том полностью. Дефрагментацию можно
настраивать в locationConfig.json.
## Планы развития
- Хранение учётных записей в БД, а не в статическом файле (в оригинальном Zenko для
этого используется отдельный закрытый сервис "Scality Vault").
- Более подробная документация.
- Поддержка других (и более производительных) key-value СУБД для хранения метаданных.
- Другие оптимизации производительности, например, в области используемой хеш-функции
(хеш MD5, используемый в целях совместимости, относительно медленный).
- Поддержка Object Lifecycle. Реализация Lifecycle для Zenko существует и называется
[Backbeat](https://github.com/scality/backbeat), но она ещё не адаптирована для Vitastor.
- Квоты. В оригинальном Zenko для этого используется отдельный сервис "SCUBA", однако
он тоже является закрытым и недоступен для публичного использования.
## Установка
Кратко:
- Установите MongoDB, создайте пользователя для БД метаданных S3.
- Создайте в Vitastor пул для хранения данных объектов.
- Скачайте и настройте Docker-контейнер `vitalif/vitastor-zenko`.
### Установка MongoDB
Вы можете установить MongoDB сами, следуя [официальному руководству MongoDB](https://www.mongodb.com/docs/manual/installation/).
Либо вы можете последовать инструкции, приведённой ниже - здесь описан простейший пример
установки MongoDB в Docker (docker-compose) в конфигурации с 3 репликами.
1. На всех 3 серверах создайте файл `docker-compose.yml`, заменив `<ВАШ_ПАРОЛЬ>`
на собственный будущий пароль администратора mongodb, а `0.0.0.0` по желанию
заменив на `localhost,<IP_сервера>` - желательно либо использовать публично недоступный IP,
либо потом [настроить TLS](https://www.mongodb.com/docs/manual/tutorial/configure-ssl/).
```
version: '3.1'
services:
  mongo:
    container_name: mongo
    image: mongo:7-jammy
    restart: always
    environment:
      MONGO_INITDB_ROOT_USERNAME: root
      MONGO_INITDB_ROOT_PASSWORD: <ВАШ_ПАРОЛЬ>
    network_mode: host
    volumes:
      - ./keyfile:/opt/keyfile
      - ./mongo-data/db:/data/db
      - ./mongo-data/configdb:/data/configdb
    entrypoint: /bin/bash -c
    command: [ "chown mongodb /opt/keyfile && chmod 600 /opt/keyfile && . /usr/local/bin/docker-entrypoint.sh mongod --replSet rs0 --keyFile /opt/keyfile --bind_ip 0.0.0.0" ]
```
2. В той же директории сгенерируйте общий ключ кластера командой `openssl rand -base64 756 > ./keyfile`
и скопируйте этот файл на все 3 сервера.
3. На всех 3 серверах запустите MongoDB командой `docker compose up -d mongo`.
4. Зайдите в Mongo Shell с помощью команды `docker exec -it mongo mongosh -u root -p <ВАШ_ПАРОЛЬ> localhost/admin`
и там выполните команду (заменив IP-адреса `10.10.10.{1,2,3}` на адреса своих серверов):
`rs.initiate({ _id: 'rs0', members: [
{ _id: 1, host: '10.10.10.1:27017' },
{ _id: 2, host: '10.10.10.2:27017' },
{ _id: 3, host: '10.10.10.3:27017' }
] })`
5. Находясь там же, в Mongo Shell, создайте пользователя с доступом к будущей базе данных S3:
`db.createUser({ user: 's3', pwd: '<ВАШ_ПАРОЛЬ_S3>', roles: [
{ role: 'readWrite', db: 's3' },
{ role: 'dbAdmin', db: 's3' },
{ role: 'readWrite', db: 'vitastor' },
{ role: 'dbAdmin', db: 'vitastor' }
] })`
### Настройка Vitastor
Создайте в Vitastor отдельный пул для данных объектов S3, например:
`vitastor-cli create-pool --ec 2+1 -n 512 s3-data --used_for_app s3:standard`
Опция `--used_for_app` работает как "защита от дурака" и не даёт вам случайно создать
в этом пуле обычный блочный том и перезаписать им какие-то данные S3, а также скрывает
статистику занятого места по томам S3 из etcd.
Получите ID своего пула с помощью команды `vitastor-cli ls-pools s3-data --detail`.
### Установка Vitastor S3
1. Добавьте в `docker-compose.yml` строки (альтернативно вместо `network_mode: host`
можно использовать `ports: [ "8000:8000", "8002:8002" ]`):
```
  zenko:
    container_name: zenko
    image: vitalif/vitastor-zenko
    restart: always
    security_opt:
      - seccomp:unconfined
    ulimits:
      memlock: -1
    network_mode: host
    volumes:
      - /etc/vitastor:/etc/vitastor
      - /etc/vitastor/s3:/conf
```
2. Извлеките из Docker-образа Vitastor примеры файлов конфигурации:
`docker run --rm -it -v /etc/vitastor:/etc/vitastor -v /etc/vitastor/s3:/conf vitalif/vitastor-zenko configure.sh`
3. Отредактируйте файлы конфигурации в `/etc/vitastor/s3/`:
- `config.json` - общие настройки.
- `authdata.json` - учётные записи и ключи доступа.
- `locationConfig.json` - список классов хранения S3 с настройками расположения.
Внимание: в данной версии это именно список S3 storage class-ов (STANDARD, COLD и т.п.),
а не зон (подобных us-east-1), как в оригинальном Zenko CloudServer.
- В `config.json` и в `locationConfig.json` пропишите свои данные подключения к MongoDB.
- В `locationConfig.json` укажите ID пула Vitastor для хранения данных.
- Полный перечень настроек Vitastor-бэкенда пока можно посмотреть [в коде](https://git.yourcmc.ru/vitalif/zenko-arsenal/src/branch/master/lib/storage/data/vitastor/VitastorBackend.ts#L94).
### Запуск
Запустите S3-сервер: `docker-compose up -d zenko`
Готово! Вы получили S3-сервер, работающий на порту 8000.
Можете попробовать обратиться к нему с помощью, например, [s3cmd](https://s3tools.org/s3cmd):
`s3cmd --host-bucket= --no-ssl --access_key=accessKey1 --secret_key=verySecretKey1 --host=http://localhost:8000 mb s3://testbucket`
Или смонтировать его с помощью [GeeseFS](https://github.com/yandex-cloud/geesefs):
`AWS_ACCESS_KEY_ID=accessKey1 AWS_SECRET_ACCESS_KEY=verySecretKey1 geesefs --endpoint http://localhost:8000 testbucket /mnt/geesefs`
## Лицензия
- Автор [Zenko CloudServer](https://s3-server.readthedocs.io/en/latest/) - Scality, лицензия [Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0)
- Vitastor-бэкенд для S3, как и сам Vitastor, лицензируется на условиях [VNPL 1.1](https://git.yourcmc.ru/vitalif/vitastor/src/branch/master/VNPL-1.1.txt)
- Репозиторий сборки: https://git.yourcmc.ru/vitalif/zenko-cloudserver-vitastor
- Бэкенд хранения данных: https://git.yourcmc.ru/vitalif/zenko-arsenal/src/branch/master/lib/storage/data/vitastor/VitastorBackend.ts


@ -16,7 +16,7 @@
designated initializers support from C++20
- CMake
- liburing, jerasure headers and libraries
- ISA-L, libibverbs and librdmacm headers and libraries (optional)
- tcmalloc (google-perftools-dev)
## Basic instructions


@ -16,7 +16,7 @@
назначенных инициализаторов (designated initializers) из C++20
- CMake
- Заголовки и библиотеки liburing, jerasure
- Опционально - заголовки и библиотеки ISA-L, libibverbs, librdmacm
- tcmalloc (google-perftools-dev)
## Базовая инструкция


@ -28,7 +28,7 @@
- Per-OSD and per-image I/O and space usage statistics in etcd
- Snapshots and copy-on-write image clones
- [Write throttling to smooth random write workloads in SSD+HDD configurations](../config/osd.en.md#throttle_small_writes)
- RDMA/RoCEv2 support [via libibverbs](../config/network.en.md#use_rdma) or [RDMA-CM](../config/network.en.md#use_rdmacm)
- [Scrubbing](../config/osd.en.md#auto_scrub) (verification of copies)
- [Checksums](../config/layout-osd.en.md#data_csum_type)
- [Client write-back cache](../config/client.en.md#client_enable_writeback)
@ -37,7 +37,6 @@
- [Experimental internal etcd replacement - antietcd](../config/monitor.en.md#use_antietcd)
- [Built-in Prometheus metric exporter](../config/monitor.en.md#enable_prometheus)
- [NFS RDMA support](../usage/nfs.en.md#rdma) (probably also usable for GPUDirect)
- [S3](../installation/s3.en.md)
## Plugins and tools
@ -64,6 +63,7 @@ The following features are planned for the future:
- iSCSI and NVMeoF gateways
- Multi-threaded client
- Faster failover
- Tiered storage (SSD caching)
- NVDIMM support
- Compression (possibly)


@ -30,7 +30,7 @@
- Именование инодов через хранение их метаданных в etcd
- Снапшоты и copy-on-write клоны
- [Сглаживание производительности случайной записи в SSD+HDD конфигурациях](../config/osd.ru.md#throttle_small_writes)
- Поддержка RDMA/RoCEv2 [через libibverbs](../config/network.ru.md#use_rdma) или [RDMA-CM](../config/network.ru.md#use_rdmacm)
- [Фоновая проверка целостности](../config/osd.ru.md#auto_scrub) (сверка копий)
- [Контрольные суммы](../config/layout-osd.ru.md#data_csum_type)
- [Буферизация записи на стороне клиента](../config/client.ru.md#client_enable_writeback)
@ -39,7 +39,6 @@
- [Экспериментальная встроенная замена etcd - antietcd](../config/monitor.ru.md#use_antietcd)
- [Встроенный Prometheus-экспортер метрик](../config/monitor.ru.md#enable_prometheus)
- [Поддержка NFS RDMA](../usage/nfs.ru.md#rdma) (вероятно, также подходящая для GPUDirect)
- [S3](../installation/s3.ru.md)
## Драйверы и инструменты
@ -64,6 +63,7 @@
- iSCSI и NVMeoF прокси
- Многопоточный клиент
- Более быстрое переключение при отказах
- Поддержка SSD-кэширования (tiered storage)
- Поддержка NVDIMM
- Возможно, сжатие


@ -50,7 +50,7 @@ On the monitor hosts:
## Configure OSDs
- Put etcd_address and [osd_network](../config/network.en.md#osd_network) into `/etc/vitastor/vitastor.conf`. Example:
```
{
"etcd_address": ["10.200.1.10:2379","10.200.1.11:2379","10.200.1.12:2379"],


@ -50,7 +50,7 @@
## Настройте OSD
- Пропишите etcd_address и [osd_network](../config/network.ru.md#osd_network) в `/etc/vitastor/vitastor.conf`. Например:
```
{
"etcd_address": ["10.200.1.10:2379","10.200.1.11:2379","10.200.1.12:2379"],


@ -14,7 +14,6 @@
- [Removing a failed disk](#removing-a-failed-disk)
- [Adding a disk](#adding-a-disk)
- [Restoring from lost pool configuration](#restoring-from-lost-pool-configuration)
- [Incompatibility problems](#incompatibility-problems)
- [Upgrading Vitastor](#upgrading-vitastor)
- [OSD memory usage](#osd-memory-usage)
@ -36,19 +35,10 @@ PG state consists of exactly 1 base state and an arbitrary number of additional
PG state always includes exactly 1 of the following base states:
- **active** — PG is active and handles user I/O.
- **incomplete** — Not enough OSDs are available to activate this PG. More exactly, that
  means one of the following:
  - Less than pg_minsize current target OSDs are available for the PG. I.e. more disks
    are lost than allowed by the pool's redundancy scheme.
  - All OSDs of some of PG's history records are unavailable, or, for EC pools, less
    than (pg_size-parity_chunks) OSDs are available in one of the history records.
    In other words it means that some data in this PG was written to an OSD set such that
    it's currently impossible to read it back because these OSDs are down. For example,
    if the pool has pg_size=3 and pg_minsize=1, part of the data may be written only to
    1 OSD. If that exact OSD is lost, PG becomes **incomplete**.
  - [allow_net_split](../config/osd.en.md#allow_net_split) is disabled (default) and
    primary OSD of the PG can't connect to some secondary OSDs marked as alive in etcd.
    I.e. a network partition happened: OSDs can talk to etcd, but not to some other OSDs.
- **offline** — PG isn't activated by any OSD at all. Either primary OSD isn't set for
  this PG at all (if the pool is just created), or an unavailable OSD is set as primary,
  or the primary OSD refuses to start this PG (for example, because of wrong block_size),
@ -167,17 +157,6 @@ done
After that all PGs should peer and find all previous data.
## Incompatibility problems
### ISA-L 2.31
⚠ It is FORBIDDEN to use Vitastor 2.1.0 and earlier versions with ISA-L 2.31 and newer if
you use EC N+K pools and K > 1 on a CPU with GF-NI instruction support, because it WILL
lead to **data loss** during EC recovery.
If you accidentally upgraded ISA-L to 2.31 but didn't upgrade Vitastor and restarted OSDs,
then stop them as soon as possible and either update Vitastor or roll back ISA-L.
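A quick way to check which ISA-L version is installed before upgrading or rolling back (Debian/Ubuntu shown; the `libisal*` package name is an assumption, adjust for your distribution):
```
dpkg -l 'libisal*'
# or look the library up in the linker cache:
ldconfig -p | grep -i isal
```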
## Upgrading Vitastor
Every upcoming Vitastor version is usually compatible with previous both forward


@ -14,7 +14,6 @@
- [Удаление неисправного диска](#удаление-неисправного-диска)
- [Добавление диска](#добавление-диска)
- [Восстановление потерянной конфигурации пулов](#восстановление-потерянной-конфигурации-пулов)
- [Проблемы несовместимости](#проблемы-несовместимости)
- [Обновление Vitastor](#обновление-vitastor)
- [Потребление памяти OSD](#потребление-памяти-osd)
@ -36,20 +35,10 @@
Состояние PG включает в себя ровно 1 флаг из следующих:
- **active** — PG активна и обрабатывает запросы ввода-вывода от пользователей.
- **incomplete** — Недостаточно живых OSD, чтобы включить эту PG. Если точнее, то это
  означает один из следующих вариантов:
  - Доступно менее, чем pg_minsize текущих целевых OSD данной PG. Иными словами, потеряно
    больше дисков, чем это разрешает схема отказоустойчивости пула.
  - Все OSD одной из исторических записей PG недоступны, или, для EC-пулов, в одной
    из исторических записей PG доступно менее, чем (pg_size-parity_chunks) OSD. Другими
    словами это означает, что часть данных этой PG была записана в такой набор OSD, из
    которого их сейчас невозможно прочитать обратно, так как OSD не включены. Например,
    если у пула pg_size=3 и pg_minsize=1, то часть данных может записаться всего на 1 OSD.
    Если потом конкретно этот OSD упадёт, PG окажется **incomplete**.
  - [allow_net_split](../config/osd.ru.md#allow_net_split) отключено (по умолчанию) и
    первичный OSD данной PG не может соединиться с частью вторичных OSD этой PG, помеченных
    как живых в etcd. Это означает, что произошло разделение сети: OSD могут общаться с etcd,
    но не могут общаться с частью других OSD.
- **offline** — PG вообще не активирована ни одним OSD. Либо первичный OSD не назначен вообще
  (если пул только создан), либо в качестве первичного назначен недоступный OSD, либо
  назначенный OSD отказывается запускать эту PG (например, из-за несовпадения block_size),
@ -164,17 +153,6 @@ done
После этого все PG должны пройти peering и найти все предыдущие данные.
## Проблемы несовместимости
### ISA-L 2.31
⚠ ЗАПРЕЩЕНО использовать Vitastor 2.1.0 и более ранних версий с библиотекой ISA-L версии 2.31
или более новой, если вы используете EC-пулы N+K и K > 1 на CPU с поддержкой инструкций GF-NI,
так как это приведёт к **потере данных** при восстановлении из EC.
Если вы случайно обновили ISA-L до 2.31, но не обновили Vitastor, и успели перезапустить OSD,
то как можно скорее остановите их все и либо обновите Vitastor, либо откатите ISA-L.
## Обновление Vitastor
Обычно каждая следующая версия Vitastor совместима с предыдущими и "вперёд", и "назад"


@ -355,7 +355,7 @@ Set OSD reweight, tags or noout flag. See detail description in [OSD config docu
## pg-list
`vitastor-cli pg-list|pg-ls|list-pg|ls-pg|ls-pgs|pgs [OPTIONS] [state1+state2] [^state3] [...]`
List PGs with any of listed state filters (^ or ! in the beginning is negation). Options:
@ -363,7 +363,6 @@ List PGs with any of listed state filters (^ or ! in the beginning is negation).
--pool <pool name or number> Only list PGs of the given pool.
--min <min pg number> Only list PGs with number >= min.
--max <max pg number> Only list PGs with number <= max.
--osd 1,2,... Only list PGs with some data on specified OSD(s).
```
Examples:
@ -378,11 +377,11 @@ Examples:
Create a pool. Required parameters:
| <!-- --> | <!-- --> |
|--------------------------|-----------------------------------------------------------------------------------------|
| `-s R` or `--pg_size R` | Number of replicas for replicated pools |
| `--ec N+K` | Number of data (N) and parity (K) chunks for erasure-coded pools |
| `-n N` or `--pg_count N` | PG count for the new pool (start with 10*\<OSD count\>/pg_size rounded to a power of 2) |
Optional parameters:
@ -399,8 +398,7 @@ Optional parameters:
| `--raw_placement <rules>` | Specify raw PG generation rules ([details](../config/pool.en.md#raw_placement)) |
| `--primary_affinity_tags tags` | Prefer to put primary copies on OSDs with all specified tags |
| `--scrub_interval <time>` | Enable regular scrubbing for this pool. Format: number + unit s/m/h/d/M/y |
| `--used_for_app fs:<name>` | Mark pool as used for VitastorFS with metadata in image `<name>` |
| `--used_for_app s3:<name>` | Mark pool as used for S3 location with name `<name>` |
| `--pg_stripe_size <number>` | Increase object grouping stripe |
| `--max_osd_combinations 10000` | Maximum number of random combinations for LP solver input |
| `--wait` | Wait for the new pool to come online |
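For example, the S3 installation guide above creates an EC 2+1 pool for object data like this:
```
vitastor-cli create-pool --ec 2+1 -n 512 s3-data --used_for_app s3:standard
```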


@ -22,8 +22,6 @@ vitastor-cli - интерфейс командной строки для адм
- [flatten](#flatten)
- [rm-data](#rm-data)
- [merge-data](#merge-data)
- [describe](#describe)
- [fix](#fix)
- [alloc-osd](#alloc-osd)
- [rm-osd](#rm-osd)
- [osd-tree](#osd-tree)
@ -377,10 +375,9 @@ OSD PARENT UP SIZE USED% TAGS WEIGHT BLOCK BITMAP
в начале фильтра означает отрицание). Опции:
```
--pool <pool name or number> Вывести только PG в заданном пуле.
--min <min pg number> Вывести только PG с номерами >= min.
--max <max pg number> Вывести только PG с номерами <= max.
--osd 1,2,... Вывести только PG с данными на заданных OSD.
```
Примеры:
@ -395,11 +392,11 @@ OSD PARENT UP SIZE USED% TAGS WEIGHT BLOCK BITMAP
Создать пул. Обязательные параметры:
| <!-- --> | <!-- --> |
|---------------------------|-----------------------------------------------------------------------------------------------|
| `-s R` или `--pg_size R` | Число копий данных для реплицированных пулов |
| `--ec N+K` | Число частей данных (N) и чётности (K) для пулов с кодами коррекции ошибок |
| `-n N` или `--pg_count N` | Число PG для нового пула (начните с 10*\<число OSD\>/pg_size, округлённого до степени двойки) |
Необязательные параметры:


@ -14,7 +14,6 @@ It supports the following commands:
- [upgrade-simple](#upgrade-simple)
- [resize](#resize)
- [raw-resize](#raw-resize)
- [trim](#trim)
- [start/stop/restart/enable/disable](#start/stop/restart/enable/disable)
- [purge](#purge)
- [read-sb](#read-sb)
@ -98,9 +97,6 @@ Options (both modes):
--data_device_block 4k Override data device block size
--meta_device_block 4k Override metadata device block size
--journal_device_block 4k Override journal device block size
--discard_on_start 0 TRIM unused data device blocks every OSD start (default off)
--min_discard_size 1M Minimum TRIM block size
--json Enable JSON output
```
[immediate_commit](../config/layout-cluster.en.md#immediate_commit) setting is
@ -183,19 +179,6 @@ parameters from OSD command line (i.e. from systemd unit or superblock).
SIZE may include k/m/g/t suffixes. If any of the new layout parameter
options are not specified, old values will be used.
## trim
`vitastor-disk trim <osd_num>|<osd_device> [<osd_num>|<osd_device>...]`
Try to discard unused blocks (SSD TRIM) on the data device of each of the OSD(s).
May only be used on stopped OSDs. Options:
```
--min_discard_size 1M Minimum TRIM block size
--discard_granularity 0 Override device's discard granularity
```
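Usage example (the OSD number and device name here are hypothetical, the syntax follows the description above):
```
vitastor-disk trim 3
vitastor-disk trim /dev/nvme0n1 --min_discard_size 4M
```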
## start/stop/restart/enable/disable
`vitastor-disk start|stop|restart|enable|disable [--now] <device> [device2 device3 ...]`


@ -99,9 +99,6 @@ vitastor-disk - инструмент командной строки для уп
--data_device_block 4k Задать размер блока устройства данных
--meta_device_block 4k Задать размер блока метаданных
--journal_device_block 4k Задать размер блока журнала
--discard_on_start 0 Выполнять TRIM пустых блоков данных при запуске OSD (по умолчанию нет)
--min_discard_size 1M Минимальный размер блока для TRIM
--json Включить JSON-вывод
```
Настройка [immediate_commit](../config/layout-cluster.ru.md#immediate_commit) Настройка [immediate_commit](../config/layout-cluster.ru.md#immediate_commit)
@ -185,20 +182,6 @@ throttle_target_mbs, throttle_target_parallelism, throttle_threshold_us.
`РАЗМЕР` может быть указан с суффиксами k/m/g/t. Если любой из новых параметров `РАЗМЕР` может быть указан с суффиксами k/m/g/t. Если любой из новых параметров
расположения не указан, он принимается равным старому значению. расположения не указан, он принимается равным старому значению.
## trim
`vitastor-disk trim <osd_num>|<osd_device> [<osd_num>|<osd_device>...]`
Попробовать пометить пустые блоки дисков данных всех указанных OSD неиспользуемыми
(выполнить команду SSD TRIM).
Можно использовать только с остановленными OSD. Опции:
```
--min_discard_size 1M Минимальный размер блока для TRIM
--discard_granularity 0 Кратность размера блока для TRIM
```
## start/stop/restart/enable/disable ## start/stop/restart/enable/disable
`vitastor-disk start|stop|restart|enable|disable [--now] <device> [device2 device3 ...]` `vitastor-disk start|stop|restart|enable|disable [--now] <device> [device2 device3 ...]`

View File

@ -14,9 +14,6 @@ Commands:
- [upgrade](#upgrade) - [upgrade](#upgrade)
- [defrag](#defrag) - [defrag](#defrag)
⚠️ Important: follow the instructions from [Linux NFS write size](#linux-nfs-write-size)
for optimal Vitastor NFS performance if you use EC and HDD and mount your NFS from Linux.
## Pseudo-FS ## Pseudo-FS
A simplified pseudo-FS proxy is used for file-based image access emulation. It's not A simplified pseudo-FS proxy is used for file-based image access emulation. It's not
@ -61,7 +58,7 @@ To use VitastorFS:
2. Create an image for FS metadata, preferably in a faster (SSD or replica-HDD) pool, 2. Create an image for FS metadata, preferably in a faster (SSD or replica-HDD) pool,
but you can create it in the data pool too if you want (image size doesn't matter): but you can create it in the data pool too if you want (image size doesn't matter):
`vitastor-cli create -s 10G -p fastpool testfs` `vitastor-cli create -s 10G -p fastpool testfs`
3. Mark data pool as an FS pool: `vitastor-cli modify-pool --used-for-app fs:testfs data-pool` 3. Mark data pool as an FS pool: `vitastor-cli modify-pool --used-for-fs testfs data-pool`
4. Either mount the FS: `vitastor-nfs mount --fs testfs --pool data-pool /mnt/vita` 4. Either mount the FS: `vitastor-nfs mount --fs testfs --pool data-pool /mnt/vita`
5. Or start the NFS server: `vitastor-nfs start --fs testfs --pool data-pool` 5. Or start the NFS server: `vitastor-nfs start --fs testfs --pool data-pool`
@ -103,62 +100,6 @@ Other notable missing features which should be addressed in the future:
in the DB. The FS is implemented in such a way that this garbage doesn't affect its in the DB. The FS is implemented in such a way that this garbage doesn't affect its
function, but having a tool to clean it up still seems the right thing to do. function, but having a tool to clean it up still seems the right thing to do.
## Linux NFS write size
Linux NFS client (nfs/nfsv3/nfsv4 kernel modules) has a hard-coded maximum I/O size,
currently set to 1 MB - see `rsize` and `wsize` in [man 5 nfs](https://linux.die.net/man/5/nfs).
This means that when you write to a file in an FS mounted over NFS, the maximum write
request size is 1 MB, even in the O_DIRECT mode and even if the original write request
is larger.
However, for optimal linear write performance in Vitastor EC (erasure-coded) pools,
the size of write requests should be a multiple of [block_size](../config/layout-cluster.en.md#block_size),
multiplied by the data chunk count of the pool ([pg_size](../config/pool.en.md#pg_size)-[parity_chunks](../config/pool.en.md#parity_chunks)).
When write requests are smaller or not a multiple of this number, Vitastor has to first
read paired data blocks from disks, calculate new parity blocks and only then write them
back. Obviously this is 2-3 times slower than a simple disk write.
Vitastor HDD setups use 1 MB block_size by default. So, for optimal performance, if
you use EC 2+1 and HDD, you need your NFS client to send 2 MB write requests, if you
use EC 4+1 - 4 MB and so on.
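A quick sketch of that multiple, with assumed example values (HDD default block_size, EC 2+1):

```
// Optimal EC write size = block_size * (pg_size - parity_chunks).
const block_size = 1048576;            // 1 MB, the HDD default mentioned above
const pg_size = 3, parity_chunks = 1;  // EC 2+1
const optimal = block_size * (pg_size - parity_chunks);
console.log(optimal / 1048576, 'MB');  // -> 2 MB per write request
```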
But the Linux NFS client only writes in 1 MB chunks. 😢
The good news is that you can fix it by rebuilding the Linux NFS kernel modules 😉 🤩!
You need to change NFS_MAX_FILE_IO_SIZE in nfs_xdr.h and then rebuild and reload the modules.
The instructions, using Debian as an example (run them as root):
```
# download current Linux kernel headers required to build modules
apt-get install linux-headers-`uname -r`
# replace NFS_MAX_FILE_IO_SIZE with a desired number (here it's 4194304 - 4 MB)
sed -i 's/NFS_MAX_FILE_IO_SIZE\s*.*/NFS_MAX_FILE_IO_SIZE\t(4194304U)/' /lib/modules/`uname -r`/source/include/linux/nfs_xdr.h
# download current Linux kernel source
mkdir linux_src
cd linux_src
apt-get source linux-image-`uname -r`-unsigned
# build NFS modules
cd linux-*/fs/nfs
make -C /lib/modules/`uname -r`/build M=$PWD -j8 modules
make -C /lib/modules/`uname -r`/build M=$PWD modules_install
# move default NFS modules away
mv /lib/modules/`uname -r`/kernel/fs/nfs ~/nfs_orig_`uname -r`
depmod -a
# unload old modules and load the new ones
rmmod nfsv3 nfs
modprobe nfsv3
```
After these (not too complicated 🙂) manipulations, NFS is mounted
with the new wsize and rsize by default, which fixes Vitastor-NFS linear write performance.
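One way to verify the result after reloading the modules (server address and mount point are assumptions):

```
mount -t nfs -o nfsvers=3 localhost:/ /mnt/vita
grep /mnt/vita /proc/mounts   # rsize=4194304,wsize=4194304 should now appear
```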
## Horizontal scaling ## Horizontal scaling
Linux NFS 3.0 client doesn't support built-in scaling or failover, i.e. you can't Linux NFS 3.0 client doesn't support built-in scaling or failover, i.e. you can't

View File

@ -14,9 +14,6 @@
- [upgrade](#upgrade) - [upgrade](#upgrade)
- [defrag](#defrag) - [defrag](#defrag)
⚠️ Важно: для оптимальной производительности Vitastor NFS в Linux при использовании
HDD и EC (erasure кодов) выполните инструкции из раздела [Размер записи Linux NFS](#размер-записи-linux-nfs).
## Псевдо-ФС ## Псевдо-ФС
Упрощённая реализация псевдо-ФС используется для эмуляции файлового доступа к блочным Упрощённая реализация псевдо-ФС используется для эмуляции файлового доступа к блочным
@ -63,7 +60,7 @@ JSON-формате :-). Для инспекции содержимого БД
или по крайней мере на HDD, но без EC), но можно и в том же пуле, что данные или по крайней мере на HDD, но без EC), но можно и в том же пуле, что данные
(размер образа значения не имеет): (размер образа значения не имеет):
`vitastor-cli create -s 10G -p fastpool testfs` `vitastor-cli create -s 10G -p fastpool testfs`
3. Пометьте пул данных как ФС-пул: `vitastor-cli modify-pool --used-for-app fs:testfs data-pool` 3. Пометьте пул данных как ФС-пул: `vitastor-cli modify-pool --used-for-fs testfs data-pool`
4. Либо примонтируйте ФС: `vitastor-nfs mount --fs testfs --pool data-pool /mnt/vita` 4. Либо примонтируйте ФС: `vitastor-nfs mount --fs testfs --pool data-pool /mnt/vita`
5. Либо запустите сетевой NFS-сервер: `vitastor-nfs start --fs testfs --pool data-pool` 5. Либо запустите сетевой NFS-сервер: `vitastor-nfs start --fs testfs --pool data-pool`
@ -107,66 +104,6 @@ JSON-формате :-). Для инспекции содержимого БД
записи. ФС устроена так, что на работу они не влияют, но для порядка и их стоит записи. ФС устроена так, что на работу они не влияют, но для порядка и их стоит
уметь подчищать. уметь подчищать.
## Размер записи Linux NFS
Клиент Linux NFS (модули ядра nfs/nfsv3/nfsv4) имеет фиксированный в коде максимальный
размер запроса ввода-вывода, равный 1 МБ - см. `rsize` и `wsize` в [man 5 nfs](https://linux.die.net/man/5/nfs).
Это означает, что когда вы записываете в файл в примонтированной по NFS файловой системе,
максимальный размер запроса записи составляет 1 МБ, даже в режиме O_DIRECT и даже если
исходный запрос записи был больше.
Однако для оптимальной скорости линейной записи в Vitastor при использовании EC-пулов
(пулов с кодами коррекции ошибок) запросы записи должны быть по размеру кратны
[block_size](../config/layout-cluster.ru.md#block_size), умноженному на число частей
данных пула ([pg_size](../config/pool.ru.md#pg_size)-[parity_chunks](../config/pool.ru.md#parity_chunks)).
Если запросы записи меньше или не кратны, то Vitastor приходится сначала прочитать
с дисков старые версии парных блоков данных, рассчитать новые блоки чётности и только
после этого записать их на диски. Естественно, это в 2-3 раза медленнее простой записи
на диск.
При этом block_size на жёстких дисках по умолчанию устанавливается равным 1 МБ.
Таким образом, если вы используете EC 2+1 и HDD, для оптимальной скорости записи вам
нужно, чтобы NFS-клиент писал по 2 МБ, если EC 4+1 и HDD - то по 4 МБ, и т.п.
А Linux NFS-клиент пишет только по 1 МБ. 😢
Но это можно исправить, пересобрав модули ядра Linux NFS 😉 🤩! Для этого нужно
поменять значение переменной NFS_MAX_FILE_IO_SIZE в заголовочном файле nfs_xdr.h,
после чего пересобрать модули NFS.
Инструкция по пересборке на примере Debian (выполнять под root):
```
# скачиваем заголовки для сборки модулей для текущего ядра Linux
apt-get install linux-headers-`uname -r`
# заменяем в заголовках NFS_MAX_FILE_IO_SIZE на желаемый (здесь 4194304 - 4 МБ)
sed -i 's/NFS_MAX_FILE_IO_SIZE\s*.*/NFS_MAX_FILE_IO_SIZE\t(4194304U)/' /lib/modules/`uname -r`/source/include/linux/nfs_xdr.h
# скачиваем исходный код текущего ядра
mkdir linux_src
cd linux_src
apt-get source linux-image-`uname -r`-unsigned
# собираем модули NFS
cd linux-*/fs/nfs
make -C /lib/modules/`uname -r`/build M=$PWD -j8 modules
make -C /lib/modules/`uname -r`/build M=$PWD modules_install
# убираем в сторону штатные модули NFS
mv /lib/modules/`uname -r`/kernel/fs/nfs ~/nfs_orig_`uname -r`
depmod -a
# выгружаем старые модули и загружаем новые
rmmod nfsv3 nfs
modprobe nfsv3
```
После такой (относительно нехитрой 🙂) манипуляции NFS начинает по умолчанию
монтироваться с новыми wsize и rsize, и производительность линейной записи в Vitastor-NFS
исправляется.
## Горизонтальное масштабирование ## Горизонтальное масштабирование
Клиент Linux NFS 3.0 не поддерживает встроенное масштабирование или отказоустойчивость. Клиент Linux NFS 3.0 не поддерживает встроенное масштабирование или отказоустойчивость.

View File

@ -162,12 +162,10 @@ apt-get install linux-headers-`uname -r`
apt-get build-dep linux-image-`uname -r`-unsigned apt-get build-dep linux-image-`uname -r`-unsigned
apt-get source linux-image-`uname -r`-unsigned apt-get source linux-image-`uname -r`-unsigned
cd linux*/drivers/vdpa cd linux*/drivers/vdpa
make -C /lib/modules/`uname -r`/build M=$PWD CONFIG_VDPA=m CONFIG_VDPA_USER=m CONFIG_VIRTIO_VDPA=m -j8 modules make -C /lib/modules/`uname -r`/build M=$PWD CONFIG_VDPA=m CONFIG_VDPA_USER=m CONFIG_VIRTIO_VDPA=m -j8 modules modules_install
make -C /lib/modules/`uname -r`/build M=$PWD CONFIG_VDPA=m CONFIG_VDPA_USER=m CONFIG_VIRTIO_VDPA=m modules_install
cat Module.symvers >> /lib/modules/`uname -r`/build/Module.symvers cat Module.symvers >> /lib/modules/`uname -r`/build/Module.symvers
cd ../virtio cd ../virtio
make -C /lib/modules/`uname -r`/build M=$PWD CONFIG_VDPA=m CONFIG_VDPA_USER=m CONFIG_VIRTIO_VDPA=m -j8 modules make -C /lib/modules/`uname -r`/build M=$PWD CONFIG_VDPA=m CONFIG_VDPA_USER=m CONFIG_VIRTIO_VDPA=m -j8 modules modules_install
make -C /lib/modules/`uname -r`/build M=$PWD CONFIG_VDPA=m CONFIG_VDPA_USER=m CONFIG_VIRTIO_VDPA=m modules_install
depmod -a depmod -a
``` ```

View File

@ -165,12 +165,10 @@ apt-get install linux-headers-`uname -r`
apt-get build-dep linux-image-`uname -r`-unsigned apt-get build-dep linux-image-`uname -r`-unsigned
apt-get source linux-image-`uname -r`-unsigned apt-get source linux-image-`uname -r`-unsigned
cd linux*/drivers/vdpa cd linux*/drivers/vdpa
make -C /lib/modules/`uname -r`/build M=$PWD CONFIG_VDPA=m CONFIG_VDPA_USER=m CONFIG_VIRTIO_VDPA=m -j8 modules make -C /lib/modules/`uname -r`/build M=$PWD CONFIG_VDPA=m CONFIG_VDPA_USER=m CONFIG_VIRTIO_VDPA=m -j8 modules modules_install
make -C /lib/modules/`uname -r`/build M=$PWD CONFIG_VDPA=m CONFIG_VDPA_USER=m CONFIG_VIRTIO_VDPA=m modules_install
cat Module.symvers >> /lib/modules/`uname -r`/build/Module.symvers cat Module.symvers >> /lib/modules/`uname -r`/build/Module.symvers
cd ../virtio cd ../virtio
make -C /lib/modules/`uname -r`/build M=$PWD CONFIG_VDPA=m CONFIG_VDPA_USER=m CONFIG_VIRTIO_VDPA=m -j8 modules make -C /lib/modules/`uname -r`/build M=$PWD CONFIG_VDPA=m CONFIG_VDPA_USER=m CONFIG_VIRTIO_VDPA=m -j8 modules modules_install
make -C /lib/modules/`uname -r`/build M=$PWD CONFIG_VDPA=m CONFIG_VDPA_USER=m CONFIG_VIRTIO_VDPA=m modules_install
depmod -a depmod -a
``` ```

View File

@ -253,7 +253,7 @@ function random_custom_combinations(osd_tree, rules, count, ordered)
for (let i = 1; i < rules.length; i++) for (let i = 1; i < rules.length; i++)
{ {
const filtered = filter_tree_by_rules(osd_tree, rules[i], selected); const filtered = filter_tree_by_rules(osd_tree, rules[i], selected);
const idx = select_murmur3(filtered.length, i => 'p:'+f.id+':'+(filtered[i].name || filtered[i].id)); const idx = select_murmur3(filtered.length, i => 'p:'+f.id+':'+filtered[i].id);
selected.push(idx == null ? { levels: {}, id: null } : filtered[idx]); selected.push(idx == null ? { levels: {}, id: null } : filtered[idx]);
} }
const size = selected.filter(s => s.id !== null).length; const size = selected.filter(s => s.id !== null).length;
@ -270,7 +270,7 @@ function random_custom_combinations(osd_tree, rules, count, ordered)
for (const item_rules of rules) for (const item_rules of rules)
{ {
const filtered = selected.length ? filter_tree_by_rules(osd_tree, item_rules, selected) : first; const filtered = selected.length ? filter_tree_by_rules(osd_tree, item_rules, selected) : first;
const idx = select_murmur3(filtered.length, i => n+':'+(filtered[i].name || filtered[i].id)); const idx = select_murmur3(filtered.length, i => n+':'+filtered[i].id);
selected.push(idx == null ? { levels: {}, id: null } : filtered[idx]); selected.push(idx == null ? { levels: {}, id: null } : filtered[idx]);
} }
const size = selected.filter(s => s.id !== null).length; const size = selected.filter(s => s.id !== null).length;
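The `name || id` fallback in the master branch matters because folded failure-domain nodes (see `fold_failure_domains` further below) get fresh synthetic numeric ids on every run, while `name` keeps the original node id, so hashing by the stable key keeps placement deterministic across runs. A minimal sketch of highest-hash selection over such keys - an assumption for illustration, since `select_murmur3` itself is not shown in this diff:

```
// Pick the index whose generated key hashes highest (rendezvous-style choice).
// hash() stands in for murmur3 here - an assumption for illustration.
function select_by_key(count, genKey, hash)
{
    let best = null, best_h = -1;
    for (let i = 0; i < count; i++)
    {
        const h = hash(genKey(i));
        if (h > best_h) { best_h = h; best = i; }
    }
    return best;
}
```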
@ -340,9 +340,9 @@ function filter_tree_by_rules(osd_tree, rules, selected)
} }
// Convert from // Convert from
// node_list = { id: string|number, name?: string, level: string, size?: number, parent?: string|number }[] // node_list = { id: string|number, level: string, size?: number, parent?: string|number }[]
// to // to
// node_tree = { [node_id]: { id, name?, level, size?, parent?, children?: child_node[], levels: { [level]: id, ... } } } // node_tree = { [node_id]: { id, level, size?, parent?, children?: child_node_id[], levels: { [level]: id, ... } } }
function index_tree(node_list) function index_tree(node_list)
{ {
const tree = { '': { children: [], levels: {} } }; const tree = { '': { children: [], levels: {} } };
@ -357,7 +357,7 @@ function index_tree(node_list)
tree[parent_id].children = tree[parent_id].children || []; tree[parent_id].children = tree[parent_id].children || [];
tree[parent_id].children.push(tree[node.id]); tree[parent_id].children.push(tree[node.id]);
} }
const cur = [ ...tree[''].children ]; const cur = tree[''].children;
for (let i = 0; i < cur.length; i++) for (let i = 0; i < cur.length; i++)
{ {
cur[i].levels[cur[i].level] = cur[i].id; cur[i].levels[cur[i].level] = cur[i].id;
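A hypothetical input/output pair for `index_tree()`, following the comment above (the exact output shape is inferred, not part of this diff):

```
// index_tree() turns a flat node list into a tree with per-node level indexes.
const node_list = [
    { id: 'dc1', level: 'dc' },
    { id: 'host1', level: 'host', parent: 'dc1' },
    { id: 1, level: 'osd', size: 1, parent: 'host1' },
];
// Expected result, roughly:
// tree['']      -> { children: [ dc1 ], levels: {} }
// tree['dc1']   -> { ..., children: [ host1 ], levels: { dc: 'dc1' } }
// tree['host1'] -> { ..., children: [ node 1 ], levels: { dc: 'dc1', host: 'host1' } }
// tree[1]       -> { ..., levels: { dc: 'dc1', host: 'host1', osd: 1 } }
```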

View File

@ -1,244 +0,0 @@
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 (see README.md for details)
// Extract OSDs from the lowest affected tree level into a separate (flat) map
// to run PG optimisation on failure domains instead of individual OSDs
//
// node_list = same input as for index_tree()
// rules = [ level, operator, value ][][]
// returns { nodes: new_node_list, leaves: { new_folded_node_id: [ extracted_leaf_nodes... ] } }
function fold_failure_domains(node_list, rules)
{
const interest = {};
for (const level_rules of rules)
{
for (const rule of level_rules)
interest[rule[0]] = true;
}
const max_numeric_id = node_list.reduce((a, c) => a < (0|c.id) ? (0|c.id) : a, 0);
let next_id = max_numeric_id;
const node_map = node_list.reduce((a, c) => { a[c.id||''] = c; return a; }, {});
const old_ids_by_new = {};
const extracted_nodes = {};
let folded = true;
while (folded)
{
const per_parent = {};
for (const node_id in node_map)
{
const node = node_map[node_id];
const p = node.parent || '';
per_parent[p] = per_parent[p]||[];
per_parent[p].push(node);
}
folded = false;
for (const node_id in per_parent)
{
const fold_node = node_id !== '' && per_parent[node_id].length > 0 && per_parent[node_id].filter(child => per_parent[child.id||''] || interest[child.level]).length == 0;
if (fold_node)
{
const old_node = node_map[node_id];
const new_id = ++next_id;
node_map[new_id] = {
...old_node,
id: new_id,
name: node_id, // for use in murmur3 hashes
size: per_parent[node_id].reduce((a, c) => a + (Number(c.size)||0), 0),
};
delete node_map[node_id];
old_ids_by_new[new_id] = node_id;
extracted_nodes[new_id] = [];
for (const child of per_parent[node_id])
{
if (old_ids_by_new[child.id])
{
extracted_nodes[new_id].push(...extracted_nodes[child.id]);
delete extracted_nodes[child.id];
}
else
extracted_nodes[new_id].push(child);
delete node_map[child.id];
}
folded = true;
}
}
}
return { nodes: Object.values(node_map), leaves: extracted_nodes };
}
// Distribute PGs mapped to "folded" nodes to individual OSDs according to their weights
// folded_pgs = optimize_result.int_pgs before folding
// prev_pgs = optional previous PGs from optimize_change() input
// extracted_nodes = output from fold_failure_domains
function unfold_failure_domains(folded_pgs, prev_pgs, extracted_nodes)
{
const maps = {};
let found = false;
for (const new_id in extracted_nodes)
{
const weights = {};
for (const sub_node of extracted_nodes[new_id])
{
weights[sub_node.id] = sub_node.size;
}
maps[new_id] = { weights, prev: [], next: [], pos: 0 };
found = true;
}
if (!found)
{
return folded_pgs;
}
for (let i = 0; i < folded_pgs.length; i++)
{
for (let j = 0; j < folded_pgs[i].length; j++)
{
if (maps[folded_pgs[i][j]])
{
maps[folded_pgs[i][j]].prev.push(prev_pgs && prev_pgs[i] && prev_pgs[i][j] || 0);
}
}
}
for (const new_id in maps)
{
maps[new_id].next = adjust_distribution(maps[new_id].weights, maps[new_id].prev);
}
const mapped_pgs = [];
for (let i = 0; i < folded_pgs.length; i++)
{
mapped_pgs.push(folded_pgs[i].map(osd => (maps[osd] ? maps[osd].next[maps[osd].pos++] : osd)));
}
return mapped_pgs;
}
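// Typical flow (as used by the mon and test code elsewhere in this diff):
// fold the tree, optimise on the folded nodes, then unfold back to real OSDs.
// Condensed sketch; combinator/pg_count/pg_size are assumed to be in scope:
//   const folded = fold_failure_domains(node_list, rules);
//   const res = await LPOptimizer.optimize_initial({
//       osd_weights: folded.nodes.reduce((a, c) => { if (Number(c.id)) { a[c.id] = c.size; } return a; }, {}),
//       combinator, pg_count, pg_size,
//   });
//   res.int_pgs = unfold_failure_domains(res.int_pgs, null, folded.leaves);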
// Return the new array of items re-distributed as close as possible to weights in wanted_weights
// wanted_weights = { [key]: weight }
// cur_items = key[]
function adjust_distribution(wanted_weights, cur_items)
{
const item_map = {};
for (let i = 0; i < cur_items.length; i++)
{
const item = cur_items[i];
item_map[item] = (item_map[item] || { target: 0, cur: [] });
item_map[item].cur.push(i);
}
let total_weight = 0;
for (const item in wanted_weights)
{
total_weight += Number(wanted_weights[item]) || 0;
}
for (const item in wanted_weights)
{
const weight = wanted_weights[item] / total_weight * cur_items.length;
if (weight > 0)
{
item_map[item] = (item_map[item] || { target: 0, cur: [] });
item_map[item].target = weight;
}
}
const diff = (item) => (item_map[item].cur.length - item_map[item].target);
const most_underweighted = Object.keys(item_map)
.filter(item => item_map[item].target > 0)
.sort((a, b) => diff(a) - diff(b));
// Items with zero target weight MUST never be selected - remove them
// and remap each of them to the most underweighted item
for (const item in item_map)
{
if (!item_map[item].target)
{
const prev = item_map[item];
delete item_map[item];
for (const idx of prev.cur)
{
const move_to = most_underweighted[0];
item_map[move_to].cur.push(idx);
move_leftmost(most_underweighted, diff);
}
}
}
// Other over-weighted items are only moved if it improves the distribution
while (most_underweighted.length > 1)
{
const first = most_underweighted[0];
const last = most_underweighted[most_underweighted.length-1];
const first_diff = diff(first);
const last_diff = diff(last);
if (Math.abs(first_diff+1)+Math.abs(last_diff-1) < Math.abs(first_diff)+Math.abs(last_diff))
{
item_map[first].cur.push(item_map[last].cur.pop());
move_leftmost(most_underweighted, diff);
move_rightmost(most_underweighted, diff);
}
else
{
break;
}
}
const new_items = new Array(cur_items.length);
for (const item in item_map)
{
for (const idx of item_map[item].cur)
{
new_items[idx] = item;
}
}
return new_items;
}
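// Worked example (hypothetical values): adjust_distribution({ 1: 1, 2: 1 }, [ 1, 1, 1, 1 ])
// moves two of the four slots from '1' to '2' and returns [ '1', '1', '2', '2' ]
// (keys come back as object-key strings).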
function move_leftmost(sorted_array, diff)
{
// Re-sort by moving the leftmost item to the right if it changes position
const first = sorted_array[0];
const new_diff = diff(first);
let r = 0;
while (r < sorted_array.length-1 && diff(sorted_array[r+1]) <= new_diff)
r++;
if (r > 0)
{
for (let i = 0; i < r; i++)
sorted_array[i] = sorted_array[i+1];
sorted_array[r] = first;
}
}
function move_rightmost(sorted_array, diff)
{
// Re-sort by moving the rightmost item to the left if it changes position
const last = sorted_array[sorted_array.length-1];
const new_diff = diff(last);
let r = sorted_array.length-1;
while (r > 0 && diff(sorted_array[r-1]) > new_diff)
r--;
if (r < sorted_array.length-1)
{
for (let i = sorted_array.length-1; i > r; i--)
sorted_array[i] = sorted_array[i-1];
sorted_array[r] = last;
}
}
// map previous PGs to folded nodes
function fold_prev_pgs(pgs, extracted_nodes)
{
const unmap = {};
for (const new_id in extracted_nodes)
{
for (const sub_node of extracted_nodes[new_id])
{
unmap[sub_node.id] = new_id;
}
}
const mapped_pgs = [];
for (let i = 0; i < pgs.length; i++)
{
mapped_pgs.push(pgs[i].map(osd => (unmap[osd] || osd)));
}
return mapped_pgs;
}
module.exports = {
fold_failure_domains,
unfold_failure_domains,
adjust_distribution,
fold_prev_pgs,
};

View File

@ -98,7 +98,6 @@ async function optimize_initial({ osd_weights, combinator, pg_count, pg_size = 3
score: lp_result.score, score: lp_result.score,
weights: lp_result.vars, weights: lp_result.vars,
int_pgs, int_pgs,
pg_effsize,
space: eff * pg_effsize, space: eff * pg_effsize,
total_space: total_weight, total_space: total_weight,
}; };
@ -410,7 +409,6 @@ async function optimize_change({ prev_pgs: prev_int_pgs, osd_weights, combinator
int_pgs: new_pgs, int_pgs: new_pgs,
differs, differs,
osd_differs, osd_differs,
pg_effsize,
space: pg_effsize * pg_list_space_efficiency(new_pgs, osd_weights, pg_minsize, parity_space), space: pg_effsize * pg_list_space_efficiency(new_pgs, osd_weights, pg_minsize, parity_space),
total_space: total_weight, total_space: total_weight,
}; };

View File

@ -1,108 +0,0 @@
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 (see README.md for details)
const assert = require('assert');
const { fold_failure_domains, unfold_failure_domains, adjust_distribution } = require('./fold.js');
const DSL = require('./dsl_pgs.js');
const LPOptimizer = require('./lp_optimizer.js');
const stableStringify = require('../stable-stringify.js');
async function run()
{
// Test run adjust_distribution
console.log('adjust_distribution');
const rand = [];
for (let i = 0; i < 100; i++)
{
rand.push(1 + Math.floor(10*Math.random()));
// or rand.push(0);
}
const adj = adjust_distribution({ 1: 1, 2: 1, 3: 1, 4: 1, 5: 1, 6: 1, 7: 1, 8: 1, 9: 1, 10: 1 }, rand);
//console.log(rand.join(' '));
console.log(rand.reduce((a, c) => { a[c] = (a[c]||0)+1; return a; }, {}));
//console.log(adj.join(' '));
console.log(adj.reduce((a, c) => { a[c] = (a[c]||0)+1; return a; }, {}));
console.log('Movement: '+rand.reduce((a, c, i) => a+(rand[i] != adj[i] ? 1 : 0), 0)+'/'+rand.length);
console.log('\nfold_failure_domains');
console.log(JSON.stringify(fold_failure_domains(
[
{ id: 1, level: 'osd', size: 1, parent: 'disk1' },
{ id: 2, level: 'osd', size: 2, parent: 'disk1' },
{ id: 'disk1', level: 'disk', parent: 'host1' },
{ id: 'host1', level: 'host', parent: 'dc1' },
{ id: 'dc1', level: 'dc' },
],
[ [ [ 'dc' ], [ 'host' ] ] ]
), 0, 2));
console.log('\nfold_failure_domains empty rules');
console.log(JSON.stringify(fold_failure_domains(
[
{ id: 1, level: 'osd', size: 1, parent: 'disk1' },
{ id: 2, level: 'osd', size: 2, parent: 'disk1' },
{ id: 'disk1', level: 'disk', parent: 'host1' },
{ id: 'host1', level: 'host', parent: 'dc1' },
{ id: 'dc1', level: 'dc' },
],
[]
), 0, 2));
console.log('\noptimize_folded');
// 5 DCs, 2 hosts per DC, 10 OSD per host
const nodes = [];
for (let i = 1; i <= 100; i++)
{
nodes.push({ id: i, level: 'osd', size: 1, parent: 'host'+(1+(0|((i-1)/10))) });
}
for (let i = 1; i <= 10; i++)
{
nodes.push({ id: 'host'+i, level: 'host', parent: 'dc'+(1+(0|((i-1)/2))) });
}
for (let i = 1; i <= 5; i++)
{
nodes.push({ id: 'dc'+i, level: 'dc' });
}
// Check rules
const rules = DSL.parse_level_indexes({ dc: '112233', host: '123456' }, [ 'dc', 'host', 'osd' ]);
assert.deepEqual(rules, [[],[["dc","=",1],["host","!=",[1]]],[["dc","!=",[1]]],[["dc","=",3],["host","!=",[3]]],[["dc","!=",[1,3]]],[["dc","=",5],["host","!=",[5]]]]);
// Check tree folding
const { nodes: folded_nodes, leaves: folded_leaves } = fold_failure_domains(nodes, rules);
const expected_folded = [];
const expected_leaves = {};
for (let i = 1; i <= 10; i++)
{
expected_folded.push({ id: 100+i, name: 'host'+i, level: 'host', size: 10, parent: 'dc'+(1+(0|((i-1)/2))) });
expected_leaves[100+i] = [ ...new Array(10).keys() ].map(k => ({ id: 10*(i-1)+k+1, level: 'osd', size: 1, parent: 'host'+i }));
}
for (let i = 1; i <= 5; i++)
{
expected_folded.push({ id: 'dc'+i, level: 'dc' });
}
assert.equal(stableStringify(folded_nodes), stableStringify(expected_folded));
assert.equal(stableStringify(folded_leaves), stableStringify(expected_leaves));
// Now optimise it
console.log('1000 PGs, EC 112233');
const leaf_weights = folded_nodes.reduce((a, c) => { if (Number(c.id)) { a[c.id] = c.size; } return a; }, {});
let res = await LPOptimizer.optimize_initial({
osd_weights: leaf_weights,
combinator: new DSL.RuleCombinator(folded_nodes, rules, 10000, false),
pg_size: 6,
pg_count: 1000,
ordered: false,
});
LPOptimizer.print_change_stats(res, false);
assert.equal(res.space, 100, 'Initial distribution');
const unfolded_res = { ...res };
unfolded_res.int_pgs = unfold_failure_domains(res.int_pgs, null, folded_leaves);
const osd_weights = nodes.reduce((a, c) => { if (Number(c.id)) { a[c.id] = c.size; } return a; }, {});
unfolded_res.space = unfolded_res.pg_effsize * LPOptimizer.pg_list_space_efficiency(unfolded_res.int_pgs, osd_weights, 0, 1);
LPOptimizer.print_change_stats(unfolded_res, false);
assert.equal(res.space, 100, 'Initial distribution');
}
run().catch(console.error);

View File

@ -15,7 +15,7 @@ function get_osd_tree(global_config, state)
const stat = state.osd.stats[osd_num]; const stat = state.osd.stats[osd_num];
const osd_cfg = state.config.osd[osd_num]; const osd_cfg = state.config.osd[osd_num];
let reweight = osd_cfg == null ? 1 : Number(osd_cfg.reweight); let reweight = osd_cfg == null ? 1 : Number(osd_cfg.reweight);
if (isNaN(reweight) || reweight < 0 || reweight > 1) if (reweight < 0 || isNaN(reweight))
reweight = 1; reweight = 1;
if (stat && stat.size && reweight && (state.osd.state[osd_num] || Number(stat.time) >= down_time || if (stat && stat.size && reweight && (state.osd.state[osd_num] || Number(stat.time) >= down_time ||
osd_cfg && osd_cfg.noout)) osd_cfg && osd_cfg.noout))
@ -87,7 +87,7 @@ function make_hier_tree(global_config, tree)
tree[''] = { children: [] }; tree[''] = { children: [] };
for (const node_id in tree) for (const node_id in tree)
{ {
if (node_id === '' || !(tree[node_id].children||[]).length && (tree[node_id].size||0) <= 0) if (node_id === '' || tree[node_id].level === 'osd' && (!tree[node_id].size || tree[node_id].size <= 0))
{ {
continue; continue;
} }
@ -107,10 +107,10 @@ function make_hier_tree(global_config, tree)
deleted = 0; deleted = 0;
for (const node_id in tree) for (const node_id in tree)
{ {
if (!(tree[node_id].children||[]).length && (tree[node_id].size||0) <= 0) if (tree[node_id].level !== 'osd' && (!tree[node_id].children || !tree[node_id].children.length))
{ {
const parent = tree[node_id].parent; const parent = tree[node_id].parent;
if (parent && tree[parent]) if (parent)
{ {
tree[parent].children = tree[parent].children.filter(c => c != tree[node_id]); tree[parent].children = tree[parent].children.filter(c => c != tree[node_id]);
} }

View File

@ -1,6 +1,6 @@
{ {
"name": "vitastor-mon", "name": "vitastor-mon",
"version": "2.1.0", "version": "1.11.0",
"description": "Vitastor SDS monitor service", "description": "Vitastor SDS monitor service",
"main": "mon-main.js", "main": "mon-main.js",
"scripts": { "scripts": {
@ -19,6 +19,6 @@
"eslint-plugin-node": "^11.1.0" "eslint-plugin-node": "^11.1.0"
}, },
"engines": { "engines": {
"node": ">=12.1.0" "node": ">=12.0.0"
} }
} }

View File

@ -3,7 +3,6 @@
const { RuleCombinator } = require('./lp_optimizer/dsl_pgs.js'); const { RuleCombinator } = require('./lp_optimizer/dsl_pgs.js');
const { SimpleCombinator, flatten_tree } = require('./lp_optimizer/simple_pgs.js'); const { SimpleCombinator, flatten_tree } = require('./lp_optimizer/simple_pgs.js');
const { fold_failure_domains, unfold_failure_domains, fold_prev_pgs } = require('./lp_optimizer/fold.js');
const { validate_pool_cfg, get_pg_rules } = require('./pool_config.js'); const { validate_pool_cfg, get_pg_rules } = require('./pool_config.js');
const LPOptimizer = require('./lp_optimizer/lp_optimizer.js'); const LPOptimizer = require('./lp_optimizer/lp_optimizer.js');
const { scale_pg_count } = require('./pg_utils.js'); const { scale_pg_count } = require('./pg_utils.js');
@ -161,6 +160,7 @@ async function generate_pool_pgs(state, global_config, pool_id, osd_tree, levels
pool_cfg.bitmap_granularity || global_config.bitmap_granularity || 4096, pool_cfg.bitmap_granularity || global_config.bitmap_granularity || 4096,
pool_cfg.immediate_commit || global_config.immediate_commit || 'all' pool_cfg.immediate_commit || global_config.immediate_commit || 'all'
); );
pool_tree = make_hier_tree(global_config, pool_tree);
// First try last_clean_pgs to minimize data movement // First try last_clean_pgs to minimize data movement
let prev_pgs = []; let prev_pgs = [];
for (const pg in ((state.history.last_clean_pgs.items||{})[pool_id]||{})) for (const pg in ((state.history.last_clean_pgs.items||{})[pool_id]||{}))
@ -175,19 +175,14 @@ async function generate_pool_pgs(state, global_config, pool_id, osd_tree, levels
prev_pgs[pg-1] = [ ...state.pg.config.items[pool_id][pg].osd_set ]; prev_pgs[pg-1] = [ ...state.pg.config.items[pool_id][pg].osd_set ];
} }
} }
const use_rules = !global_config.use_old_pg_combinator || pool_cfg.level_placement || pool_cfg.raw_placement;
const rules = use_rules ? get_pg_rules(pool_id, pool_cfg, global_config.placement_levels) : null;
const folded = fold_failure_domains(Object.values(pool_tree), use_rules ? rules : [ [ [ pool_cfg.failure_domain ] ] ]);
// FIXME: Remove/merge make_hier_tree() step somewhere, however it's needed to remove empty nodes
const folded_tree = make_hier_tree(global_config, folded.nodes);
const old_pg_count = prev_pgs.length; const old_pg_count = prev_pgs.length;
const optimize_cfg = { const optimize_cfg = {
osd_weights: folded.nodes.reduce((a, c) => { if (Number(c.id)) { a[c.id] = c.size; } return a; }, {}), osd_weights: Object.values(pool_tree).filter(item => item.level === 'osd').reduce((a, c) => { a[c.id] = c.size; return a; }, {}),
combinator: use_rules combinator: !global_config.use_old_pg_combinator || pool_cfg.level_placement || pool_cfg.raw_placement
// new algorithm: // new algorithm:
? new RuleCombinator(folded_tree, rules, pool_cfg.max_osd_combinations) ? new RuleCombinator(pool_tree, get_pg_rules(pool_id, pool_cfg, global_config.placement_levels), pool_cfg.max_osd_combinations)
// old algorithm: // old algorithm:
: new SimpleCombinator(flatten_tree(folded_tree[''].children, levels, pool_cfg.failure_domain, 'osd'), pool_cfg.pg_size, pool_cfg.max_osd_combinations), : new SimpleCombinator(flatten_tree(pool_tree[''].children, levels, pool_cfg.failure_domain, 'osd'), pool_cfg.pg_size, pool_cfg.max_osd_combinations),
pg_count: pool_cfg.pg_count, pg_count: pool_cfg.pg_count,
pg_size: pool_cfg.pg_size, pg_size: pool_cfg.pg_size,
pg_minsize: pool_cfg.pg_minsize, pg_minsize: pool_cfg.pg_minsize,
@ -207,11 +202,12 @@ async function generate_pool_pgs(state, global_config, pool_id, osd_tree, levels
for (const pg of prev_pgs) for (const pg of prev_pgs)
{ {
while (pg.length < pool_cfg.pg_size) while (pg.length < pool_cfg.pg_size)
{
pg.push(0); pg.push(0);
}
} }
const folded_prev_pgs = fold_prev_pgs(prev_pgs, folded.leaves);
optimize_result = await LPOptimizer.optimize_change({ optimize_result = await LPOptimizer.optimize_change({
prev_pgs: folded_prev_pgs, prev_pgs,
...optimize_cfg, ...optimize_cfg,
}); });
} }
@ -219,10 +215,6 @@ async function generate_pool_pgs(state, global_config, pool_id, osd_tree, levels
{ {
optimize_result = await LPOptimizer.optimize_initial(optimize_cfg); optimize_result = await LPOptimizer.optimize_initial(optimize_cfg);
} }
optimize_result.int_pgs = unfold_failure_domains(optimize_result.int_pgs, prev_pgs, folded.leaves);
const osd_weights = Object.values(pool_tree).reduce((a, c) => { if (c.level === 'osd') { a[c.id] = c.size; } return a; }, {});
optimize_result.space = optimize_result.pg_effsize * LPOptimizer.pg_list_space_efficiency(optimize_result.int_pgs,
osd_weights, optimize_cfg.pg_minsize, 1);
console.log(`Pool ${pool_id} (${pool_cfg.name || 'unnamed'}):`); console.log(`Pool ${pool_id} (${pool_cfg.name || 'unnamed'}):`);
LPOptimizer.print_change_stats(optimize_result); LPOptimizer.print_change_stats(optimize_result);
let pg_effsize = pool_cfg.pg_size; let pg_effsize = pool_cfg.pg_size;

View File

@ -40,11 +40,6 @@ async function run()
console.log("/etc/systemd/system/vitastor-etcd.service already exists"); console.log("/etc/systemd/system/vitastor-etcd.service already exists");
process.exit(1); process.exit(1);
} }
if (!in_docker && fs.existsSync("/etc/systemd/system/etcd.service"))
{
console.log("/etc/systemd/system/etcd.service already exists");
process.exit(1);
}
const config = JSON.parse(fs.readFileSync(config_path, { encoding: 'utf-8' })); const config = JSON.parse(fs.readFileSync(config_path, { encoding: 'utf-8' }));
if (!config.etcd_address) if (!config.etcd_address)
{ {
@ -71,7 +66,7 @@ async function run()
console.log('etcd for Vitastor configured. Run `systemctl enable --now vitastor-etcd` to start etcd'); console.log('etcd for Vitastor configured. Run `systemctl enable --now vitastor-etcd` to start etcd');
process.exit(0); process.exit(0);
} }
await system(`mkdir -p /var/lib/etcd/vitastor`); await system(`mkdir -p /var/lib/etcd`);
fs.writeFileSync( fs.writeFileSync(
"/etc/systemd/system/vitastor-etcd.service", "/etc/systemd/system/vitastor-etcd.service",
`[Unit] `[Unit]
@ -82,14 +77,14 @@ Wants=network-online.target local-fs.target time-sync.target
[Service] [Service]
Restart=always Restart=always
Environment=GOGC=50 Environment=GOGC=50
ExecStart=etcd --name ${etcd_name} --data-dir /var/lib/etcd/vitastor \\ ExecStart=etcd --name ${etcd_name} --data-dir /var/lib/etcd \\
--snapshot-count 10000 --advertise-client-urls http://${etcds[num]}:2379 --listen-client-urls http://${etcds[num]}:2379 \\ --snapshot-count 10000 --advertise-client-urls http://${etcds[num]}:2379 --listen-client-urls http://${etcds[num]}:2379 \\
--initial-advertise-peer-urls http://${etcds[num]}:2380 --listen-peer-urls http://${etcds[num]}:2380 \\ --initial-advertise-peer-urls http://${etcds[num]}:2380 --listen-peer-urls http://${etcds[num]}:2380 \\
--initial-cluster-token vitastor-etcd-1 --initial-cluster ${etcd_cluster} \\ --initial-cluster-token vitastor-etcd-1 --initial-cluster ${etcd_cluster} \\
--initial-cluster-state new --max-txn-ops=100000 --max-request-bytes=104857600 \\ --initial-cluster-state new --max-txn-ops=100000 --max-request-bytes=104857600 \\
--auto-compaction-retention=10 --auto-compaction-mode=revision --auto-compaction-retention=10 --auto-compaction-mode=revision
WorkingDirectory=/var/lib/etcd/vitastor WorkingDirectory=/var/lib/etcd
ExecStartPre=+chown -R etcd /var/lib/etcd/vitastor ExecStartPre=+chown -R etcd /var/lib/etcd
User=etcd User=etcd
PrivateTmp=false PrivateTmp=false
TasksMax=infinity TasksMax=infinity
@ -102,9 +97,8 @@ WantedBy=multi-user.target
`); `);
await system(`useradd etcd`); await system(`useradd etcd`);
await system(`systemctl daemon-reload`); await system(`systemctl daemon-reload`);
// Disable the distribution's etcd unit and enable ours await system(`systemctl enable etcd`);
await system(`systemctl disable --now etcd`); await system(`systemctl start etcd`);
await system(`systemctl enable --now vitastor-etcd`);
process.exit(0); process.exit(0);
} }

View File

@ -87,25 +87,11 @@ function sum_op_stats(all_osd, prev_stats)
for (const k in derived[type][op]) for (const k in derived[type][op])
{ {
sum_diff[type][op] = sum_diff[type][op] || {}; sum_diff[type][op] = sum_diff[type][op] || {};
if (k == 'lat') sum_diff[type][op][k] = (sum_diff[type][op][k] || 0n) + derived[type][op][k];
sum_diff[type][op].lat = (sum_diff[type][op].lat || 0n) + derived[type][op].lat*derived[type][op].iops;
else
sum_diff[type][op][k] = (sum_diff[type][op][k] || 0n) + derived[type][op][k];
} }
} }
} }
} }
// Calculate average (weighted by iops) op latency across all OSDs
for (const type in sum_diff)
{
for (const op in sum_diff[type])
{
if (sum_diff[type][op].lat)
{
sum_diff[type][op].lat /= sum_diff[type][op].iops;
}
}
}
return sum_diff; return sum_diff;
} }
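The master branch above accumulates `lat*iops` per OSD and divides by the total iops at the end, i.e. an iops-weighted average instead of a plain sum. A standalone sketch with made-up numbers (BigInt values, as in the monitor code):

```
// Average latency across OSDs, weighted by iops (master-branch behaviour above).
const per_osd = [ { iops: 100n, lat: 500n }, { iops: 300n, lat: 1000n } ];
let iops = 0n, lat = 0n;
for (const d of per_osd)
{
    iops += d.iops;
    lat += d.lat * d.iops;   // accumulate latency weighted by iops
}
if (iops > 0n)
    lat /= iops;             // -> 875n: the busier OSD dominates the average
console.log({ iops, lat });
```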
@ -285,7 +271,8 @@ function sum_inode_stats(state, prev_stats)
const op_st = inode_stats[pool_id][inode_num][op]; const op_st = inode_stats[pool_id][inode_num][op];
op_st.bps += op_diff.bps; op_st.bps += op_diff.bps;
op_st.iops += op_diff.iops; op_st.iops += op_diff.iops;
op_st.lat += op_diff.lat*op_diff.iops; op_st.lat += op_diff.lat;
op_st.n_osd = (op_st.n_osd || 0) + 1;
} }
} }
} }
@ -298,8 +285,11 @@ function sum_inode_stats(state, prev_stats)
for (const op of [ 'read', 'write', 'delete' ]) for (const op of [ 'read', 'write', 'delete' ])
{ {
const op_st = inode_stats[pool_id][inode_num][op]; const op_st = inode_stats[pool_id][inode_num][op];
if (op_st.lat) if (op_st.n_osd)
op_st.lat /= op_st.iops; {
op_st.lat /= BigInt(op_st.n_osd);
delete op_st.n_osd;
}
if (op_st.bps > 0 || op_st.iops > 0) if (op_st.bps > 0 || op_st.iops > 0)
nonzero = true; nonzero = true;
} }

View File

@ -19,16 +19,11 @@
class NodeVitastorRequest: public Nan::AsyncResource class NodeVitastorRequest: public Nan::AsyncResource
{ {
public: public:
NodeVitastorRequest(NodeVitastor *cli, const v8::Local<v8::Function> & cb): Nan::AsyncResource("NodeVitastorRequest") NodeVitastorRequest(NodeVitastor *cli, v8::Local<v8::Function> cb): Nan::AsyncResource("NodeVitastorRequest")
{ {
this->cli = cli; this->cli = cli;
callback.Reset(cb); callback.Reset(cb);
} }
~NodeVitastorRequest()
{
callback.Reset();
buffer_ref.Reset();
}
iovec iov; iovec iov;
std::vector<iovec> iov_list; std::vector<iovec> iov_list;
@ -38,7 +33,6 @@ public:
uint64_t offset = 0, len = 0, version = 0; uint64_t offset = 0, len = 0, version = 0;
bool with_parents = false; bool with_parents = false;
Nan::Persistent<v8::Function> callback; Nan::Persistent<v8::Function> callback;
Nan::Persistent<v8::Value> buffer_ref;
}; };
static uint64_t get_ui64(const v8::Local<v8::Value> & val) static uint64_t get_ui64(const v8::Local<v8::Value> & val)
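The `buffer_ref` handle added on the master side (together with the destructor that resets it) pins the caller's buffer for the lifetime of the request. A hypothetical JS usage showing why that matters; the call signature is inferred from `get_write_request()` in this file:

```
// Without buffer_ref, nothing keeps `buf` reachable between write() returning
// and the completion callback firing, so GC could free it mid-I/O.
function write_once(img)
{
    let buf = Buffer.alloc(4096, 0xa5);
    img.write(0, buf, (err) => console.log('write finished:', err));
    buf = null; // from here on, only the request's buffer_ref pins the buffer
}
```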
@ -89,6 +83,7 @@ NAN_METHOD(NodeVitastor::Create)
delete[] c_cfg; delete[] c_cfg;
if (!cli->c) if (!cli->c)
{ {
ERRORF("NodeVitastor: failed to initialize io_uring (old kernel or insufficient ulimit -l?)");
Nan::ThrowError("failed to initialize io_uring (old kernel or insufficient ulimit -l?)"); Nan::ThrowError("failed to initialize io_uring (old kernel or insufficient ulimit -l?)");
return; return;
} }
@ -135,8 +130,8 @@ NodeVitastorRequest* NodeVitastor::get_read_request(const Nan::FunctionCallbackI
Nan::ThrowError("failed to allocate memory"); Nan::ThrowError("failed to allocate memory");
return NULL; return NULL;
} }
v8::Local<v8::Function> callback = info[argpos+2].As<v8::Function>();
auto req = new NodeVitastorRequest(this, info[argpos+2].As<v8::Function>()); auto req = new NodeVitastorRequest(this, callback);
req->offset = offset; req->offset = offset;
req->len = len; req->len = len;
@ -161,9 +156,6 @@ NAN_METHOD(NodeVitastor::Read)
self->Ref(); self->Ref();
vitastor_c_read(self->c, ((pool << (64-POOL_ID_BITS)) | inode), req->offset, req->len, &req->iov, 1, on_read_finish, req); vitastor_c_read(self->c, ((pool << (64-POOL_ID_BITS)) | inode), req->offset, req->len, &req->iov, 1, on_read_finish, req);
#if !defined VITASTOR_C_API_VERSION || VITASTOR_C_API_VERSION < 5
vitastor_c_uring_handle_events(self->c);
#endif
} }
NodeVitastorRequest* NodeVitastor::get_write_request(const Nan::FunctionCallbackInfo<v8::Value> & info, int argpos) NodeVitastorRequest* NodeVitastor::get_write_request(const Nan::FunctionCallbackInfo<v8::Value> & info, int argpos)
@ -183,11 +175,11 @@ NodeVitastorRequest* NodeVitastor::get_write_request(const Nan::FunctionCallback
argpos++; argpos++;
} }
auto req = new NodeVitastorRequest(this, info[argpos+2].As<v8::Function>()); v8::Local<v8::Function> callback = info[argpos+2].As<v8::Function>();
auto req = new NodeVitastorRequest(this, callback);
req->offset = offset; req->offset = offset;
req->version = version; req->version = version;
req->buffer_ref.Reset(bufarg);
if (bufarg->IsArray()) if (bufarg->IsArray())
{ {
@ -232,9 +224,6 @@ NAN_METHOD(NodeVitastor::Write)
req->iov_list.size() ? req->iov_list.data() : &req->iov, req->iov_list.size() ? req->iov_list.data() : &req->iov,
req->iov_list.size() ? req->iov_list.size() : 1, req->iov_list.size() ? req->iov_list.size() : 1,
on_write_finish, req); on_write_finish, req);
#if !defined VITASTOR_C_API_VERSION || VITASTOR_C_API_VERSION < 5
vitastor_c_uring_handle_events(self->c);
#endif
} }
NodeVitastorRequest* NodeVitastor::get_delete_request(const Nan::FunctionCallbackInfo<v8::Value> & info, int argpos) NodeVitastorRequest* NodeVitastor::get_delete_request(const Nan::FunctionCallbackInfo<v8::Value> & info, int argpos)
@ -254,7 +243,8 @@ NodeVitastorRequest* NodeVitastor::get_delete_request(const Nan::FunctionCallbac
argpos++; argpos++;
} }
auto req = new NodeVitastorRequest(this, info[argpos+2].As<v8::Function>()); v8::Local<v8::Function> callback = info[argpos+2].As<v8::Function>();
auto req = new NodeVitastorRequest(this, callback);
req->offset = offset; req->offset = offset;
req->len = len; req->len = len;
@ -280,9 +270,6 @@ NAN_METHOD(NodeVitastor::Delete)
self->Ref(); self->Ref();
vitastor_c_delete(self->c, ((pool << (64-POOL_ID_BITS)) | inode), req->offset, req->len, req->version, vitastor_c_delete(self->c, ((pool << (64-POOL_ID_BITS)) | inode), req->offset, req->len, req->version,
on_write_finish, req); on_write_finish, req);
#if !defined VITASTOR_C_API_VERSION || VITASTOR_C_API_VERSION < 5
vitastor_c_uring_handle_events(self->c);
#endif
} }
// sync(callback(err)) // sync(callback(err))
@ -294,13 +281,11 @@ NAN_METHOD(NodeVitastor::Sync)
NodeVitastor* self = Nan::ObjectWrap::Unwrap<NodeVitastor>(info.This()); NodeVitastor* self = Nan::ObjectWrap::Unwrap<NodeVitastor>(info.This());
auto req = new NodeVitastorRequest(self, info[0].As<v8::Function>()); v8::Local<v8::Function> callback = info[0].As<v8::Function>();
auto req = new NodeVitastorRequest(self, callback);
self->Ref(); self->Ref();
vitastor_c_sync(self->c, on_write_finish, req); vitastor_c_sync(self->c, on_write_finish, req);
#if !defined VITASTOR_C_API_VERSION || VITASTOR_C_API_VERSION < 5
vitastor_c_uring_handle_events(self->c);
#endif
} }
// read_bitmap(pool, inode, offset, length, with_parents, callback(err, bitmap_buffer)) // read_bitmap(pool, inode, offset, length, with_parents, callback(err, bitmap_buffer))
@ -317,13 +302,11 @@ NAN_METHOD(NodeVitastor::ReadBitmap)
uint64_t offset = get_ui64(info[2]); uint64_t offset = get_ui64(info[2]);
uint64_t len = get_ui64(info[3]); uint64_t len = get_ui64(info[3]);
bool with_parents = Nan::To<bool>(info[4]).FromJust(); bool with_parents = Nan::To<bool>(info[4]).FromJust();
v8::Local<v8::Function> callback = info[5].As<v8::Function>();
auto req = new NodeVitastorRequest(self, info[5].As<v8::Function>()); auto req = new NodeVitastorRequest(self, callback);
self->Ref(); self->Ref();
vitastor_c_read_bitmap(self->c, ((pool << (64-POOL_ID_BITS)) | inode), offset, len, with_parents, on_read_bitmap_finish, req); vitastor_c_read_bitmap(self->c, ((pool << (64-POOL_ID_BITS)) | inode), offset, len, with_parents, on_read_bitmap_finish, req);
#if !defined VITASTOR_C_API_VERSION || VITASTOR_C_API_VERSION < 5
vitastor_c_uring_handle_events(self->c);
#endif
} }
static void on_error(NodeVitastorRequest *req, Nan::Callback & nanCallback, long retval) static void on_error(NodeVitastorRequest *req, Nan::Callback & nanCallback, long retval)
@ -344,12 +327,10 @@ NAN_METHOD(NodeVitastor::OnReady)
if (info.Length() < 1) if (info.Length() < 1)
Nan::ThrowError("Not enough arguments to on_ready(callback(err))"); Nan::ThrowError("Not enough arguments to on_ready(callback(err))");
NodeVitastor* self = Nan::ObjectWrap::Unwrap<NodeVitastor>(info.This()); NodeVitastor* self = Nan::ObjectWrap::Unwrap<NodeVitastor>(info.This());
auto req = new NodeVitastorRequest(self, info[0].As<v8::Function>()); v8::Local<v8::Function> callback = info[0].As<v8::Function>();
auto req = new NodeVitastorRequest(self, callback);
self->Ref(); self->Ref();
vitastor_c_on_ready(self->c, on_ready_finish, req); vitastor_c_on_ready(self->c, on_ready_finish, req);
#if !defined VITASTOR_C_API_VERSION || VITASTOR_C_API_VERSION < 5
vitastor_c_uring_handle_events(self->c);
#endif
} }
void NodeVitastor::on_ready_finish(void *opaque, long retval) void NodeVitastor::on_ready_finish(void *opaque, long retval)
@ -494,9 +475,6 @@ NAN_METHOD(NodeVitastorImage::Create)
img->Ref(); img->Ref();
cli->Ref(); cli->Ref();
vitastor_c_watch_inode(cli->c, (char*)img->name.c_str(), on_watch_start, img); vitastor_c_watch_inode(cli->c, (char*)img->name.c_str(), on_watch_start, img);
#if !defined VITASTOR_C_API_VERSION || VITASTOR_C_API_VERSION < 5
vitastor_c_uring_handle_events(cli->c);
#endif
info.GetReturnValue().Set(info.This()); info.GetReturnValue().Set(info.This());
} }
@ -568,7 +546,8 @@ NAN_METHOD(NodeVitastorImage::Sync)
NodeVitastorImage* img = Nan::ObjectWrap::Unwrap<NodeVitastorImage>(info.This()); NodeVitastorImage* img = Nan::ObjectWrap::Unwrap<NodeVitastorImage>(info.This());
auto req = new NodeVitastorRequest(img->cli, info[0].As<v8::Function>()); v8::Local<v8::Function> callback = info[0].As<v8::Function>();
auto req = new NodeVitastorRequest(img->cli, callback);
req->img = img; req->img = img;
req->op = NODE_VITASTOR_SYNC; req->op = NODE_VITASTOR_SYNC;
@ -587,8 +566,9 @@ NAN_METHOD(NodeVitastorImage::ReadBitmap)
uint64_t offset = get_ui64(info[0]); uint64_t offset = get_ui64(info[0]);
uint64_t len = get_ui64(info[1]); uint64_t len = get_ui64(info[1]);
bool with_parents = Nan::To<bool>(info[2]).FromJust(); bool with_parents = Nan::To<bool>(info[2]).FromJust();
v8::Local<v8::Function> callback = info[3].As<v8::Function>();
auto req = new NodeVitastorRequest(img->cli, info[3].As<v8::Function>()); auto req = new NodeVitastorRequest(img->cli, callback);
req->img = img; req->img = img;
req->op = NODE_VITASTOR_READ_BITMAP; req->op = NODE_VITASTOR_READ_BITMAP;
req->offset = offset; req->offset = offset;
@ -607,7 +587,8 @@ NAN_METHOD(NodeVitastorImage::GetInfo)
NodeVitastorImage* img = Nan::ObjectWrap::Unwrap<NodeVitastorImage>(info.This()); NodeVitastorImage* img = Nan::ObjectWrap::Unwrap<NodeVitastorImage>(info.This());
auto req = new NodeVitastorRequest(img->cli, info[0].As<v8::Function>()); v8::Local<v8::Function> callback = info[0].As<v8::Function>();
auto req = new NodeVitastorRequest(img->cli, callback);
req->img = img; req->img = img;
req->op = NODE_VITASTOR_GET_INFO; req->op = NODE_VITASTOR_GET_INFO;
@ -634,9 +615,6 @@ void NodeVitastorImage::exec_request(NodeVitastorRequest *req)
uint64_t ino = vitastor_c_inode_get_num(watch); uint64_t ino = vitastor_c_inode_get_num(watch);
cli->Ref(); cli->Ref();
vitastor_c_read(cli->c, ino, req->offset, req->len, &req->iov, 1, NodeVitastor::on_read_finish, req); vitastor_c_read(cli->c, ino, req->offset, req->len, &req->iov, 1, NodeVitastor::on_read_finish, req);
#if !defined VITASTOR_C_API_VERSION || VITASTOR_C_API_VERSION < 5
vitastor_c_uring_handle_events(cli->c);
#endif
} }
else if (req->op == NODE_VITASTOR_WRITE) else if (req->op == NODE_VITASTOR_WRITE)
{ {
@ -646,9 +624,6 @@ void NodeVitastorImage::exec_request(NodeVitastorRequest *req)
req->iov_list.size() ? req->iov_list.data() : &req->iov, req->iov_list.size() ? req->iov_list.data() : &req->iov,
req->iov_list.size() ? req->iov_list.size() : 1, req->iov_list.size() ? req->iov_list.size() : 1,
NodeVitastor::on_write_finish, req); NodeVitastor::on_write_finish, req);
#if !defined VITASTOR_C_API_VERSION || VITASTOR_C_API_VERSION < 5
vitastor_c_uring_handle_events(cli->c);
#endif
} }
else if (req->op == NODE_VITASTOR_DELETE) else if (req->op == NODE_VITASTOR_DELETE)
{ {
@ -656,9 +631,6 @@ void NodeVitastorImage::exec_request(NodeVitastorRequest *req)
cli->Ref(); cli->Ref();
vitastor_c_delete(cli->c, ino, req->offset, req->len, req->version, vitastor_c_delete(cli->c, ino, req->offset, req->len, req->version,
NodeVitastor::on_write_finish, req); NodeVitastor::on_write_finish, req);
#if !defined VITASTOR_C_API_VERSION || VITASTOR_C_API_VERSION < 5
vitastor_c_uring_handle_events(cli->c);
#endif
} }
else if (req->op == NODE_VITASTOR_SYNC) else if (req->op == NODE_VITASTOR_SYNC)
{ {
@ -668,9 +640,6 @@ void NodeVitastorImage::exec_request(NodeVitastorRequest *req)
if (imm != IMMEDIATE_ALL) if (imm != IMMEDIATE_ALL)
{ {
vitastor_c_sync(cli->c, NodeVitastor::on_write_finish, req); vitastor_c_sync(cli->c, NodeVitastor::on_write_finish, req);
#if !defined VITASTOR_C_API_VERSION || VITASTOR_C_API_VERSION < 5
vitastor_c_uring_handle_events(cli->c);
#endif
} }
else else
{ {
@ -682,9 +651,6 @@ void NodeVitastorImage::exec_request(NodeVitastorRequest *req)
uint64_t ino = vitastor_c_inode_get_num(watch); uint64_t ino = vitastor_c_inode_get_num(watch);
cli->Ref(); cli->Ref();
vitastor_c_read_bitmap(cli->c, ino, req->offset, req->len, req->with_parents, NodeVitastor::on_read_bitmap_finish, req); vitastor_c_read_bitmap(cli->c, ino, req->offset, req->len, req->with_parents, NodeVitastor::on_read_bitmap_finish, req);
#if !defined VITASTOR_C_API_VERSION || VITASTOR_C_API_VERSION < 5
vitastor_c_uring_handle_events(cli->c);
#endif
} }
else if (req->op == NODE_VITASTOR_GET_INFO) else if (req->op == NODE_VITASTOR_GET_INFO)
{ {
@ -802,7 +768,8 @@ NAN_METHOD(NodeVitastorKV::Open)
cfg[std::string(*Nan::Utf8String(key))] = std::string(*Nan::Utf8String(Nan::Get(jsParams, key).ToLocalChecked())); cfg[std::string(*Nan::Utf8String(key))] = std::string(*Nan::Utf8String(Nan::Get(jsParams, key).ToLocalChecked()));
} }
auto req = new NodeVitastorRequest(kv->cli, info[3].As<v8::Function>()); v8::Local<v8::Function> callback = info[3].As<v8::Function>();
auto req = new NodeVitastorRequest(kv->cli, callback);
kv->Ref(); kv->Ref();
kv->dbw->open(inode_id, cfg, [kv, req](int res) kv->dbw->open(inode_id, cfg, [kv, req](int res)
@ -815,9 +782,6 @@ NAN_METHOD(NodeVitastorKV::Open)
delete req; delete req;
kv->Unref(); kv->Unref();
}); });
#if !defined VITASTOR_C_API_VERSION || VITASTOR_C_API_VERSION < 5
vitastor_c_uring_handle_events(kv->cli->c);
#endif
} }
// close(callback(err)) // close(callback(err))
@ -829,7 +793,8 @@ NAN_METHOD(NodeVitastorKV::Close)
NodeVitastorKV* kv = Nan::ObjectWrap::Unwrap<NodeVitastorKV>(info.This()); NodeVitastorKV* kv = Nan::ObjectWrap::Unwrap<NodeVitastorKV>(info.This());
auto req = new NodeVitastorRequest(kv->cli, info[0].As<v8::Function>()); v8::Local<v8::Function> callback = info[0].As<v8::Function>();
auto req = new NodeVitastorRequest(kv->cli, callback);
kv->Ref(); kv->Ref();
kv->dbw->close([kv, req]() kv->dbw->close([kv, req]()
@ -840,9 +805,6 @@ NAN_METHOD(NodeVitastorKV::Close)
delete req; delete req;
kv->Unref(); kv->Unref();
}); });
#if !defined VITASTOR_C_API_VERSION || VITASTOR_C_API_VERSION < 5
vitastor_c_uring_handle_events(kv->cli->c);
#endif
} }
// set_config({ ...config }) // set_config({ ...config })
@ -886,7 +848,8 @@ void NodeVitastorKV::get_impl(const Nan::FunctionCallbackInfo<v8::Value> & info,
// FIXME: Handle Buffer too // FIXME: Handle Buffer too
std::string key(*Nan::Utf8String(info[0].As<v8::String>())); std::string key(*Nan::Utf8String(info[0].As<v8::String>()));
auto req = new NodeVitastorRequest(kv->cli, info[1].As<v8::Function>()); v8::Local<v8::Function> callback = info[1].As<v8::Function>();
auto req = new NodeVitastorRequest(kv->cli, callback);
kv->Ref(); kv->Ref();
kv->dbw->get(key, [kv, req](int res, const std::string & value) kv->dbw->get(key, [kv, req](int res, const std::string & value)
@ -900,9 +863,6 @@ void NodeVitastorKV::get_impl(const Nan::FunctionCallbackInfo<v8::Value> & info,
delete req; delete req;
kv->Unref(); kv->Unref();
}, allow_cache); }, allow_cache);
#if !defined VITASTOR_C_API_VERSION || VITASTOR_C_API_VERSION < 5
vitastor_c_uring_handle_events(kv->cli->c);
#endif
} }
// get(key, callback(err, value)) // get(key, callback(err, value))
@ -951,12 +911,14 @@ NAN_METHOD(NodeVitastorKV::Set)
std::string key(*Nan::Utf8String(info[0].As<v8::String>())); std::string key(*Nan::Utf8String(info[0].As<v8::String>()));
std::string value(*Nan::Utf8String(info[1].As<v8::String>())); std::string value(*Nan::Utf8String(info[1].As<v8::String>()));
NodeVitastorRequest *req = new NodeVitastorRequest(kv->cli, info[2].As<v8::Function>()), *cas_req = NULL; v8::Local<v8::Function> callback = info[2].As<v8::Function>();
NodeVitastorRequest *req = new NodeVitastorRequest(kv->cli, callback), *cas_req = NULL;
std::function<bool(int, const std::string &)> cas_cb; std::function<bool(int, const std::string &)> cas_cb;
if (info.Length() > 3 && info[3]->IsObject()) if (info.Length() > 3 && info[3]->IsObject())
{ {
cas_req = new NodeVitastorRequest(kv->cli, info[3].As<v8::Function>()); v8::Local<v8::Function> cas_callback = info[3].As<v8::Function>();
cas_req = new NodeVitastorRequest(kv->cli, cas_callback);
cas_cb = make_cas_callback(cas_req); cas_cb = make_cas_callback(cas_req);
} }
@ -973,9 +935,6 @@ NAN_METHOD(NodeVitastorKV::Set)
delete cas_req; delete cas_req;
kv->Unref(); kv->Unref();
}, cas_cb); }, cas_cb);
#if !defined VITASTOR_C_API_VERSION || VITASTOR_C_API_VERSION < 5
vitastor_c_uring_handle_events(kv->cli->c);
#endif
} }
// del(key, callback(err), cas_compare(old_value)?) // del(key, callback(err), cas_compare(old_value)?)
@ -990,12 +949,14 @@ NAN_METHOD(NodeVitastorKV::Del)
// FIXME: Handle Buffer too // FIXME: Handle Buffer too
std::string key(*Nan::Utf8String(info[0].As<v8::String>())); std::string key(*Nan::Utf8String(info[0].As<v8::String>()));
NodeVitastorRequest *req = new NodeVitastorRequest(kv->cli, info[1].As<v8::Function>()), *cas_req = NULL; v8::Local<v8::Function> callback = info[1].As<v8::Function>();
NodeVitastorRequest *req = new NodeVitastorRequest(kv->cli, callback), *cas_req = NULL;
std::function<bool(int, const std::string &)> cas_cb; std::function<bool(int, const std::string &)> cas_cb;
if (info.Length() > 2 && info[2]->IsObject()) if (info.Length() > 2 && info[2]->IsObject())
{ {
cas_req = new NodeVitastorRequest(kv->cli, info[2].As<v8::Function>()); v8::Local<v8::Function> cas_callback = info[2].As<v8::Function>();
cas_req = new NodeVitastorRequest(kv->cli, cas_callback);
cas_cb = make_cas_callback(cas_req); cas_cb = make_cas_callback(cas_req);
} }
@ -1012,9 +973,6 @@ NAN_METHOD(NodeVitastorKV::Del)
delete cas_req; delete cas_req;
kv->Unref(); kv->Unref();
}, cas_cb); }, cas_cb);
#if !defined VITASTOR_C_API_VERSION || VITASTOR_C_API_VERSION < 5
vitastor_c_uring_handle_events(kv->cli->c);
#endif
} }
// list(start_key?) // list(start_key?)
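A hypothetical compare-and-set round trip using the signatures from the comments above (`get(key, callback(err, value))`, `del(key, callback(err), cas_compare(old_value)?)`; the matching `set` form is assumed):

```
// Read a key, then update it only if it has not changed in between (CAS).
kv.get('meta/flags', (err, value) =>
{
    if (err)
        return console.error('get failed:', err);
    kv.set('meta/flags', value + ',resized',
        (err2) => console.log('set result:', err2),
        (old_value) => old_value === value);   // CAS predicate
});
```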
@ -1094,11 +1052,12 @@ NAN_METHOD(NodeVitastorKVListing::Next)
if (info.Length() > 0) if (info.Length() > 0)
{ {
v8::Local<v8::Function> callback = info[0].As<v8::Function>();
if (list->iter) if (list->iter)
{ {
delete list->iter; delete list->iter;
} }
list->iter = new NodeVitastorRequest(list->kv->cli, info[0].As<v8::Function>()); list->iter = new NodeVitastorRequest(list->kv->cli, callback);
} }
if (!list->handle) if (!list->handle)
{ {
@ -1134,9 +1093,6 @@ NAN_METHOD(NodeVitastorKVListing::Next)
list->iter = req; list->iter = req;
list->kv->Unref(); list->kv->Unref();
}); });
#if !defined VITASTOR_C_API_VERSION || VITASTOR_C_API_VERSION < 5
vitastor_c_uring_handle_events(list->kv->cli->c);
#endif
} }
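
A note on the repeated three-line blocks in this file: one side of the diff follows every KV submission (get, set, del, next) with a guarded call that pumps the client's io_uring event loop. With client headers that predate VITASTOR_C_API_VERSION 5, completions are presumably not dispatched automatically, so the binding drains them by hand right after submitting:

    // after submitting a KV operation, on old client libraries only,
    // pump the io_uring so completion callbacks can actually run
    #if !defined VITASTOR_C_API_VERSION || VITASTOR_C_API_VERSION < 5
        vitastor_c_uring_handle_events(kv->cli->c);
    #endif

On newer library versions the guard compiles away, leaving event dispatch to the library itself.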
// close() // close()

View File

@ -1,6 +1,6 @@
{ {
"name": "vitastor", "name": "vitastor",
"version": "2.1.0", "version": "1.7.0",
"description": "Low-level native bindings to Vitastor client library", "description": "Low-level native bindings to Vitastor client library",
"main": "index.js", "main": "index.js",
"keywords": [ "keywords": [
@ -16,7 +16,7 @@
"build": "node-gyp rebuild" "build": "node-gyp rebuild"
}, },
"author": "Vitaliy Filippov", "author": "Vitaliy Filippov",
"license": "VNPL-1.1", "license": "VNPL-2.0",
"dependencies": { "dependencies": {
"bindings": "1.5.0", "bindings": "1.5.0",
"nan": "^2.19.0" "nan": "^2.19.0"

View File

@ -50,7 +50,7 @@ from cinder.volume import configuration
from cinder.volume import driver from cinder.volume import driver
from cinder.volume import volume_utils from cinder.volume import volume_utils
VITASTOR_VERSION = '2.1.0' VITASTOR_VERSION = '1.11.0'
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)

View File

@ -1,172 +0,0 @@
Index: pve-qemu-kvm-9.2.0/block/meson.build
===================================================================
--- pve-qemu-kvm-9.2.0.orig/block/meson.build
+++ pve-qemu-kvm-9.2.0/block/meson.build
@@ -126,6 +126,7 @@ foreach m : [
[libnfs, 'nfs', files('nfs.c')],
[libssh, 'ssh', files('ssh.c')],
[rbd, 'rbd', files('rbd.c')],
+ [vitastor, 'vitastor', files('vitastor.c')],
]
if m[0].found()
module_ss = ss.source_set()
Index: pve-qemu-kvm-9.2.0/meson.build
===================================================================
--- pve-qemu-kvm-9.2.0.orig/meson.build
+++ pve-qemu-kvm-9.2.0/meson.build
@@ -1590,6 +1590,26 @@ if not get_option('rbd').auto() or have_
endif
endif
+vitastor = not_found
+if not get_option('vitastor').auto() or have_block
+ libvitastor_client = cc.find_library('vitastor_client', has_headers: ['vitastor_c.h'],
+ required: get_option('vitastor'))
+ if libvitastor_client.found()
+ if cc.links('''
+ #include <vitastor_c.h>
+ int main(void) {
+ vitastor_c_create_qemu(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+ return 0;
+ }''', dependencies: libvitastor_client)
+ vitastor = declare_dependency(dependencies: libvitastor_client)
+ elif get_option('vitastor').enabled()
+ error('could not link libvitastor_client')
+ else
+ warning('could not link libvitastor_client, disabling')
+ endif
+ endif
+endif
+
glusterfs = not_found
glusterfs_ftruncate_has_stat = false
glusterfs_iocb_has_stat = false
@@ -2478,6 +2498,7 @@ endif
config_host_data.set('CONFIG_OPENGL', opengl.found())
config_host_data.set('CONFIG_PLUGIN', get_option('plugins'))
config_host_data.set('CONFIG_RBD', rbd.found())
+config_host_data.set('CONFIG_VITASTOR', vitastor.found())
config_host_data.set('CONFIG_RDMA', rdma.found())
config_host_data.set('CONFIG_RELOCATABLE', get_option('relocatable'))
config_host_data.set('CONFIG_SAFESTACK', get_option('safe_stack'))
@@ -4789,6 +4810,7 @@ summary_info += {'fdt support': fd
summary_info += {'libcap-ng support': libcap_ng}
summary_info += {'bpf support': libbpf}
summary_info += {'rbd support': rbd}
+summary_info += {'vitastor support': vitastor}
summary_info += {'smartcard support': cacard}
summary_info += {'U2F support': u2f}
summary_info += {'libusb': libusb}
Index: pve-qemu-kvm-9.2.0/meson_options.txt
===================================================================
--- pve-qemu-kvm-9.2.0.orig/meson_options.txt
+++ pve-qemu-kvm-9.2.0/meson_options.txt
@@ -200,6 +200,8 @@ option('lzo', type : 'feature', value :
description: 'lzo compression support')
option('rbd', type : 'feature', value : 'auto',
description: 'Ceph block device driver')
+option('vitastor', type : 'feature', value : 'auto',
+ description: 'Vitastor block device driver')
option('opengl', type : 'feature', value : 'auto',
description: 'OpenGL support')
option('rdma', type : 'feature', value : 'auto',
Index: pve-qemu-kvm-9.2.0/qapi/block-core.json
===================================================================
--- pve-qemu-kvm-9.2.0.orig/qapi/block-core.json
+++ pve-qemu-kvm-9.2.0/qapi/block-core.json
@@ -3481,7 +3481,7 @@
'raw', 'rbd',
{ 'name': 'replication', 'if': 'CONFIG_REPLICATION' },
'pbs',
- 'ssh', 'throttle', 'vdi', 'vhdx',
+ 'ssh', 'throttle', 'vdi', 'vhdx', 'vitastor',
{ 'name': 'virtio-blk-vfio-pci', 'if': 'CONFIG_BLKIO' },
{ 'name': 'virtio-blk-vhost-user', 'if': 'CONFIG_BLKIO' },
{ 'name': 'virtio-blk-vhost-vdpa', 'if': 'CONFIG_BLKIO' },
@@ -4592,6 +4592,28 @@
'*server': ['InetSocketAddressBase'] } }
##
+# @BlockdevOptionsVitastor:
+#
+# Driver specific block device options for vitastor
+#
+# @image: Image name
+# @inode: Inode number
+# @pool: Pool ID
+# @size: Desired image size in bytes
+# @config-path: Path to Vitastor configuration
+# @etcd-host: etcd connection address(es)
+# @etcd-prefix: etcd key/value prefix
+##
+{ 'struct': 'BlockdevOptionsVitastor',
+ 'data': { '*inode': 'uint64',
+ '*pool': 'uint64',
+ '*size': 'uint64',
+ '*image': 'str',
+ '*config-path': 'str',
+ '*etcd-host': 'str',
+ '*etcd-prefix': 'str' } }
+
+##
# @ReplicationMode:
#
# An enumeration of replication modes.
@@ -5054,6 +5076,7 @@
'throttle': 'BlockdevOptionsThrottle',
'vdi': 'BlockdevOptionsGenericFormat',
'vhdx': 'BlockdevOptionsGenericFormat',
+ 'vitastor': 'BlockdevOptionsVitastor',
'virtio-blk-vfio-pci':
{ 'type': 'BlockdevOptionsVirtioBlkVfioPci',
'if': 'CONFIG_BLKIO' },
@@ -5501,6 +5524,20 @@
'*encrypt' : 'RbdEncryptionCreateOptions' } }
##
+# @BlockdevCreateOptionsVitastor:
+#
+# Driver specific image creation options for Vitastor.
+#
+# @location: Where to store the new image file. This location cannot
+# point to a snapshot.
+#
+# @size: Size of the virtual disk in bytes
+##
+{ 'struct': 'BlockdevCreateOptionsVitastor',
+ 'data': { 'location': 'BlockdevOptionsVitastor',
+ 'size': 'size' } }
+
+##
# @BlockdevVmdkSubformat:
#
# Subformat options for VMDK images
@@ -5722,6 +5759,7 @@
'ssh': 'BlockdevCreateOptionsSsh',
'vdi': 'BlockdevCreateOptionsVdi',
'vhdx': 'BlockdevCreateOptionsVhdx',
+ 'vitastor': 'BlockdevCreateOptionsVitastor',
'vmdk': 'BlockdevCreateOptionsVmdk',
'vpc': 'BlockdevCreateOptionsVpc'
} }
Index: pve-qemu-kvm-9.2.0/scripts/meson-buildoptions.sh
===================================================================
--- pve-qemu-kvm-9.2.0.orig/scripts/meson-buildoptions.sh
+++ pve-qemu-kvm-9.2.0/scripts/meson-buildoptions.sh
@@ -174,6 +174,7 @@ meson_options_help() {
printf "%s\n" ' qga-vss build QGA VSS support (broken with MinGW)'
printf "%s\n" ' qpl Query Processing Library support'
printf "%s\n" ' rbd Ceph block device driver'
+ printf "%s\n" ' vitastor Vitastor block device driver'
printf "%s\n" ' rdma Enable RDMA-based migration'
printf "%s\n" ' replication replication support'
printf "%s\n" ' rust Rust support'
@@ -455,6 +456,8 @@ _meson_option_parse() {
--disable-qpl) printf "%s" -Dqpl=disabled ;;
--enable-rbd) printf "%s" -Drbd=enabled ;;
--disable-rbd) printf "%s" -Drbd=disabled ;;
+ --enable-vitastor) printf "%s" -Dvitastor=enabled ;;
+ --disable-vitastor) printf "%s" -Dvitastor=disabled ;;
--enable-rdma) printf "%s" -Drdma=enabled ;;
--disable-rdma) printf "%s" -Drdma=disabled ;;
--enable-relocatable) printf "%s" -Drelocatable=true ;;

View File

@ -1,172 +0,0 @@
diff --git a/block/meson.build b/block/meson.build
index f1262ec2ba..3cf3e23f16 100644
--- a/block/meson.build
+++ b/block/meson.build
@@ -114,6 +114,7 @@ foreach m : [
[libnfs, 'nfs', files('nfs.c')],
[libssh, 'ssh', files('ssh.c')],
[rbd, 'rbd', files('rbd.c')],
+ [vitastor, 'vitastor', files('vitastor.c')],
]
if m[0].found()
module_ss = ss.source_set()
diff --git a/meson.build b/meson.build
index 147097c652..2486b3aeb5 100644
--- a/meson.build
+++ b/meson.build
@@ -1590,6 +1590,26 @@ if not get_option('rbd').auto() or have_block
endif
endif
+vitastor = not_found
+if not get_option('vitastor').auto() or have_block
+ libvitastor_client = cc.find_library('vitastor_client', has_headers: ['vitastor_c.h'],
+ required: get_option('vitastor'))
+ if libvitastor_client.found()
+ if cc.links('''
+ #include <vitastor_c.h>
+ int main(void) {
+ vitastor_c_create_qemu(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+ return 0;
+ }''', dependencies: libvitastor_client)
+ vitastor = declare_dependency(dependencies: libvitastor_client)
+ elif get_option('vitastor').enabled()
+ error('could not link libvitastor_client')
+ else
+ warning('could not link libvitastor_client, disabling')
+ endif
+ endif
+endif
+
glusterfs = not_found
glusterfs_ftruncate_has_stat = false
glusterfs_iocb_has_stat = false
@@ -2474,6 +2494,7 @@ endif
config_host_data.set('CONFIG_OPENGL', opengl.found())
config_host_data.set('CONFIG_PLUGIN', get_option('plugins'))
config_host_data.set('CONFIG_RBD', rbd.found())
+config_host_data.set('CONFIG_VITASTOR', vitastor.found())
config_host_data.set('CONFIG_RDMA', rdma.found())
config_host_data.set('CONFIG_RELOCATABLE', get_option('relocatable'))
config_host_data.set('CONFIG_SAFESTACK', get_option('safe_stack'))
@@ -4778,6 +4799,7 @@ summary_info += {'fdt support': fdt_opt == 'internal' ? 'internal' : fdt}
summary_info += {'libcap-ng support': libcap_ng}
summary_info += {'bpf support': libbpf}
summary_info += {'rbd support': rbd}
+summary_info += {'vitastor support': vitastor}
summary_info += {'smartcard support': cacard}
summary_info += {'U2F support': u2f}
summary_info += {'libusb': libusb}
diff --git a/meson_options.txt b/meson_options.txt
index 5eeaf3eee5..b04eda29f9 100644
--- a/meson_options.txt
+++ b/meson_options.txt
@@ -200,6 +200,8 @@ option('lzo', type : 'feature', value : 'auto',
description: 'lzo compression support')
option('rbd', type : 'feature', value : 'auto',
description: 'Ceph block device driver')
+option('vitastor', type : 'feature', value : 'auto',
+ description: 'Vitastor block device driver')
option('opengl', type : 'feature', value : 'auto',
description: 'OpenGL support')
option('rdma', type : 'feature', value : 'auto',
diff --git a/qapi/block-core.json b/qapi/block-core.json
index fd3bcc1c17..41571ac3f9 100644
--- a/qapi/block-core.json
+++ b/qapi/block-core.json
@@ -3212,7 +3212,7 @@
'parallels', 'preallocate', 'qcow', 'qcow2', 'qed', 'quorum',
'raw', 'rbd',
{ 'name': 'replication', 'if': 'CONFIG_REPLICATION' },
- 'ssh', 'throttle', 'vdi', 'vhdx',
+ 'ssh', 'throttle', 'vdi', 'vhdx', 'vitastor',
{ 'name': 'virtio-blk-vfio-pci', 'if': 'CONFIG_BLKIO' },
{ 'name': 'virtio-blk-vhost-user', 'if': 'CONFIG_BLKIO' },
{ 'name': 'virtio-blk-vhost-vdpa', 'if': 'CONFIG_BLKIO' },
@@ -4295,6 +4295,28 @@
'*key-secret': 'str',
'*server': ['InetSocketAddressBase'] } }
+##
+# @BlockdevOptionsVitastor:
+#
+# Driver specific block device options for vitastor
+#
+# @image: Image name
+# @inode: Inode number
+# @pool: Pool ID
+# @size: Desired image size in bytes
+# @config-path: Path to Vitastor configuration
+# @etcd-host: etcd connection address(es)
+# @etcd-prefix: etcd key/value prefix
+##
+{ 'struct': 'BlockdevOptionsVitastor',
+ 'data': { '*inode': 'uint64',
+ '*pool': 'uint64',
+ '*size': 'uint64',
+ '*image': 'str',
+ '*config-path': 'str',
+ '*etcd-host': 'str',
+ '*etcd-prefix': 'str' } }
+
##
# @ReplicationMode:
#
@@ -4757,6 +4779,7 @@
'throttle': 'BlockdevOptionsThrottle',
'vdi': 'BlockdevOptionsGenericFormat',
'vhdx': 'BlockdevOptionsGenericFormat',
+ 'vitastor': 'BlockdevOptionsVitastor',
'virtio-blk-vfio-pci':
{ 'type': 'BlockdevOptionsVirtioBlkVfioPci',
'if': 'CONFIG_BLKIO' },
@@ -5198,6 +5221,20 @@
'*cluster-size' : 'size',
'*encrypt' : 'RbdEncryptionCreateOptions' } }
+##
+# @BlockdevCreateOptionsVitastor:
+#
+# Driver specific image creation options for Vitastor.
+#
+# @location: Where to store the new image file. This location cannot
+# point to a snapshot.
+#
+# @size: Size of the virtual disk in bytes
+##
+{ 'struct': 'BlockdevCreateOptionsVitastor',
+ 'data': { 'location': 'BlockdevOptionsVitastor',
+ 'size': 'size' } }
+
##
# @BlockdevVmdkSubformat:
#
@@ -5420,6 +5457,7 @@
'ssh': 'BlockdevCreateOptionsSsh',
'vdi': 'BlockdevCreateOptionsVdi',
'vhdx': 'BlockdevCreateOptionsVhdx',
+ 'vitastor': 'BlockdevCreateOptionsVitastor',
'vmdk': 'BlockdevCreateOptionsVmdk',
'vpc': 'BlockdevCreateOptionsVpc'
} }
diff --git a/scripts/meson-buildoptions.sh b/scripts/meson-buildoptions.sh
index a8066aab03..12e650e7d4 100644
--- a/scripts/meson-buildoptions.sh
+++ b/scripts/meson-buildoptions.sh
@@ -174,6 +174,7 @@ meson_options_help() {
printf "%s\n" ' qga-vss build QGA VSS support (broken with MinGW)'
printf "%s\n" ' qpl Query Processing Library support'
printf "%s\n" ' rbd Ceph block device driver'
+ printf "%s\n" ' vitastor Vitastor block device driver'
printf "%s\n" ' rdma Enable RDMA-based migration'
printf "%s\n" ' replication replication support'
printf "%s\n" ' rust Rust support'
@@ -455,6 +456,8 @@ _meson_option_parse() {
--disable-qpl) printf "%s" -Dqpl=disabled ;;
--enable-rbd) printf "%s" -Drbd=enabled ;;
--disable-rbd) printf "%s" -Drbd=disabled ;;
+ --enable-vitastor) printf "%s" -Dvitastor=enabled ;;
+ --disable-vitastor) printf "%s" -Dvitastor=disabled ;;
--enable-rdma) printf "%s" -Drdma=enabled ;;
--disable-rdma) printf "%s" -Drdma=disabled ;;
--enable-relocatable) printf "%s" -Drelocatable=true ;;

View File

@ -1,11 +1,11 @@
Name: vitastor Name: vitastor
Version: 2.1.0 Version: 1.11.0
Release: 1%{?dist} Release: 1%{?dist}
Summary: Vitastor, a fast software-defined clustered block storage Summary: Vitastor, a fast software-defined clustered block storage
License: Vitastor Network Public License 1.1 License: Vitastor Network Public License 1.1
URL: https://vitastor.io/ URL: https://vitastor.io/
Source0: vitastor-2.1.0.el7.tar.gz Source0: vitastor-1.11.0.el7.tar.gz
BuildRequires: liburing-devel >= 0.6 BuildRequires: liburing-devel >= 0.6
BuildRequires: gperftools-devel BuildRequires: gperftools-devel

View File

@ -1,11 +1,11 @@
Name: vitastor Name: vitastor
Version: 2.1.0 Version: 1.11.0
Release: 1%{?dist} Release: 1%{?dist}
Summary: Vitastor, a fast software-defined clustered block storage Summary: Vitastor, a fast software-defined clustered block storage
License: Vitastor Network Public License 1.1 License: Vitastor Network Public License 1.1
URL: https://vitastor.io/ URL: https://vitastor.io/
Source0: vitastor-2.1.0.el8.tar.gz Source0: vitastor-1.11.0.el8.tar.gz
BuildRequires: liburing-devel >= 0.6 BuildRequires: liburing-devel >= 0.6
BuildRequires: gperftools-devel BuildRequires: gperftools-devel

View File

@ -1,11 +1,11 @@
Name: vitastor Name: vitastor
Version: 2.1.0 Version: 1.11.0
Release: 1%{?dist} Release: 1%{?dist}
Summary: Vitastor, a fast software-defined clustered block storage Summary: Vitastor, a fast software-defined clustered block storage
License: Vitastor Network Public License 1.1 License: Vitastor Network Public License 1.1
URL: https://vitastor.io/ URL: https://vitastor.io/
Source0: vitastor-2.1.0.el9.tar.gz Source0: vitastor-1.11.0.el9.tar.gz
BuildRequires: liburing-devel >= 0.6 BuildRequires: liburing-devel >= 0.6
BuildRequires: gperftools-devel BuildRequires: gperftools-devel

View File

@ -19,7 +19,7 @@ if("${CMAKE_INSTALL_PREFIX}" MATCHES "^/usr/local/?$")
set(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}") set(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}")
endif() endif()
add_definitions(-DVITASTOR_VERSION="2.1.0") add_definitions(-DVITASTOR_VERSION="1.11.0")
add_definitions(-D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -Wall -Wno-sign-compare -Wno-comment -Wno-parentheses -Wno-pointer-arith -fdiagnostics-color=always -fno-omit-frame-pointer -I ${CMAKE_SOURCE_DIR}/src) add_definitions(-D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -Wall -Wno-sign-compare -Wno-comment -Wno-parentheses -Wno-pointer-arith -fdiagnostics-color=always -fno-omit-frame-pointer -I ${CMAKE_SOURCE_DIR}/src)
add_link_options(-fno-omit-frame-pointer) add_link_options(-fno-omit-frame-pointer)
if (${WITH_ASAN}) if (${WITH_ASAN})

View File

@ -8,7 +8,6 @@
#include "blockstore_impl.h" #include "blockstore_impl.h"
#include "blockstore_disk.h" #include "blockstore_disk.h"
#include "str_util.h" #include "str_util.h"
#include "allocator.h"
static uint32_t is_power_of_two(uint64_t value) static uint32_t is_power_of_two(uint64_t value)
{ {
@ -84,12 +83,6 @@ void blockstore_disk_t::parse_config(std::map<std::string, std::string> & config
throw std::runtime_error("data_csum_type="+config["data_csum_type"]+" is unsupported, only \"crc32c\" and \"none\" are supported"); throw std::runtime_error("data_csum_type="+config["data_csum_type"]+" is unsupported, only \"crc32c\" and \"none\" are supported");
} }
csum_block_size = parse_size(config["csum_block_size"]); csum_block_size = parse_size(config["csum_block_size"]);
discard_on_start = config.find("discard_on_start") != config.end() &&
(config["discard_on_start"] == "true" || config["discard_on_start"] == "1" || config["discard_on_start"] == "yes");
min_discard_size = parse_size(config["min_discard_size"]);
if (!min_discard_size)
min_discard_size = 1024*1024;
discard_granularity = parse_size(config["discard_granularity"]);
// Validate // Validate
if (!data_block_size) if (!data_block_size)
{ {
@ -179,6 +172,10 @@ void blockstore_disk_t::parse_config(std::map<std::string, std::string> & config
{ {
throw std::runtime_error("journal_offset must be a multiple of journal_block_size = "+std::to_string(journal_block_size)); throw std::runtime_error("journal_offset must be a multiple of journal_block_size = "+std::to_string(journal_block_size));
} }
clean_entry_bitmap_size = data_block_size / bitmap_granularity / 8;
clean_dyn_size = clean_entry_bitmap_size*2 + (csum_block_size
? data_block_size/csum_block_size*(data_csum_type & 0xFF) : 0);
clean_entry_size = sizeof(clean_disk_entry) + clean_dyn_size + 4 /*entry_csum*/;
} }
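
To make the sizing formulas just above concrete, here is a worked example under assumed typical settings (128 KiB data blocks, 4 KiB bitmap granularity, crc32c over 4 KiB checksum blocks; per the header below, the low byte of BLOCKSTORE_CSUM_CRC32C = 0x104 is the checksum length in bytes):

    uint32_t data_block_size = 128*1024, bitmap_granularity = 4096;
    uint32_t data_csum_type = 0x104, csum_block_size = 4096;
    uint32_t clean_entry_bitmap_size = data_block_size / bitmap_granularity / 8; // 4 bytes
    uint32_t clean_dyn_size = clean_entry_bitmap_size*2          // 2 bitmaps = 8
        + data_block_size / csum_block_size * (data_csum_type & 0xFF); // + 32*4 = 136 bytes
    // clean_entry_size = sizeof(clean_disk_entry) + 136 + 4 (trailing entry checksum)

So at these settings each clean metadata entry costs its fixed header plus 140 bytes of bitmaps and checksums.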
void blockstore_disk_t::calc_lengths(bool skip_meta_check) void blockstore_disk_t::calc_lengths(bool skip_meta_check)
@ -227,13 +224,9 @@ void blockstore_disk_t::calc_lengths(bool skip_meta_check)
} }
// required metadata size // required metadata size
block_count = data_len / data_block_size; block_count = data_len / data_block_size;
clean_entry_bitmap_size = data_block_size / bitmap_granularity / 8;
clean_dyn_size = clean_entry_bitmap_size*2 + (csum_block_size
? data_block_size/csum_block_size*(data_csum_type & 0xFF) : 0);
clean_entry_size = sizeof(clean_disk_entry) + clean_dyn_size + 4 /*entry_csum*/;
meta_len = (1 + (block_count - 1 + meta_block_size / clean_entry_size) / (meta_block_size / clean_entry_size)) * meta_block_size; meta_len = (1 + (block_count - 1 + meta_block_size / clean_entry_size) / (meta_block_size / clean_entry_size)) * meta_block_size;
bool new_doesnt_fit = (!meta_format && !skip_meta_check && meta_area_size < meta_len && !data_csum_type); if (meta_format == BLOCKSTORE_META_FORMAT_V1 ||
if (meta_format == BLOCKSTORE_META_FORMAT_V1 || new_doesnt_fit) !meta_format && !skip_meta_check && meta_area_size < meta_len && !data_csum_type)
{ {
uint64_t clean_entry_v0_size = sizeof(clean_disk_entry) + 2*clean_entry_bitmap_size; uint64_t clean_entry_v0_size = sizeof(clean_disk_entry) + 2*clean_entry_bitmap_size;
uint64_t meta_v0_len = (1 + (block_count - 1 + meta_block_size / clean_entry_v0_size) uint64_t meta_v0_len = (1 + (block_count - 1 + meta_block_size / clean_entry_v0_size)
@ -241,11 +234,7 @@ void blockstore_disk_t::calc_lengths(bool skip_meta_check)
if (meta_format == BLOCKSTORE_META_FORMAT_V1 || meta_area_size >= meta_v0_len) if (meta_format == BLOCKSTORE_META_FORMAT_V1 || meta_area_size >= meta_v0_len)
{ {
// Old metadata fits. // Old metadata fits.
if (new_doesnt_fit) printf("Warning: Using old metadata format without checksums because the new format doesn't fit into provided area\n");
{
printf("Warning: Using old metadata format without checksums because the new format"
" doesn't fit into provided area (%ju bytes required, %ju bytes available)\n", meta_len, meta_area_size);
}
clean_entry_size = clean_entry_v0_size; clean_entry_size = clean_entry_v0_size;
meta_len = meta_v0_len; meta_len = meta_v0_len;
meta_format = BLOCKSTORE_META_FORMAT_V1; meta_format = BLOCKSTORE_META_FORMAT_V1;
@ -257,7 +246,7 @@ void blockstore_disk_t::calc_lengths(bool skip_meta_check)
meta_format = BLOCKSTORE_META_FORMAT_V2; meta_format = BLOCKSTORE_META_FORMAT_V2;
if (!skip_meta_check && meta_area_size < meta_len) if (!skip_meta_check && meta_area_size < meta_len)
{ {
throw std::runtime_error("Metadata area is too small, need at least "+std::to_string(meta_len)+" bytes, have only "+std::to_string(meta_area_size)+" bytes"); throw std::runtime_error("Metadata area is too small, need at least "+std::to_string(meta_len)+" bytes");
} }
// requested journal size // requested journal size
if (!skip_meta_check && cfg_journal_size > journal_len) if (!skip_meta_check && cfg_journal_size > journal_len)
@ -426,44 +415,3 @@ void blockstore_disk_t::close_all()
close(journal_fd); close(journal_fd);
data_fd = meta_fd = journal_fd = -1; data_fd = meta_fd = journal_fd = -1;
} }
// Sadly DISCARD only works through ioctl(), but it seems to always block the device queue,
// so it's not a big deal that we can only run it synchronously.
int blockstore_disk_t::trim_data(allocator_t *alloc)
{
int r = 0;
uint64_t j = 0, i = 0;
uint64_t discarded = 0;
for (; i <= block_count; i++)
{
if (i >= block_count || alloc->get(i))
{
if (i > j && (i-j)*data_block_size >= min_discard_size)
{
uint64_t range[2] = { data_offset + j*data_block_size, (i-j)*data_block_size };
if (discard_granularity)
{
range[1] += range[0];
if (range[1] % discard_granularity)
range[1] = range[1] - (range[1] % discard_granularity);
if (range[0] % discard_granularity)
range[0] = range[0] + discard_granularity - (range[0] % discard_granularity);
if (range[0] >= range[1])
continue;
range[1] -= range[0];
}
r = ioctl(data_fd, BLKDISCARD, &range);
if (r != 0)
{
fprintf(stderr, "Failed to execute BLKDISCARD %ju+%ju on %s: %s (code %d)\n",
range[0], range[1], data_device.c_str(), strerror(-r), r);
return -errno;
}
discarded += range[1];
}
j = i+1;
}
}
fprintf(stderr, "%s (%ju bytes) of unused data discarded on %s\n", format_size(discarded).c_str(), discarded, data_device.c_str());
return 0;
}
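
The discard_granularity arithmetic in trim_data() above converts the range from {offset, length} to {start, end}, shrinks both ends inward to granularity boundaries, and converts back; the `range[0] >= range[1]` check then drops extents smaller than one granule. Traced with hypothetical numbers, a free extent at [1.5 MiB, 4.25 MiB) under a 1 MiB granularity:

    uint64_t g = 1048576;                           // hypothetical discard_granularity
    uint64_t range[2] = { 1572864, 2883584 };       // offset 1.5 MiB, length 2.75 MiB
    range[1] += range[0];                           // end = 4456448 (4.25 MiB)
    if (range[1] % g) range[1] -= range[1] % g;     // end rounded down to 4 MiB
    if (range[0] % g) range[0] += g - range[0] % g; // start rounded up to 2 MiB
    range[1] -= range[0];                           // back to a length: 2 MiB discarded

Rounding inward, never outward, keeps the BLKDISCARD strictly inside the unallocated extent.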

View File

@ -12,8 +12,6 @@
// Lower byte of checksum type is its length // Lower byte of checksum type is its length
#define BLOCKSTORE_CSUM_CRC32C 0x104 #define BLOCKSTORE_CSUM_CRC32C 0x104
class allocator_t;
struct blockstore_disk_t struct blockstore_disk_t
{ {
std::string data_device, meta_device, journal_device; std::string data_device, meta_device, journal_device;
@ -36,18 +34,14 @@ struct blockstore_disk_t
// I/O modes for data, metadata and journal: direct or "" = O_DIRECT, cached = O_SYNC, directsync = O_DIRECT|O_SYNC // I/O modes for data, metadata and journal: direct or "" = O_DIRECT, cached = O_SYNC, directsync = O_DIRECT|O_SYNC
// O_SYNC without O_DIRECT = use Linux page cache for reads and writes // O_SYNC without O_DIRECT = use Linux page cache for reads and writes
std::string data_io, meta_io, journal_io; std::string data_io, meta_io, journal_io;
// Data discard granularity and minimum size (for the sake of performance)
bool discard_on_start = false;
uint64_t min_discard_size = 1024*1024;
uint64_t discard_granularity = 0;
int meta_fd = -1, data_fd = -1, journal_fd = -1; int meta_fd = -1, data_fd = -1, journal_fd = -1;
uint64_t meta_offset, meta_device_sect, meta_device_size, meta_len, meta_format = 0; uint64_t meta_offset, meta_device_sect, meta_device_size, meta_len, meta_format = 0;
uint64_t data_offset, data_device_sect, data_device_size, data_len; uint64_t data_offset, data_device_sect, data_device_size, data_len;
uint64_t journal_offset, journal_device_sect, journal_device_size, journal_len; uint64_t journal_offset, journal_device_sect, journal_device_size, journal_len;
uint32_t block_order = 0; uint32_t block_order;
uint64_t block_count = 0; uint64_t block_count;
uint32_t clean_entry_bitmap_size = 0, clean_entry_size = 0, clean_dyn_size = 0; uint32_t clean_entry_bitmap_size = 0, clean_entry_size = 0, clean_dyn_size = 0;
void parse_config(std::map<std::string, std::string> & config); void parse_config(std::map<std::string, std::string> & config);
@ -56,7 +50,6 @@ struct blockstore_disk_t
void open_journal(); void open_journal();
void calc_lengths(bool skip_meta_check = false); void calc_lengths(bool skip_meta_check = false);
void close_all(); void close_all();
int trim_data(allocator_t *alloc);
inline uint64_t dirty_dyn_size(uint64_t offset, uint64_t len) inline uint64_t dirty_dyn_size(uint64_t offset, uint64_t len)
{ {

View File

@ -12,15 +12,15 @@ blockstore_impl_t::blockstore_impl_t(blockstore_config_t & config, ring_loop_t *
ringloop->register_consumer(&ring_consumer); ringloop->register_consumer(&ring_consumer);
initialized = 0; initialized = 0;
parse_config(config, true); parse_config(config, true);
zero_object = (uint8_t*)memalign_or_die(MEM_ALIGNMENT, dsk.data_block_size);
alloc_dyn_data = dsk.clean_dyn_size > sizeof(void*) || dsk.csum_block_size > 0;
try try
{ {
dsk.open_data(); dsk.open_data();
dsk.open_meta(); dsk.open_meta();
dsk.open_journal(); dsk.open_journal();
calc_lengths(); calc_lengths();
alloc_dyn_data = dsk.clean_dyn_size > sizeof(void*) || dsk.csum_block_size > 0; data_alloc = new allocator(dsk.block_count);
zero_object = (uint8_t*)memalign_or_die(MEM_ALIGNMENT, dsk.data_block_size);
data_alloc = new allocator_t(dsk.block_count);
} }
catch (std::exception & e) catch (std::exception & e)
{ {
@ -34,8 +34,7 @@ blockstore_impl_t::~blockstore_impl_t()
{ {
delete data_alloc; delete data_alloc;
delete flusher; delete flusher;
if (zero_object) free(zero_object);
free(zero_object);
ringloop->unregister_consumer(&ring_consumer); ringloop->unregister_consumer(&ring_consumer);
dsk.close_all(); dsk.close_all();
if (metadata_buffer) if (metadata_buffer)
@ -84,20 +83,14 @@ void blockstore_impl_t::loop()
{ {
delete journal_init_reader; delete journal_init_reader;
journal_init_reader = NULL; journal_init_reader = NULL;
initialized = 3; if (journal.flush_journal)
initialized = 3;
else
initialized = 10;
ringloop->wakeup(); ringloop->wakeup();
} }
} }
if (initialized == 3) if (initialized == 3)
{
if (!readonly && dsk.discard_on_start)
dsk.trim_data(data_alloc);
if (journal.flush_journal)
initialized = 4;
else
initialized = 10;
}
if (initialized == 4)
{ {
if (readonly) if (readonly)
{ {
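
The loop() hunk above threads a new stage into blockstore start-up: after journal replay finishes, one side optionally walks the block allocator and discards unused space before moving on. A sketch of the resulting progression, with stage numbers as in the code:

    // 0..2 : open devices, load metadata, replay journal
    // 3    : if (!readonly && dsk.discard_on_start) dsk.trim_data(data_alloc);
    //        then initialized = journal.flush_journal ? 4 : 10
    // 4    : dedicated journal-flush pass
    // 10   : ready, serve I/O

Keeping the trim synchronous here is cheap because, per the comment on trim_data() above, BLKDISCARD blocks the device queue anyway.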

View File

@ -266,8 +266,6 @@ class blockstore_impl_t
int throttle_threshold_us = 50; int throttle_threshold_us = 50;
// Maximum writes between automatically added fsync operations // Maximum writes between automatically added fsync operations
uint64_t autosync_writes = 128; uint64_t autosync_writes = 128;
// Log level (0-10)
int log_level = 0;
/******* END OF OPTIONS *******/ /******* END OF OPTIONS *******/
struct ring_consumer_t ring_consumer; struct ring_consumer_t ring_consumer;
@ -281,9 +279,9 @@ class blockstore_impl_t
std::vector<obj_ver_id> unsynced_big_writes, unsynced_small_writes; std::vector<obj_ver_id> unsynced_big_writes, unsynced_small_writes;
int unsynced_big_write_count = 0, unstable_unsynced = 0; int unsynced_big_write_count = 0, unstable_unsynced = 0;
int unsynced_queued_ops = 0; int unsynced_queued_ops = 0;
allocator_t *data_alloc = NULL; allocator *data_alloc = NULL;
uint64_t used_blocks = 0; uint64_t used_blocks = 0;
uint8_t *zero_object = NULL; uint8_t *zero_object;
void *metadata_buffer = NULL; void *metadata_buffer = NULL;

View File

@ -138,11 +138,7 @@ resume_1:
exit(1); exit(1);
} }
hdr->header_csum = csum; hdr->header_csum = csum;
if (bs->dsk.meta_format != BLOCKSTORE_META_FORMAT_V2) bs->dsk.meta_format = BLOCKSTORE_META_FORMAT_V2;
{
bs->dsk.meta_format = BLOCKSTORE_META_FORMAT_V2;
bs->dsk.calc_lengths();
}
} }
else if (hdr->version == BLOCKSTORE_META_FORMAT_V1) else if (hdr->version == BLOCKSTORE_META_FORMAT_V1)
{ {
@ -150,15 +146,11 @@ resume_1:
hdr->csum_block_size = 0; hdr->csum_block_size = 0;
hdr->header_csum = 0; hdr->header_csum = 0;
// Enable compatibility mode - entries without checksums // Enable compatibility mode - entries without checksums
if (bs->dsk.meta_format != BLOCKSTORE_META_FORMAT_V1 || bs->dsk.clean_entry_size = sizeof(clean_disk_entry) + bs->dsk.clean_entry_bitmap_size*2;
bs->dsk.data_csum_type != 0 || bs->dsk.csum_block_size != 0) bs->dsk.meta_len = (1 + (bs->dsk.block_count - 1 + bs->dsk.meta_block_size / bs->dsk.clean_entry_size)
{ / (bs->dsk.meta_block_size / bs->dsk.clean_entry_size)) * bs->dsk.meta_block_size;
bs->dsk.data_csum_type = 0; bs->dsk.meta_format = BLOCKSTORE_META_FORMAT_V1;
bs->dsk.csum_block_size = 0; printf("Warning: Starting with metadata in the old format without checksums, as stored on disk\n");
bs->dsk.meta_format = BLOCKSTORE_META_FORMAT_V1;
bs->dsk.calc_lengths();
printf("Warning: Starting with metadata in the old format without checksums, as stored on disk\n");
}
} }
else if (hdr->version > BLOCKSTORE_META_FORMAT_V2) else if (hdr->version > BLOCKSTORE_META_FORMAT_V2)
{ {
@ -346,7 +338,7 @@ bool blockstore_init_meta::handle_meta_block(uint8_t *buf, uint64_t entries_per_
uint32_t *entry_csum = (uint32_t*)((uint8_t*)entry + bs->dsk.clean_entry_size - 4); uint32_t *entry_csum = (uint32_t*)((uint8_t*)entry + bs->dsk.clean_entry_size - 4);
if (*entry_csum != crc32c(0, entry, bs->dsk.clean_entry_size - 4)) if (*entry_csum != crc32c(0, entry, bs->dsk.clean_entry_size - 4))
{ {
printf("Metadata entry %ju is corrupt (checksum mismatch: %08x vs %08x), skipping\n", done_cnt+i, *entry_csum, crc32c(0, entry, bs->dsk.clean_entry_size - 4)); printf("Metadata entry %ju is corrupt (checksum mismatch), skipping\n", done_cnt+i);
// zero out the invalid entry, otherwise we'll hit "tried to overwrite non-zero metadata entry" later // zero out the invalid entry, otherwise we'll hit "tried to overwrite non-zero metadata entry" later
if (bs->inmemory_meta) if (bs->inmemory_meta)
{ {
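
The check in the last hunk encodes the v2 on-disk layout: every clean metadata entry carries a trailing crc32c of its own contents. A minimal standalone sketch of that verification, assuming the crc32c(init, data, len) helper used above:

    // the last 4 bytes of an entry are a crc32c over everything before them
    static bool meta_entry_valid(const uint8_t *entry, uint32_t clean_entry_size)
    {
        uint32_t stored = *(uint32_t*)(entry + clean_entry_size - 4);
        return stored == crc32c(0, entry, clean_entry_size - 4);
    }

On mismatch the loader only warns, zeroes the one bad entry and keeps going, rather than failing the whole start-up.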

View File

@ -113,13 +113,10 @@ int blockstore_journal_check_t::check_available(blockstore_op_t *op, int entries
if (!right_dir && next_pos >= bs->journal.used_start-bs->journal.block_size) if (!right_dir && next_pos >= bs->journal.used_start-bs->journal.block_size)
{ {
// No space in the journal. Wait until used_start changes. // No space in the journal. Wait until used_start changes.
if (bs->log_level > 5) printf(
{ "Ran out of journal space (used_start=%08jx, next_free=%08jx, dirty_start=%08jx)\n",
printf( bs->journal.used_start, bs->journal.next_free, bs->journal.dirty_start
"Ran out of journal space (used_start=%08jx, next_free=%08jx, dirty_start=%08jx)\n", );
bs->journal.used_start, bs->journal.next_free, bs->journal.dirty_start
);
}
PRIV(op)->wait_for = WAIT_JOURNAL; PRIV(op)->wait_for = WAIT_JOURNAL;
bs->flusher->request_trim(); bs->flusher->request_trim();
PRIV(op)->wait_detail = bs->journal.used_start; PRIV(op)->wait_detail = bs->journal.used_start;

View File

@ -101,7 +101,6 @@ void blockstore_impl_t::parse_config(blockstore_config_t & config, bool init)
config["journal_no_same_sector_overwrites"] == "1" || config["journal_no_same_sector_overwrites"] == "yes"; config["journal_no_same_sector_overwrites"] == "1" || config["journal_no_same_sector_overwrites"] == "yes";
journal.inmemory = config["inmemory_journal"] != "false" && config["inmemory_journal"] != "0" && journal.inmemory = config["inmemory_journal"] != "false" && config["inmemory_journal"] != "0" &&
config["inmemory_journal"] != "no"; config["inmemory_journal"] != "no";
log_level = strtoull(config["log_level"].c_str(), NULL, 10);
// Validate // Validate
if (journal.sector_count < 2) if (journal.sector_count < 2)
{ {

View File

@ -7,14 +7,10 @@ set(MSGR_RDMA "")
if (IBVERBS_LIBRARIES) if (IBVERBS_LIBRARIES)
set(MSGR_RDMA "msgr_rdma.cpp") set(MSGR_RDMA "msgr_rdma.cpp")
endif (IBVERBS_LIBRARIES) endif (IBVERBS_LIBRARIES)
set(MSGR_RDMACM "")
if (RDMACM_LIBRARIES)
set(MSGR_RDMACM "msgr_rdmacm.cpp")
endif (RDMACM_LIBRARIES)
add_library(vitastor_common STATIC add_library(vitastor_common STATIC
../util/epoll_manager.cpp etcd_state_client.cpp messenger.cpp ../util/addr_util.cpp ../util/epoll_manager.cpp etcd_state_client.cpp messenger.cpp ../util/addr_util.cpp
msgr_stop.cpp msgr_op.cpp msgr_send.cpp msgr_receive.cpp ../util/ringloop.cpp ../../json11/json11.cpp msgr_stop.cpp msgr_op.cpp msgr_send.cpp msgr_receive.cpp ../util/ringloop.cpp ../../json11/json11.cpp
http_client.cpp osd_ops.cpp pg_states.cpp ../util/timerfd_manager.cpp ../util/str_util.cpp ../util/json_util.cpp ${MSGR_RDMA} ${MSGR_RDMACM} http_client.cpp osd_ops.cpp pg_states.cpp ../util/timerfd_manager.cpp ../util/str_util.cpp ../util/json_util.cpp ${MSGR_RDMA}
) )
target_link_libraries(vitastor_common pthread) target_link_libraries(vitastor_common pthread)
target_compile_options(vitastor_common PUBLIC -fPIC) target_compile_options(vitastor_common PUBLIC -fPIC)
@ -32,7 +28,6 @@ target_link_libraries(vitastor_client
vitastor_cli vitastor_cli
${LIBURING_LIBRARIES} ${LIBURING_LIBRARIES}
${IBVERBS_LIBRARIES} ${IBVERBS_LIBRARIES}
${RDMACM_LIBRARIES}
) )
set_target_properties(vitastor_client PROPERTIES VERSION ${VITASTOR_VERSION} SOVERSION 0) set_target_properties(vitastor_client PROPERTIES VERSION ${VITASTOR_VERSION} SOVERSION 0)
configure_file(vitastor.pc.in vitastor.pc @ONLY) configure_file(vitastor.pc.in vitastor.pc @ONLY)
@ -93,7 +88,7 @@ add_executable(test_cluster_client
EXCLUDE_FROM_ALL EXCLUDE_FROM_ALL
../test/test_cluster_client.cpp ../test/test_cluster_client.cpp
pg_states.cpp osd_ops.cpp cluster_client.cpp cluster_client_list.cpp cluster_client_wb.cpp msgr_op.cpp ../test/mock/messenger.cpp msgr_stop.cpp pg_states.cpp osd_ops.cpp cluster_client.cpp cluster_client_list.cpp cluster_client_wb.cpp msgr_op.cpp ../test/mock/messenger.cpp msgr_stop.cpp
etcd_state_client.cpp ../util/timerfd_manager.cpp ../util/addr_util.cpp ../util/str_util.cpp ../util/json_util.cpp ../../json11/json11.cpp etcd_state_client.cpp ../util/timerfd_manager.cpp ../util/str_util.cpp ../util/json_util.cpp ../../json11/json11.cpp
) )
target_compile_definitions(test_cluster_client PUBLIC -D__MOCK__) target_compile_definitions(test_cluster_client PUBLIC -D__MOCK__)
target_include_directories(test_cluster_client BEFORE PUBLIC ${CMAKE_SOURCE_DIR}/src/test/mock) target_include_directories(test_cluster_client BEFORE PUBLIC ${CMAKE_SOURCE_DIR}/src/test/mock)

View File

@ -29,7 +29,8 @@ cluster_client_t::cluster_client_t(ring_loop_t *ringloop, timerfd_manager_t *tfd
if (msgr.osd_peer_fds.find(peer_osd) != msgr.osd_peer_fds.end()) if (msgr.osd_peer_fds.find(peer_osd) != msgr.osd_peer_fds.end())
{ {
// peer_osd just connected // peer_osd just connected
continue_ops(); // retry operations waiting for connection immediately
continue_ops(client_retry_interval);
continue_lists(); continue_lists();
continue_raw_ops(peer_osd); continue_raw_ops(peer_osd);
} }
@ -1244,6 +1245,7 @@ int cluster_client_t::try_send(cluster_op_t *op, int i)
.req = { .rw = { .req = { .rw = {
.header = { .header = {
.magic = SECONDARY_OSD_OP_MAGIC, .magic = SECONDARY_OSD_OP_MAGIC,
.id = next_op_id(),
.opcode = op->opcode == OSD_OP_READ_BITMAP || op->opcode == OSD_OP_READ_CHAIN_BITMAP ? OSD_OP_READ : op->opcode, .opcode = op->opcode == OSD_OP_READ_BITMAP || op->opcode == OSD_OP_READ_CHAIN_BITMAP ? OSD_OP_READ : op->opcode,
}, },
.inode = op->cur_inode, .inode = op->cur_inode,
@ -1352,6 +1354,7 @@ void cluster_client_t::send_sync(cluster_op_t *op, cluster_op_part_t *part)
.req = { .req = {
.hdr = { .hdr = {
.magic = SECONDARY_OSD_OP_MAGIC, .magic = SECONDARY_OSD_OP_MAGIC,
.id = next_op_id(),
.opcode = OSD_OP_SYNC, .opcode = OSD_OP_SYNC,
}, },
}, },
@ -1496,3 +1499,8 @@ void cluster_client_t::copy_part_bitmap(cluster_op_t *op, cluster_op_part_t *par
part_len--; part_len--;
} }
} }
uint64_t cluster_client_t::next_op_id()
{
return msgr.next_subop_id++;
}
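
One side of this diff stamps every outgoing request header with `.id = next_op_id()`, drawing from the messenger's shared `next_subop_id` counter; the same stamping appears below for SYNC, SEC_LIST and PING subops, while the other side presumably assigns ids elsewhere. Unique, strictly increasing ids let the receiving end match replies to requests and, together with the `check_sequencing` feature flag advertised further down, detect gaps. A minimal sketch of that contract, with hypothetical names:

    // every outgoing op of any kind draws the next id from one counter,
    // so ids are unique and strictly increasing across all op types
    struct op_sequencer
    {
        uint64_t next_subop_id = 1;
        uint64_t next_op_id() { return next_subop_id++; }
    };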

View File

@ -83,11 +83,8 @@ class writeback_cache_t;
// FIXME: Split into public and private interfaces // FIXME: Split into public and private interfaces
class cluster_client_t class cluster_client_t
{ {
#ifdef __MOCK__ timerfd_manager_t *tfd;
public: ring_loop_t *ringloop;
#endif
timerfd_manager_t *tfd = NULL;
ring_loop_t *ringloop = NULL;
std::map<pool_id_t, uint64_t> pg_counts; std::map<pool_id_t, uint64_t> pg_counts;
std::map<pool_pg_num_t, osd_num_t> pg_primary; std::map<pool_pg_num_t, osd_num_t> pg_primary;
@ -147,16 +144,14 @@ public:
bool get_immediate_commit(uint64_t inode); bool get_immediate_commit(uint64_t inode);
void continue_ops(int time_passed = 0);
void list_inode(inode_t inode, uint64_t min_offset, uint64_t max_offset, int max_parallel_pgs, std::function<void( void list_inode(inode_t inode, uint64_t min_offset, uint64_t max_offset, int max_parallel_pgs, std::function<void(
int status, int pgs_left, pg_num_t pg_num, std::set<object_id>&& objects)> pg_callback); int status, int pgs_left, pg_num_t pg_num, std::set<object_id>&& objects)> pg_callback);
//inline uint32_t get_bs_bitmap_granularity() { return st_cli.global_bitmap_granularity; } //inline uint32_t get_bs_bitmap_granularity() { return st_cli.global_bitmap_granularity; }
//inline uint64_t get_bs_block_size() { return st_cli.global_block_size; } //inline uint64_t get_bs_block_size() { return st_cli.global_block_size; }
uint64_t next_op_id();
#ifndef __MOCK__
protected:
#endif
void continue_ops(int time_passed = 0);
protected: protected:
bool affects_osd(uint64_t inode, uint64_t offset, uint64_t len, osd_num_t osd); bool affects_osd(uint64_t inode, uint64_t offset, uint64_t len, osd_num_t osd);

View File

@ -52,7 +52,6 @@ public:
bool read_from_cache(cluster_op_t *op, uint32_t bitmap_granularity); bool read_from_cache(cluster_op_t *op, uint32_t bitmap_granularity);
void flush_buffers(cluster_client_t *cli, dirty_buf_it_t from_it, dirty_buf_it_t to_it); void flush_buffers(cluster_client_t *cli, dirty_buf_it_t from_it, dirty_buf_it_t to_it);
void mark_flush_written(uint64_t inode, uint64_t offset, uint64_t len, uint64_t flush_id); void mark_flush_written(uint64_t inode, uint64_t offset, uint64_t len, uint64_t flush_id);
void delete_flush(uint64_t inode, uint64_t offset, uint64_t len, uint64_t flush_id);
void fsync_start(); void fsync_start();
void fsync_error(); void fsync_error();
void fsync_ok(); void fsync_ok();

View File

@ -342,6 +342,7 @@ void cluster_client_t::send_list(inode_list_osd_t *cur_list)
.sec_list = { .sec_list = {
.header = { .header = {
.magic = SECONDARY_OSD_OP_MAGIC, .magic = SECONDARY_OSD_OP_MAGIC,
.id = next_op_id(),
.opcode = OSD_OP_SEC_LIST, .opcode = OSD_OP_SEC_LIST,
}, },
.list_pg = cur_list->pg->pg_num, .list_pg = cur_list->pg->pg_num,

View File

@ -9,7 +9,7 @@ writeback_cache_t::~writeback_cache_t()
{ {
for (auto & bp: dirty_buffers) for (auto & bp: dirty_buffers)
{ {
if (bp.second.buf && !--(*bp.second.refcnt)) if (!--(*bp.second.refcnt))
{ {
free(bp.second.refcnt); // refcnt is allocated with the buffer free(bp.second.refcnt); // refcnt is allocated with the buffer
} }
@ -115,10 +115,7 @@ void writeback_cache_t::copy_write(cluster_op_t *op, int state, uint64_t new_flu
.flush_id = dirty_it->second.flush_id, .flush_id = dirty_it->second.flush_id,
.refcnt = dirty_it->second.refcnt, .refcnt = dirty_it->second.refcnt,
}); });
if (dirty_it->second.buf) (*dirty_it->second.refcnt)++;
{
(*dirty_it->second.refcnt)++;
}
if (dirty_it->second.state == CACHE_DIRTY) if (dirty_it->second.state == CACHE_DIRTY)
{ {
if (dirty_it->second.buf) if (dirty_it->second.buf)
@ -196,7 +193,7 @@ void writeback_cache_t::copy_write(cluster_op_t *op, int state, uint64_t new_flu
writeback_queue_size++; writeback_queue_size++;
} }
} }
if (dirty_it->second.buf && !--(*dirty_it->second.refcnt)) if (!--(*dirty_it->second.refcnt))
{ {
free(dirty_it->second.refcnt); free(dirty_it->second.refcnt);
} }
@ -207,10 +204,7 @@ void writeback_cache_t::copy_write(cluster_op_t *op, int state, uint64_t new_flu
bool is_del = op->opcode == OSD_OP_DELETE; bool is_del = op->opcode == OSD_OP_DELETE;
uint64_t *refcnt = is_del ? NULL : (uint64_t*)malloc_or_die(sizeof(uint64_t) + op->len); uint64_t *refcnt = is_del ? NULL : (uint64_t*)malloc_or_die(sizeof(uint64_t) + op->len);
uint8_t *buf = is_del ? NULL : ((uint8_t*)refcnt + sizeof(uint64_t)); uint8_t *buf = is_del ? NULL : ((uint8_t*)refcnt + sizeof(uint64_t));
if (!is_del) *refcnt = 1;
{
*refcnt = 1;
}
dirty_it = dirty_buffers.emplace_hint(dirty_it, (object_id){ dirty_it = dirty_buffers.emplace_hint(dirty_it, (object_id){
.inode = op->inode, .inode = op->inode,
.stripe = op->offset, .stripe = op->offset,
@ -332,14 +326,7 @@ void writeback_cache_t::flush_buffers(cluster_client_t *cli, dirty_buf_it_t from
} }
flushed_buffers.erase(fl_it++); flushed_buffers.erase(fl_it++);
} }
if (op->flags & OP_IMMEDIATE_COMMIT) mark_flush_written(op->inode, op->offset, op->len, flush_id);
{
delete_flush(op->inode, op->offset, op->len, flush_id);
}
else
{
mark_flush_written(op->inode, op->offset, op->len, flush_id);
}
delete op; delete op;
writebacks_active--; writebacks_active--;
// We can't call execute_internal because it affects an invalid copy of the list here // We can't call execute_internal because it affects an invalid copy of the list here
@ -357,25 +344,6 @@ void writeback_cache_t::flush_buffers(cluster_client_t *cli, dirty_buf_it_t from
} }
} }
void writeback_cache_t::delete_flush(uint64_t inode, uint64_t offset, uint64_t len, uint64_t flush_id)
{
for (auto dirty_it = find_dirty(inode, offset);
dirty_it != dirty_buffers.end() && dirty_it->first.inode == inode &&
dirty_it->first.stripe < offset+len; )
{
if (dirty_it->second.flush_id == flush_id && dirty_it->second.state == CACHE_REPEATING)
{
if (dirty_it->second.buf && !--(*dirty_it->second.refcnt))
{
free(dirty_it->second.refcnt);
}
dirty_buffers.erase(dirty_it++);
}
else
dirty_it++;
}
}
void writeback_cache_t::mark_flush_written(uint64_t inode, uint64_t offset, uint64_t len, uint64_t flush_id) void writeback_cache_t::mark_flush_written(uint64_t inode, uint64_t offset, uint64_t len, uint64_t flush_id)
{ {
for (auto dirty_it = find_dirty(inode, offset); for (auto dirty_it = find_dirty(inode, offset);
@ -592,10 +560,8 @@ void writeback_cache_t::fsync_ok()
{ {
if (uw_it->second.state == CACHE_FLUSHING) if (uw_it->second.state == CACHE_FLUSHING)
{ {
if (uw_it->second.buf && !--(*uw_it->second.refcnt)) if (!--(*uw_it->second.refcnt))
{
free(uw_it->second.refcnt); free(uw_it->second.refcnt);
}
dirty_buffers.erase(uw_it++); dirty_buffers.erase(uw_it++);
} }
else else
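
Most of the churn in this file is around one ownership pattern: a dirty buffer's refcount and its data share a single allocation, and whoever drops the count to zero frees both. DELETE operations are tracked with buf = refcnt = NULL, which is why one side of the diff guards every increment and decrement with `dirty_it->second.buf &&`. The pattern in isolation:

    // refcount and payload share one allocation; deletes carry no payload
    uint64_t *refcnt = is_del ? NULL : (uint64_t*)malloc_or_die(sizeof(uint64_t) + len);
    uint8_t  *buf    = is_del ? NULL : (uint8_t*)refcnt + sizeof(uint64_t);
    if (!is_del)
        *refcnt = 1;
    // each additional reference: if (buf) (*refcnt)++;
    // each release:
    if (buf && !--(*refcnt))
        free(refcnt); // frees the buffer too

Without the `buf &&` guard, releasing a DELETE entry would dereference a NULL refcount.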

View File

@ -31,11 +31,6 @@ etcd_state_client_t::~etcd_state_client_t()
keepalive_client = NULL; keepalive_client = NULL;
} }
#endif #endif
if (load_pgs_timer_id >= 0)
{
tfd->clear_timer(load_pgs_timer_id);
load_pgs_timer_id = -1;
}
} }
#ifndef __MOCK__ #ifndef __MOCK__
@ -148,7 +143,6 @@ void etcd_state_client_t::etcd_call(std::string api, json11::Json payload, int t
} }
if (interval > 0) if (interval > 0)
{ {
// FIXME: Prevent destruction of etcd_state_client if timers or requests are active
tfd->set_timer(interval, false, [this, api, payload, timeout, retries, interval, callback](int) tfd->set_timer(interval, false, [this, api, payload, timeout, retries, interval, callback](int)
{ {
etcd_call(api, payload, timeout, retries, interval, callback); etcd_call(api, payload, timeout, retries, interval, callback);
@ -182,7 +176,7 @@ void etcd_state_client_t::add_etcd_url(std::string addr)
exit(1); exit(1);
} }
if (!local_ips.size()) if (!local_ips.size())
local_ips = getifaddr_list(std::vector<addr_mask_t>(), true); local_ips = getifaddr_list(std::vector<std::string>(), true);
std::string check_addr; std::string check_addr;
int pos = addr.find('/'); int pos = addr.find('/');
int pos2 = addr.find(':'); int pos2 = addr.find(':');
@ -277,11 +271,6 @@ void etcd_state_client_t::parse_config(const json11::Json & config)
{ {
this->etcd_quick_timeout = 1000; this->etcd_quick_timeout = 1000;
} }
this->etcd_min_reload_interval = config["etcd_min_reload_interval"].uint64_value();
if (this->etcd_min_reload_interval <= 0)
{
this->etcd_min_reload_interval = 50;
}
if (this->etcd_ws_keepalive_interval != old_etcd_ws_keepalive_interval && ws_keepalive_timer >= 0) if (this->etcd_ws_keepalive_interval != old_etcd_ws_keepalive_interval && ws_keepalive_timer >= 0)
{ {
#ifndef __MOCK__ #ifndef __MOCK__
@ -614,23 +603,6 @@ void etcd_state_client_t::load_global_config()
void etcd_state_client_t::load_pgs() void etcd_state_client_t::load_pgs()
{ {
timespec tv;
clock_gettime(CLOCK_REALTIME, &tv);
uint64_t ms_passed = (tv.tv_sec-etcd_last_reload.tv_sec)*1000 + (tv.tv_nsec-etcd_last_reload.tv_nsec)/1000000;
if (ms_passed < etcd_min_reload_interval)
{
if (load_pgs_timer_id < 0)
{
load_pgs_timer_id = tfd->set_timer(etcd_min_reload_interval+50-ms_passed, false, [this](int) { load_pgs(); });
}
return;
}
etcd_last_reload = tv;
if (load_pgs_timer_id >= 0)
{
tfd->clear_timer(load_pgs_timer_id);
load_pgs_timer_id = -1;
}
json11::Json::array txn = { json11::Json::array txn = {
json11::Json::object { json11::Json::object {
{ "request_range", json11::Json::object { { "request_range", json11::Json::object {
@ -917,11 +889,7 @@ void etcd_state_client_t::parse_state(const etcd_kv_t & kv)
if (!pc.scrub_interval) if (!pc.scrub_interval)
pc.scrub_interval = 0; pc.scrub_interval = 0;
// Mark pool as VitastorFS pool (disable per-inode stats and block volume creation) // Mark pool as VitastorFS pool (disable per-inode stats and block volume creation)
pc.used_for_app = pool_item.second["used_for_fs"].as_string(); pc.used_for_fs = pool_item.second["used_for_fs"].as_string();
if (pc.used_for_app != "")
pc.used_for_app = "fs:"+pc.used_for_app;
else
pc.used_for_app = pool_item.second["used_for_app"].as_string();
// Immediate Commit Mode // Immediate Commit Mode
pc.immediate_commit = pool_item.second["immediate_commit"].is_string() pc.immediate_commit = pool_item.second["immediate_commit"].is_string()
? parse_immediate_commit(pool_item.second["immediate_commit"].string_value(), IMMEDIATE_ALL) ? parse_immediate_commit(pool_item.second["immediate_commit"].string_value(), IMMEDIATE_ALL)
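
The load_pgs() throttle in the hunks above is a rate limit with a single deferred retry: if fewer than etcd_min_reload_interval milliseconds (50 when unset, per the parse_config hunk) have passed since the previous reload, a timer is armed instead of reloading immediately, and further requests are absorbed by the `load_pgs_timer_id >= 0` check rather than stacking extra timers. Worked through with those defaults:

    // a reload requested 20 ms after the previous one is deferred by
    // etcd_min_reload_interval + 50 - ms_passed = 50 + 50 - 20 = 80 ms:
    // load_pgs_timer_id = tfd->set_timer(80, false, [this](int) { load_pgs(); });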

View File

@ -61,7 +61,7 @@ struct pool_config_t
uint64_t pg_stripe_size; uint64_t pg_stripe_size;
std::map<pg_num_t, pg_config_t> pg_config; std::map<pg_num_t, pg_config_t> pg_config;
uint64_t scrub_interval; uint64_t scrub_interval;
std::string used_for_app; std::string used_for_fs;
int backfillfull; int backfillfull;
}; };
@ -108,7 +108,6 @@ public:
int max_etcd_attempts = 5; int max_etcd_attempts = 5;
int etcd_quick_timeout = 1000; int etcd_quick_timeout = 1000;
int etcd_slow_timeout = 5000; int etcd_slow_timeout = 5000;
int etcd_min_reload_interval = 1000;
bool infinite_start = true; bool infinite_start = true;
uint64_t global_block_size = DEFAULT_BLOCK_SIZE; uint64_t global_block_size = DEFAULT_BLOCK_SIZE;
uint32_t global_bitmap_granularity = DEFAULT_BITMAP_GRANULARITY; uint32_t global_bitmap_granularity = DEFAULT_BITMAP_GRANULARITY;
@ -123,8 +122,6 @@ public:
uint64_t etcd_watch_revision_config = 0; uint64_t etcd_watch_revision_config = 0;
uint64_t etcd_watch_revision_osd = 0; uint64_t etcd_watch_revision_osd = 0;
uint64_t etcd_watch_revision_pg = 0; uint64_t etcd_watch_revision_pg = 0;
timespec etcd_last_reload = {};
int load_pgs_timer_id = -1;
std::map<pool_id_t, pool_config_t> pool_config; std::map<pool_id_t, pool_config_t> pool_config;
std::map<osd_num_t, json11::Json> peer_states; std::map<osd_num_t, json11::Json> peer_states;
std::set<osd_num_t> seen_peers; std::set<osd_num_t> seen_peers;

View File

@ -377,7 +377,7 @@ static void io_callback(void *opaque, long retval)
bsd->completed.push_back(io); bsd->completed.push_back(io);
if (bsd->trace) if (bsd->trace)
{ {
printf("--- %s 0x%jx retval=%jd\n", io->ddir == DDIR_READ ? "READ" : printf("--- %s 0x%jx retval=%ld\n", io->ddir == DDIR_READ ? "READ" :
(io->ddir == DDIR_WRITE ? "WRITE" : "SYNC"), (uint64_t)io, retval); (io->ddir == DDIR_WRITE ? "WRITE" : "SYNC"), (uint64_t)io, retval);
} }
} }

View File

@ -117,60 +117,32 @@ void msgr_iothread_t::run()
void osd_messenger_t::init() void osd_messenger_t::init()
{ {
#ifdef WITH_RDMACM
if (use_rdmacm)
{
// RDMA-CM only requires the event channel. All the remaining work is done separately
rdmacm_evch = rdma_create_event_channel();
if (!rdmacm_evch)
{
// ENODEV means that the client doesn't have RDMA devices available
if (errno != ENODEV || log_level > 0)
fprintf(stderr, "Failed to initialize RDMA-CM event channel: %s (code %d)\n", strerror(errno), errno);
}
else
{
fcntl(rdmacm_evch->fd, F_SETFL, fcntl(rdmacm_evch->fd, F_GETFL, 0) | O_NONBLOCK);
tfd->set_fd_handler(rdmacm_evch->fd, false, [this](int rdmacm_eventfd, int epoll_events)
{
handle_rdmacm_events();
});
}
}
else
#endif
#ifdef WITH_RDMA #ifdef WITH_RDMA
if (use_rdma) if (use_rdma)
{ {
rdma_contexts = msgr_rdma_context_t::create_all( rdma_context = msgr_rdma_context_t::create(
osd_num && osd_cluster_network_masks.size() ? osd_cluster_network_masks : osd_network_masks, osd_networks, rdma_device != "" ? rdma_device.c_str() : NULL,
rdma_device != "" ? rdma_device.c_str() : NULL,
rdma_port_num, rdma_gid_index, rdma_mtu, rdma_odp, log_level rdma_port_num, rdma_gid_index, rdma_mtu, rdma_odp, log_level
); );
if (!rdma_contexts.size()) if (!rdma_context)
{ {
if (log_level > 0) if (log_level > 0)
fprintf(stderr, "[OSD %ju] Couldn't initialize RDMA, proceeding with TCP only\n", osd_num); fprintf(stderr, "[OSD %ju] Couldn't initialize RDMA, proceeding with TCP only\n", osd_num);
} }
else else
{ {
rdma_max_sge = rdma_max_sge < rdma_context->attrx.orig_attr.max_sge
? rdma_max_sge : rdma_context->attrx.orig_attr.max_sge;
fprintf(stderr, "[OSD %ju] RDMA initialized successfully\n", osd_num); fprintf(stderr, "[OSD %ju] RDMA initialized successfully\n", osd_num);
for (msgr_rdma_context_t* rdma_context: rdma_contexts) fcntl(rdma_context->channel->fd, F_SETFL, fcntl(rdma_context->channel->fd, F_GETFL, 0) | O_NONBLOCK);
tfd->set_fd_handler(rdma_context->channel->fd, false, [this](int notify_fd, int epoll_events)
{ {
fcntl(rdma_context->channel->fd, F_SETFL, fcntl(rdma_context->channel->fd, F_GETFL, 0) | O_NONBLOCK); handle_rdma_events();
tfd->set_fd_handler(rdma_context->channel->fd, false, [this, rdma_context](int notify_fd, int epoll_events) });
{ handle_rdma_events();
handle_rdma_events(rdma_context);
});
handle_rdma_events(rdma_context);
}
} }
} }
#endif #endif
if (ringloop)
{
has_sendmsg_zc = ringloop->has_sendmsg_zc();
}
if (ringloop && iothread_count > 0) if (ringloop && iothread_count > 0)
{ {
for (int i = 0; i < iothread_count; i++) for (int i = 0; i < iothread_count; i++)
@ -217,6 +189,7 @@ void osd_messenger_t::init()
op->req = (osd_any_op_t){ op->req = (osd_any_op_t){
.hdr = { .hdr = {
.magic = SECONDARY_OSD_OP_MAGIC, .magic = SECONDARY_OSD_OP_MAGIC,
.id = this->next_subop_id++,
.opcode = OSD_OP_PING, .opcode = OSD_OP_PING,
}, },
}; };
@ -274,19 +247,10 @@ osd_messenger_t::~osd_messenger_t()
iothreads.clear(); iothreads.clear();
} }
#ifdef WITH_RDMA #ifdef WITH_RDMA
for (auto rdma_context: rdma_contexts) if (rdma_context)
{ {
delete rdma_context; delete rdma_context;
} }
rdma_contexts.clear();
#endif
#ifdef WITH_RDMACM
if (rdmacm_evch)
{
tfd->set_fd_handler(rdmacm_evch->fd, false, NULL);
rdma_destroy_event_channel(rdmacm_evch);
rdmacm_evch = NULL;
}
#endif #endif
} }
@ -298,14 +262,10 @@ void osd_messenger_t::parse_config(const json11::Json & config)
// RDMA is on by default in RDMA-enabled builds // RDMA is on by default in RDMA-enabled builds
this->use_rdma = config["use_rdma"].bool_value() || config["use_rdma"].uint64_value() != 0; this->use_rdma = config["use_rdma"].bool_value() || config["use_rdma"].uint64_value() != 0;
} }
#ifdef WITH_RDMACM
// Use RDMA CM? (required for iWARP and may be useful for IB)
// FIXME: Only parse during start
this->use_rdmacm = config["use_rdmacm"].bool_value() || config["use_rdmacm"].uint64_value() != 0;
this->disable_tcp = this->use_rdmacm && (config["disable_tcp"].bool_value() || config["disable_tcp"].uint64_value() != 0);
#endif
this->rdma_device = config["rdma_device"].string_value(); this->rdma_device = config["rdma_device"].string_value();
this->rdma_port_num = (uint8_t)config["rdma_port_num"].uint64_value(); this->rdma_port_num = (uint8_t)config["rdma_port_num"].uint64_value();
if (!this->rdma_port_num)
this->rdma_port_num = 1;
if (!config["rdma_gid_index"].is_null()) if (!config["rdma_gid_index"].is_null())
this->rdma_gid_index = (uint8_t)config["rdma_gid_index"].uint64_value(); this->rdma_gid_index = (uint8_t)config["rdma_gid_index"].uint64_value();
this->rdma_mtu = (uint32_t)config["rdma_mtu"].uint64_value(); this->rdma_mtu = (uint32_t)config["rdma_mtu"].uint64_value();
@ -322,6 +282,15 @@ void osd_messenger_t::parse_config(const json11::Json & config)
if (!this->rdma_max_msg || this->rdma_max_msg > 128*1024*1024) if (!this->rdma_max_msg || this->rdma_max_msg > 128*1024*1024)
this->rdma_max_msg = 129*1024; this->rdma_max_msg = 129*1024;
this->rdma_odp = config["rdma_odp"].bool_value(); this->rdma_odp = config["rdma_odp"].bool_value();
std::vector<std::string> mask;
if (config["bind_address"].is_string())
mask.push_back(config["bind_address"].string_value());
else if (config["osd_network"].is_string())
mask.push_back(config["osd_network"].string_value());
else
for (auto v: config["osd_network"].array_items())
mask.push_back(v.string_value());
this->osd_networks = mask;
#endif #endif
if (!osd_num) if (!osd_num)
this->iothread_count = (uint32_t)config["client_iothread_count"].uint64_value(); this->iothread_count = (uint32_t)config["client_iothread_count"].uint64_value();
@ -332,9 +301,6 @@ void osd_messenger_t::parse_config(const json11::Json & config)
this->receive_buffer_size = 65536; this->receive_buffer_size = 65536;
this->use_sync_send_recv = config["use_sync_send_recv"].bool_value() || this->use_sync_send_recv = config["use_sync_send_recv"].bool_value() ||
config["use_sync_send_recv"].uint64_value(); config["use_sync_send_recv"].uint64_value();
this->min_zerocopy_send_size = config["min_zerocopy_send_size"].is_null()
? DEFAULT_MIN_ZEROCOPY_SEND_SIZE
: (int)config["min_zerocopy_send_size"].int64_value();
this->peer_connect_interval = config["peer_connect_interval"].uint64_value(); this->peer_connect_interval = config["peer_connect_interval"].uint64_value();
if (!this->peer_connect_interval) if (!this->peer_connect_interval)
this->peer_connect_interval = 5; this->peer_connect_interval = 5;
@ -348,87 +314,23 @@ void osd_messenger_t::parse_config(const json11::Json & config)
if (!this->osd_ping_timeout) if (!this->osd_ping_timeout)
this->osd_ping_timeout = 5; this->osd_ping_timeout = 5;
this->log_level = config["log_level"].uint64_value(); this->log_level = config["log_level"].uint64_value();
// OSD public & cluster networks
this->osd_networks.clear();
if (config["osd_network"].is_string())
this->osd_networks.push_back(config["osd_network"].string_value());
else
for (auto v: config["osd_network"].array_items())
this->osd_networks.push_back(v.string_value());
this->osd_cluster_networks.clear();
if (config["osd_cluster_network"].is_string())
this->osd_cluster_networks.push_back(config["osd_cluster_network"].string_value());
else
for (auto v: config["osd_cluster_network"].array_items())
this->osd_cluster_networks.push_back(v.string_value());
if (this->osd_cluster_networks.size())
for (auto & net: this->osd_cluster_networks)
for (int i = this->osd_networks.size()-1; i >= 0; i--)
if (this->osd_networks[i] == net)
this->osd_networks.erase(this->osd_networks.begin()+i, this->osd_networks.begin()+i+1);
this->osd_network_masks.clear();
for (auto & netstr: this->osd_networks)
this->osd_network_masks.push_back(cidr_parse(netstr));
this->osd_cluster_network_masks.clear();
for (auto & netstr: this->osd_cluster_networks)
this->osd_cluster_network_masks.push_back(cidr_parse(netstr));
this->all_osd_networks.clear();
this->all_osd_networks.insert(this->all_osd_networks.end(), this->osd_networks.begin(), this->osd_networks.end());
this->all_osd_networks.insert(this->all_osd_networks.end(), this->osd_cluster_networks.begin(), this->osd_cluster_networks.end());
this->all_osd_network_masks.clear();
this->all_osd_network_masks.insert(this->all_osd_network_masks.end(), this->osd_network_masks.begin(), this->osd_network_masks.end());
this->all_osd_network_masks.insert(this->all_osd_network_masks.end(), this->osd_cluster_network_masks.begin(), this->osd_cluster_network_masks.end());
if (!this->osd_networks.size())
{
this->osd_networks = this->osd_cluster_networks;
this->osd_network_masks = this->osd_cluster_network_masks;
}
} }
 void osd_messenger_t::connect_peer(uint64_t peer_osd, json11::Json peer_state)
 {
-    if (wanted_peers[peer_osd].raw_address_list != peer_state["addresses"])
-    {
-        wanted_peers[peer_osd].raw_address_list = peer_state["addresses"];
-        // We are an OSD -> try to select a cluster address
-        // We are a client -> try to select a public address
-        // OSD only has 1 address -> don't try anything, it's pointless
-        // FIXME: Maybe support optional fallback from cluster to public network?
-        auto & match_masks = (this->osd_num ? osd_cluster_network_masks : osd_network_masks);
-        if (peer_state["addresses"].array_items().size() > 1 && match_masks.size())
-        {
-            json11::Json::array address_list;
-            for (auto json_addr: peer_state["addresses"].array_items())
-            {
-                struct sockaddr_storage addr;
-                auto ok = string_to_addr(json_addr.string_value(), false, 0, &addr);
-                if (ok)
-                {
-                    bool matches = false;
-                    for (auto & mask: match_masks)
-                    {
-                        if (cidr_sockaddr_match(addr, mask))
-                        {
-                            matches = true;
-                            break;
-                        }
-                    }
-                    if (matches)
-                        address_list.push_back(json_addr);
-                }
-            }
-            if (!address_list.size())
-                address_list = peer_state["addresses"].array_items();
-            wanted_peers[peer_osd].address_list = address_list;
-        }
-        else
-            wanted_peers[peer_osd].address_list = peer_state["addresses"];
-        wanted_peers[peer_osd].address_changed = true;
-    }
-#ifdef WITH_RDMACM
-    wanted_peers[peer_osd].rdmacm_port = (int)peer_state["rdmacm_port"].int64_value();
-#endif
-    wanted_peers[peer_osd].port = (int)peer_state["port"].int64_value();
+    if (wanted_peers.find(peer_osd) == wanted_peers.end())
+    {
+        wanted_peers[peer_osd] = (osd_wanted_peer_t){
+            .address_list = peer_state["addresses"],
+            .port = (int)peer_state["port"].int64_value(),
+        };
+    }
+    else
+    {
+        wanted_peers[peer_osd].address_list = peer_state["addresses"];
+        wanted_peers[peer_osd].port = (int)peer_state["port"].int64_value();
+    }
+    wanted_peers[peer_osd].address_changed = true;
     try_connect_peer(peer_osd);
 }
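
[Editor's note] The master-side branch above filters the peer's advertised addresses through the configured CIDR masks before dialing. A minimal self-contained sketch of the underlying IPv4 prefix test (the helper name is illustrative; the real code uses cidr_parse()/cidr_sockaddr_match() from addr_util):

    #include <arpa/inet.h>
    #include <stdint.h>

    // True when addr lies inside net/bits, e.g. 10.0.1.5 in 10.0.0.0/16
    static bool ipv4_in_cidr(in_addr addr, in_addr net, unsigned bits)
    {
        uint32_t mask = bits ? htonl(~0u << (32 - bits)) : 0;
        return (addr.s_addr & mask) == (net.s_addr & mask);
    }
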
@@ -453,24 +355,12 @@ void osd_messenger_t::try_connect_peer(uint64_t peer_osd)
     wp.cur_addr = wp.address_list[wp.address_index].string_value();
     wp.cur_port = wp.port;
     wp.connecting = true;
-#ifdef WITH_RDMACM
-    if (use_rdmacm && wp.rdmacm_port)
-        rdmacm_try_connect_peer(peer_osd, wp.cur_addr.c_str(), wp.rdmacm_port, wp.cur_port);
-    else
-#endif
-    try_connect_peer_tcp(peer_osd, wp.cur_addr.c_str(), wp.cur_port);
+    try_connect_peer_addr(peer_osd, wp.cur_addr.c_str(), wp.cur_port);
 }
-void osd_messenger_t::try_connect_peer_tcp(osd_num_t peer_osd, const char *peer_host, int peer_port)
+void osd_messenger_t::try_connect_peer_addr(osd_num_t peer_osd, const char *peer_host, int peer_port)
 {
     assert(peer_osd != this->osd_num);
-#ifdef WITH_RDMACM
-    if (disable_tcp)
-    {
-        on_connect_peer(peer_osd, -EINVAL);
-        return;
-    }
-#endif
     struct sockaddr_storage addr;
     if (!string_to_addr(peer_host, 0, peer_port, &addr))
     {
@@ -628,46 +518,29 @@ void osd_messenger_t::check_peer_config(osd_client_t *cl)
         .show_conf = {
             .header = {
                 .magic = SECONDARY_OSD_OP_MAGIC,
-                .id = this->next_subop_id++,
                 .opcode = OSD_OP_SHOW_CONFIG,
             },
         },
     };
-    json11::Json::object payload;
-    if (osd_num)
-    {
-        // Inform that we're OSD <osd_num>
-        payload["osd_num"] = osd_num;
-    }
-    payload["features"] = json11::Json::object{ { "check_sequencing", true } };
 #ifdef WITH_RDMA
-    if (!use_rdmacm && rdma_contexts.size())
-    {
-        // Choose the right context for the selected network
-        msgr_rdma_context_t *selected_ctx = choose_rdma_context(cl);
-        if (!selected_ctx)
-        {
-            if (log_level > 0)
-                fprintf(stderr, "No RDMA context for OSD %ju connection (peer %d), using only TCP\n", cl->osd_num, cl->peer_fd);
-        }
-        else
-        {
-            cl->rdma_conn = msgr_rdma_connection_t::create(selected_ctx, rdma_max_send, rdma_max_recv, rdma_max_sge, rdma_max_msg);
-            if (cl->rdma_conn)
-            {
-                payload["connect_rdma"] = cl->rdma_conn->addr.to_string();
-                payload["rdma_max_msg"] = cl->rdma_conn->max_msg;
-            }
-        }
-    }
+    if (rdma_context)
+    {
+        cl->rdma_conn = msgr_rdma_connection_t::create(rdma_context, rdma_max_send, rdma_max_recv, rdma_max_sge, rdma_max_msg);
+        if (cl->rdma_conn)
+        {
+            json11::Json payload = json11::Json::object {
+                { "connect_rdma", cl->rdma_conn->addr.to_string() },
+                { "rdma_max_msg", cl->rdma_conn->max_msg },
+            };
+            std::string payload_str = payload.dump();
+            op->req.show_conf.json_len = payload_str.size();
+            op->buf = malloc_or_die(payload_str.size());
+            op->iov.push_back(op->buf, payload_str.size());
+            memcpy(op->buf, payload_str.c_str(), payload_str.size());
+        }
+    }
 #endif
-    if (payload.size())
-    {
-        std::string payload_str = json11::Json(payload).dump();
-        op->req.show_conf.json_len = payload_str.size();
-        op->buf = malloc_or_die(payload_str.size());
-        op->iov.push_back(op->buf, payload_str.size());
-        memcpy(op->buf, payload_str.c_str(), payload_str.size());
-    }
     op->callback = [this, cl](osd_op_t *op)
     {
         std::string json_err;
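
[Editor's note] On the master side the SHOW_CONFIG handshake payload is assembled incrementally (osd_num, features, optional RDMA fields) and serialized once at the end. A rough sketch of that flow with json11, assuming only the field names visible in this hunk (numeric values are cast to double here because stock json11 stores all numbers that way):

    #include "json11/json11.hpp"
    #include <string>

    static std::string make_show_conf_payload(uint64_t osd_num,
        const std::string & rdma_addr, uint64_t rdma_max_msg)
    {
        json11::Json::object payload;
        if (osd_num)
            payload["osd_num"] = (double)osd_num; // "we are OSD <osd_num>"
        payload["features"] = json11::Json::object{ { "check_sequencing", true } };
        if (!rdma_addr.empty())
        {
            payload["connect_rdma"] = rdma_addr;
            payload["rdma_max_msg"] = (double)rdma_max_msg;
        }
        return json11::Json(payload).dump();
    }
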
@@ -709,23 +582,29 @@ void osd_messenger_t::check_peer_config(osd_client_t *cl)
     {
         osd_num_t peer_osd = cl->osd_num;
         stop_client(op->peer_fd);
-        on_connect_peer(peer_osd, -EINVAL);
+        on_connect_peer(peer_osd, -1);
         delete op;
         return;
     }
 #ifdef WITH_RDMA
-    if (!use_rdmacm && cl->rdma_conn && config["rdma_address"].is_string())
+    if (config["rdma_address"].is_string())
     {
         msgr_rdma_address_t addr;
         if (!msgr_rdma_address_t::from_string(config["rdma_address"].string_value().c_str(), &addr) ||
             cl->rdma_conn->connect(&addr) != 0)
         {
             fprintf(
-                stderr, "Failed to connect to OSD %ju (address %s) using RDMA, switching back to TCP\n",
+                stderr, "Failed to connect to OSD %ju (address %s) using RDMA\n",
                 cl->osd_num, config["rdma_address"].string_value().c_str()
             );
             delete cl->rdma_conn;
             cl->rdma_conn = NULL;
+            // FIXME: Keep TCP connection in this case
+            osd_num_t peer_osd = cl->osd_num;
+            stop_client(cl->peer_fd);
+            on_connect_peer(peer_osd, -1);
+            delete op;
+            return;
         }
         else
         {
@@ -794,30 +673,9 @@ void osd_messenger_t::accept_connections(int listen_fd)
 }
 #ifdef WITH_RDMA
-msgr_rdma_context_t* osd_messenger_t::choose_rdma_context(osd_client_t *cl)
-{
-    // Choose the right context for the selected network
-    msgr_rdma_context_t *selected_ctx = NULL;
-    for (auto rdma_ctx: rdma_contexts)
-    {
-        if (!rdma_ctx->net_mask.family && !selected_ctx ||
-            rdma_ctx->net_mask.family && cidr_sockaddr_match(cl->peer_addr, rdma_ctx->net_mask))
-        {
-            selected_ctx = rdma_ctx;
-        }
-    }
-    return selected_ctx;
-}
 bool osd_messenger_t::is_rdma_enabled()
 {
-    return rdma_contexts.size() > 0;
+    return rdma_context != NULL;
 }
-#endif
-#ifdef WITH_RDMACM
-bool osd_messenger_t::is_use_rdmacm()
-{
-    return use_rdmacm;
-}
 #endif
@@ -910,7 +768,6 @@ static const char* local_only_params[] = {
     "tcp_header_buffer_size",
     "use_rdma",
     "use_sync_send_recv",
-    "min_zerocopy_send_size",
 };
 static const char **local_only_end = local_only_params + (sizeof(local_only_params)/sizeof(local_only_params[0]));
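
[Editor's note] min_zerocopy_send_size only exists on the master side: kernel MSG_ZEROCOPY pins pages and delivers completion notifications, which is only worth the overhead for large transfers. A sketch of the threshold decision (the default 32 KiB comes from DEFAULT_MIN_ZEROCOPY_SEND_SIZE in the messenger.h diff below; the helper name is illustrative):

    #include <sys/socket.h>
    #include <stddef.h>

    #ifndef MSG_ZEROCOPY
    #define MSG_ZEROCOPY 0x4000000
    #endif

    // Use zero-copy only when the kernel supports it and the payload is large
    static int send_flags_for(size_t total_len, bool has_sendmsg_zc)
    {
        const size_t min_zc = 32*1024; // DEFAULT_MIN_ZEROCOPY_SEND_SIZE
        return (has_sendmsg_zc && total_len >= min_zc) ? MSG_ZEROCOPY : 0;
    }
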

View File

@@ -16,7 +16,6 @@
 #include "json11/json11.hpp"
 #include "msgr_op.h"
 #include "timerfd_manager.h"
-#include "addr_util.h"
 #include <ringloop.h>
 #define CL_READ_HDR 1
@@ -32,8 +31,6 @@
 #define VITASTOR_CONFIG_PATH "/etc/vitastor/vitastor.conf"
-#define DEFAULT_MIN_ZEROCOPY_SEND_SIZE 32*1024
 #define MSGR_SENDP_HDR 1
 #define MSGR_SENDP_FREE 2
@@ -52,10 +49,10 @@ struct osd_client_t
 {
     int refs = 0;
-    sockaddr_storage peer_addr = {};
-    int peer_port = 0;
+    sockaddr_storage peer_addr;
+    int peer_port;
     int peer_fd = -1;
-    int peer_state = 0;
+    int peer_state;
     int connect_timeout_id = -1;
     int ping_time_remaining = 0;
     int idle_time_remaining = 0;
@@ -75,15 +72,12 @@ struct osd_client_t
     int read_remaining = 0;
     int read_state = 0;
     osd_op_buf_list_t recv_list;
-    uint64_t read_op_id = 1;
-    bool check_sequencing = false;
     // Incoming operations
     std::vector<osd_op_t*> received_ops;
     // Outbound operations
     std::map<uint64_t, osd_op_t*> sent_ops;
-    uint64_t send_op_id = 0;
     // PGs dirtied by this client's primary-writes
     std::set<pool_pg_num_t> dirty_pgs;
@@ -93,25 +87,19 @@ struct osd_client_t
     int write_state = 0;
     std::vector<iovec> send_list, next_send_list;
     std::vector<msgr_sendp_t> outbox, next_outbox;
-    std::vector<osd_op_t*> zc_free_list;
     ~osd_client_t();
 };
 struct osd_wanted_peer_t
 {
-    json11::Json raw_address_list;
     json11::Json address_list;
-    int port = 0;
-    // FIXME: Remove separate WITH_RDMACM?
-#ifdef WITH_RDMACM
-    int rdmacm_port = 0;
-#endif
-    time_t last_connect_attempt = 0;
-    bool connecting = false, address_changed = false;
-    int address_index = 0;
+    int port;
+    time_t last_connect_attempt;
+    bool connecting, address_changed;
+    int address_index;
     std::string cur_addr;
-    int cur_port = 0;
+    int cur_port;
 };
 struct osd_op_stats_t
@@ -161,15 +149,6 @@ public:
 };
 #endif
-#ifdef WITH_RDMA
-struct rdma_event_channel;
-struct rdma_cm_id;
-struct rdma_cm_event;
-struct ibv_context;
-struct osd_messenger_t;
-struct rdmacm_connecting_t;
-#endif
 struct osd_messenger_t
 {
 protected:
@@ -182,24 +161,18 @@ protected:
     int osd_ping_timeout = 0;
     int log_level = 0;
     bool use_sync_send_recv = false;
-    int min_zerocopy_send_size = DEFAULT_MIN_ZEROCOPY_SEND_SIZE;
     int iothread_count = 0;
 #ifdef WITH_RDMA
     bool use_rdma = true;
-    bool use_rdmacm = false;
-    bool disable_tcp = false;
+    std::vector<std::string> osd_networks;
     std::string rdma_device;
-    uint64_t rdma_port_num = 1;
-    int rdma_mtu = 0;
+    uint64_t rdma_port_num = 1, rdma_mtu = 0;
     int rdma_gid_index = -1;
-    std::vector<msgr_rdma_context_t *> rdma_contexts;
+    msgr_rdma_context_t *rdma_context = NULL;
     uint64_t rdma_max_sge = 0, rdma_max_send = 0, rdma_max_recv = 0;
     uint64_t rdma_max_msg = 0;
     bool rdma_odp = false;
-    rdma_event_channel *rdmacm_evch = NULL;
-    std::map<rdma_cm_id*, osd_client_t*> rdmacm_connections;
-    std::map<rdma_cm_id*, rdmacm_connecting_t*> rdmacm_connecting;
 #endif
     std::vector<msgr_iothread_t*> iothreads;
@@ -209,20 +182,14 @@ protected:
     std::vector<osd_op_t*> set_immediate_ops;
 public:
-    timerfd_manager_t *tfd = NULL;
-    ring_loop_t *ringloop = NULL;
-    bool has_sendmsg_zc = false;
+    timerfd_manager_t *tfd;
+    ring_loop_t *ringloop;
     // osd_num_t is only for logging and asserts
     osd_num_t osd_num;
-    uint64_t next_subop_id = 1;
     std::map<int, osd_client_t*> clients;
     std::map<osd_num_t, osd_wanted_peer_t> wanted_peers;
     std::map<uint64_t, int> osd_peer_fds;
-    std::vector<std::string> osd_networks;
-    std::vector<addr_mask_t> osd_network_masks;
-    std::vector<std::string> osd_cluster_networks;
-    std::vector<addr_mask_t> osd_cluster_network_masks;
-    std::vector<std::string> all_osd_networks;
-    std::vector<addr_mask_t> all_osd_network_masks;
     // op statistics
     osd_op_stats_t stats, recovery_stats;
@@ -249,18 +216,13 @@ public:
     bool is_rdma_enabled();
     bool connect_rdma(int peer_fd, std::string rdma_address, uint64_t client_max_msg);
 #endif
-#ifdef WITH_RDMACM
-    bool is_use_rdmacm();
-    rdma_cm_id *rdmacm_listen(const std::string & bind_address, int rdmacm_port, int *bound_port, int log_level);
-    void rdmacm_destroy_listener(rdma_cm_id *listener);
-#endif
     void inc_op_stats(osd_op_stats_t & stats, uint64_t opcode, timespec & tv_begin, timespec & tv_end, uint64_t len);
     void measure_exec(osd_op_t *cur_op);
 protected:
     void try_connect_peer(uint64_t osd_num);
-    void try_connect_peer_tcp(osd_num_t peer_osd, const char *peer_host, int peer_port);
+    void try_connect_peer_addr(osd_num_t peer_osd, const char *peer_host, int peer_port);
     void handle_peer_epoll(int peer_fd, int epoll_events);
     void handle_connect_epoll(int peer_fd);
     void on_connect_peer(osd_num_t peer_osd, int peer_fd);
@@ -269,7 +231,7 @@ protected:
     void cancel_op(osd_op_t *op);
     bool try_send(osd_client_t *cl);
-    void handle_send(int result, bool prev, bool more, osd_client_t *cl);
+    void handle_send(int result, osd_client_t *cl);
     bool handle_read(int result, osd_client_t *cl);
     bool handle_read_buffer(osd_client_t *cl, void *curbuf, int remain);
@@ -285,19 +247,6 @@ protected:
     void try_send_rdma_odp(osd_client_t *cl);
     void try_send_rdma_nodp(osd_client_t *cl);
     bool try_recv_rdma(osd_client_t *cl);
-    void handle_rdma_events(msgr_rdma_context_t *rdma_context);
-    msgr_rdma_context_t* choose_rdma_context(osd_client_t *cl);
-#endif
-#ifdef WITH_RDMACM
-    void handle_rdmacm_events();
-    msgr_rdma_context_t* rdmacm_get_context(ibv_context *verbs);
-    msgr_rdma_context_t* rdmacm_create_qp(rdma_cm_id *cmid);
-    void rdmacm_accept(rdma_cm_event *ev);
-    void rdmacm_try_connect_peer(uint64_t peer_osd, const std::string & addr, int rdmacm_port, int fallback_tcp_port);
-    void rdmacm_set_conn_timeout(rdmacm_connecting_t *conn);
-    void rdmacm_on_connect_peer_error(rdma_cm_id *cmid, int res);
-    void rdmacm_address_resolved(rdma_cm_event *ev);
-    void rdmacm_route_resolved(rdma_cm_event *ev);
-    void rdmacm_established(rdma_cm_event *ev);
+    void handle_rdma_events();
 #endif
 };

View File

@@ -3,35 +3,10 @@
 #include <stdio.h>
 #include <stdlib.h>
-#include "addr_util.h"
 #include "msgr_rdma.h"
 #include "messenger.h"
-static uint32_t ibv_mtu_to_bytes(ibv_mtu mtu)
-{
-    switch (mtu)
-    {
-        case IBV_MTU_256: return 256;
-        case IBV_MTU_512: return 512;
-        case IBV_MTU_1024: return 1024;
-        case IBV_MTU_2048: return 2048;
-        case IBV_MTU_4096: return 4096;
-    }
-    return 4096;
-}
-static ibv_mtu bytes_to_ibv_mtu(uint32_t mtu)
-{
-    switch (mtu)
-    {
-        case 256: return IBV_MTU_256;
-        case 512: return IBV_MTU_512;
-        case 1024: return IBV_MTU_1024;
-        case 2048: return IBV_MTU_2048;
-        case 4096: return IBV_MTU_4096;
-    }
-    return IBV_MTU_4096;
-}
 std::string msgr_rdma_address_t::to_string()
 {
     char msg[sizeof "0000:00000000:00000000:00000000000000000000000000000000"];
@@ -63,27 +38,15 @@ msgr_rdma_context_t::~msgr_rdma_context_t()
         ibv_dereg_mr(mr);
     if (pd)
         ibv_dealloc_pd(pd);
-    if (context && !is_cm)
+    if (context)
         ibv_close_device(context);
 }
 msgr_rdma_connection_t::~msgr_rdma_connection_t()
 {
-    ctx->reserve_cqe(-max_send-max_recv);
-#ifdef WITH_RDMACM
-    if (qp && !cmid)
-        ibv_destroy_qp(qp);
-    if (cmid)
-    {
-        ctx->cm_refs--;
-        if (cmid->qp)
-            rdma_destroy_qp(cmid);
-        rdma_destroy_id(cmid);
-    }
-#else
+    ctx->used_max_cqe -= max_send+max_recv;
     if (qp)
         ibv_destroy_qp(qp);
-#endif
     if (recv_buffers.size())
     {
         for (auto b: recv_buffers)
@@ -114,21 +77,21 @@ static bool is_ipv4_gid(ibv_gid_entry *gidx)
         ((uint32_t*)gidx->gid.raw)[2] == 0xffff0000);
 }
-static int match_gid(ibv_gid_entry *gidx, const addr_mask_t *networks, int nnet)
+static bool match_gid(ibv_gid_entry *gidx, addr_mask_t *networks, int nnet)
 {
     if (gidx->gid_type != IBV_GID_TYPE_ROCE_V1 &&
         gidx->gid_type != IBV_GID_TYPE_ROCE_V2 ||
         ((uint64_t*)gidx->gid.raw)[0] == 0 &&
         ((uint64_t*)gidx->gid.raw)[1] == 0)
     {
-        return -1;
+        return false;
     }
     if (is_ipv4_gid(gidx))
     {
         for (int i = 0; i < nnet; i++)
         {
             if (networks[i].family == AF_INET && cidr_match(*(in_addr*)(gidx->gid.raw+12), networks[i].ipv4, networks[i].bits))
-                return i;
+                return true;
         }
     }
     else
@@ -136,67 +99,119 @@ static int match_gid(ibv_gid_entry *gidx, const addr_mask_t *networks, int nnet)
     {
         for (int i = 0; i < nnet; i++)
         {
             if (networks[i].family == AF_INET6 && cidr6_match(*(in6_addr*)gidx->gid.raw, networks[i].ipv6, networks[i].bits))
-                return i;
+                return true;
         }
     }
-    return -1;
+    return false;
 }
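
[Editor's note] match_gid() relies on the fact that RoCE GIDs are 16-byte values in which IPv4 addresses appear as IPv4-mapped IPv6 (::ffff:a.b.c.d) - exactly what is_ipv4_gid() checks. The same test, written out byte by byte as a sketch:

    #include <stdint.h>
    #include <string.h>

    static bool gid_is_ipv4_mapped(const uint8_t gid[16])
    {
        static const uint8_t prefix[12] = { 0,0,0,0, 0,0,0,0, 0,0,0xff,0xff };
        return memcmp(gid, prefix, 12) == 0; // bytes 12..15 hold the IPv4 address
    }
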
-static void log_rdma_dev_port_gid(ibv_device *dev, int ib_port, int gid_index, int mtu, ibv_gid_entry & gidx)
-{
-    bool is4 = ((uint64_t*)gidx.gid.raw)[0] == 0 && ((uint32_t*)gidx.gid.raw)[2] == 0xffff0000;
-    char buf[256];
-    inet_ntop(is4 ? AF_INET : AF_INET6, is4 ? gidx.gid.raw+12 : gidx.gid.raw, buf, sizeof(buf));
-    fprintf(
-        stderr, "Selected RDMA device %s port %d GID %d - ROCEv%d IPv%d %s, MTU %d\n",
-        ibv_get_device_name(dev), ib_port, gid_index,
-        gidx.gid_type == IBV_GID_TYPE_ROCE_V2 ? 2 : 1, is4 ? 4 : 6, buf, mtu
-    );
-}
-static int match_port_gid(const std::vector<addr_mask_t> & osd_network_masks, ibv_context *context,
-    int port_num, int gid_count, int log_level, ibv_gid_entry *best_gidx, int *net_num)
-{
-    // Try to find a port with matching address
-    int best_gid_idx = -1, res = 0;
-    for (int k = 0; k < gid_count; k++)
-    {
-        ibv_gid_entry gidx;
-        if ((res = ibv_query_gid_ex(context, port_num, k, &gidx, 0)) != 0)
-        {
-            if (res != ENODATA)
-            {
-                fprintf(stderr, "Couldn't read RDMA device %s GID index %d: %s\n", ibv_get_device_name(context->device), k, strerror(res));
-                continue;
-            }
-            else
-                break;
-        }
-        if ((res = match_gid(&gidx, osd_network_masks.data(), osd_network_masks.size())) >= 0)
-        {
-            // Prefer RoCEv2
-            if (best_gid_idx < 0 || best_gidx->gid_type != IBV_GID_TYPE_ROCE_V2 && gidx.gid_type == IBV_GID_TYPE_ROCE_V2)
-            {
-                best_gid_idx = k;
-                *best_gidx = gidx;
-                *net_num = res;
-            }
-        }
-    }
-    return best_gid_idx;
-}
+struct matched_dev
+{
+    int dev = -1;
+    int port = -1;
+    int gid = -1;
+    bool rocev2 = false;
+};
+static void log_rdma_dev_port_gid(ibv_device *dev, int ib_port, int gid_index, ibv_gid_entry & gidx)
+{
+    bool is4 = ((uint64_t*)gidx.gid.raw)[0] == 0 && ((uint32_t*)gidx.gid.raw)[2] == 0xffff0000;
+    char buf[256];
+    inet_ntop(is4 ? AF_INET : AF_INET6, is4 ? gidx.gid.raw+12 : gidx.gid.raw, buf, sizeof(buf));
+    fprintf(
+        stderr, "Auto-selected RDMA device %s port %d GID %d - ROCEv%d IPv%d %s\n",
+        ibv_get_device_name(dev), ib_port, gid_index,
+        gidx.gid_type == IBV_GID_TYPE_ROCE_V2 ? 2 : 1, is4 ? 4 : 6, buf
+    );
+}
+static matched_dev match_device(ibv_device **dev_list, addr_mask_t *networks, int nnet, int log_level)
+{
+    matched_dev best;
+    ibv_device_attr attr;
+    ibv_port_attr portinfo;
+    ibv_gid_entry best_gidx;
+    int res;
+    bool have_non_roce = false, have_roce = false;
+    for (int i = 0; dev_list[i]; ++i)
+    {
+        auto dev = dev_list[i];
+        ibv_context *context = ibv_open_device(dev_list[i]);
+        if ((res = ibv_query_device(context, &attr)) != 0)
+        {
+            fprintf(stderr, "Couldn't query RDMA device %s for its features: %s\n", ibv_get_device_name(dev_list[i]), strerror(res));
+            goto cleanup;
+        }
+        for (int j = 1; j <= attr.phys_port_cnt; j++)
+        {
+            // Try to find a port with matching address
+            if ((res = ibv_query_port(context, j, &portinfo)) != 0)
+            {
+                fprintf(stderr, "Couldn't get RDMA device %s port %d info: %s\n", ibv_get_device_name(dev), j, strerror(res));
+                goto cleanup;
+            }
+            for (int k = 0; k < portinfo.gid_tbl_len; k++)
+            {
+                ibv_gid_entry gidx;
+                if ((res = ibv_query_gid_ex(context, j, k, &gidx, 0)) != 0)
+                {
+                    if (res != ENODATA)
+                    {
+                        fprintf(stderr, "Couldn't read RDMA device %s GID index %d: %s\n", ibv_get_device_name(dev), k, strerror(res));
+                        goto cleanup;
+                    }
+                    else
+                        break;
+                }
+                if (gidx.gid_type != IBV_GID_TYPE_ROCE_V1 &&
+                    gidx.gid_type != IBV_GID_TYPE_ROCE_V2)
+                    have_non_roce = true;
+                else
+                    have_roce = true;
+                if (match_gid(&gidx, networks, nnet))
+                {
+                    // Prefer RoCEv2
+                    if (!best.rocev2)
+                    {
+                        best.dev = i;
+                        best.port = j;
+                        best.gid = k;
+                        best.rocev2 = (gidx.gid_type == IBV_GID_TYPE_ROCE_V2);
+                        best_gidx = gidx;
+                    }
+                }
+            }
+        }
+cleanup:
+        ibv_close_device(context);
+        if (best.rocev2)
+        {
+            break;
+        }
+    }
+    if (best.dev >= 0 && log_level > 0)
+    {
+        log_rdma_dev_port_gid(dev_list[best.dev], best.port, best.gid, best_gidx);
+    }
+    if (best.dev < 0 && have_non_roce && !have_roce)
+    {
+        best.dev = -2;
+    }
+    return best;
+}
 #endif
-std::vector<msgr_rdma_context_t*> msgr_rdma_context_t::create_all(const std::vector<addr_mask_t> & osd_network_masks,
-    const char *sel_dev_name, int sel_port_num, int sel_gid_index, uint32_t sel_mtu, bool odp, int log_level)
+msgr_rdma_context_t *msgr_rdma_context_t::create(std::vector<std::string> osd_networks, const char *ib_devname, uint8_t ib_port, int gid_index, uint32_t mtu, bool odp, int log_level)
 {
     int res;
-    std::vector<msgr_rdma_context_t*> ret;
-    ibv_device **raw_dev_list = NULL;
     ibv_device **dev_list = NULL;
-    ibv_device *single_list[2] = {};
-    raw_dev_list = dev_list = ibv_get_device_list(NULL);
+    msgr_rdma_context_t *ctx = new msgr_rdma_context_t();
+    ctx->mtu = mtu;
+    timespec tv;
+    clock_gettime(CLOCK_REALTIME, &tv);
+    srand48(tv.tv_sec*1000000000 + tv.tv_nsec);
+    dev_list = ibv_get_device_list(NULL);
     if (!dev_list || !*dev_list)
     {
         if (errno == -ENOSYS || errno == ENOSYS)
@@ -213,131 +228,121 @@ std::vector<msgr_rdma_context_t*> msgr_rdma_context_t::create_all(const std::vector<addr_mask_t> & osd_network_masks,
         fprintf(stderr, "Failed to get RDMA device list: %s\n", strerror(errno));
         goto cleanup;
     }
-    if (sel_dev_name)
-    {
-        int i;
-        for (i = 0; dev_list[i]; ++i)
-            if (!strcmp(ibv_get_device_name(dev_list[i]), sel_dev_name))
-                break;
-        if (!dev_list[i])
-        {
-            fprintf(stderr, "RDMA device %s not found\n", sel_dev_name);
-            goto cleanup;
-        }
-        single_list[0] = dev_list[i];
-        dev_list = single_list;
-    }
-    for (int i = 0; dev_list[i]; ++i)
-    {
-        auto dev = dev_list[i];
-        ibv_context *context = ibv_open_device(dev);
-        if (!context)
-        {
-            fprintf(stderr, "Couldn't get RDMA context for %s\n", ibv_get_device_name(dev));
-            continue;
-        }
-        ibv_device_attr attr;
-        if ((res = ibv_query_device(context, &attr)) != 0)
-        {
-            fprintf(stderr, "Couldn't query RDMA device %s for its features: %s\n", ibv_get_device_name(dev), strerror(res));
-            goto cleanup_dev;
-        }
-        if (sel_port_num && sel_port_num > attr.phys_port_cnt)
-        {
-            fprintf(stderr, "RDMA device %s port %d does not exist\n", ibv_get_device_name(dev), sel_port_num);
-            goto cleanup_dev;
-        }
-        for (int port_num = (sel_port_num ? sel_port_num : 1); port_num <= (sel_port_num ? sel_port_num : attr.phys_port_cnt); port_num++)
-        {
-            ibv_port_attr portinfo;
-            if ((res = ibv_query_port(context, port_num, &portinfo)) != 0)
-            {
-                fprintf(stderr, "Couldn't get RDMA device %s port %d info: %s\n", ibv_get_device_name(dev), port_num, strerror(res));
-                continue;
-            }
-            if (portinfo.state != IBV_PORT_ACTIVE)
-            {
-                continue;
-            }
-            if (sel_gid_index >= (int)portinfo.gid_tbl_len)
-            {
-                fprintf(stderr, "RDMA device %s port %d GID %d does not exist\n", ibv_get_device_name(dev), port_num, sel_gid_index);
-                continue;
-            }
-            uint32_t port_mtu = sel_mtu ? sel_mtu : ibv_mtu_to_bytes(portinfo.active_mtu);
-#ifdef IBV_ADVISE_MR_ADVICE_PREFETCH_NO_FAULT
-            if (sel_gid_index < 0)
-            {
-                ibv_gid_entry best_gidx;
-                int net_num = 0;
-                int best_gid_idx = match_port_gid(osd_network_masks, context, port_num, portinfo.gid_tbl_len, log_level, &best_gidx, &net_num);
-                if (best_gid_idx >= 0)
-                {
-                    if (log_level > 0)
-                        log_rdma_dev_port_gid(dev, port_num, best_gid_idx, port_mtu, best_gidx);
-                    auto ctx = msgr_rdma_context_t::create(dev, portinfo, port_num, best_gid_idx, port_mtu, odp, log_level);
-                    if (ctx)
-                    {
-                        ctx->net_mask = osd_network_masks[net_num];
-                        ret.push_back(ctx);
-                    }
-                }
-            }
-            else
-#endif
-            {
-                int best_gid_idx = sel_gid_index >= 0 ? sel_gid_index : 0;
-#ifdef IBV_ADVISE_MR_ADVICE_PREFETCH_NO_FAULT
-                if (log_level > 0)
-                {
-                    ibv_gid_entry gidx;
-                    ibv_query_gid_ex(context, port_num, best_gid_idx, &gidx, 0);
-                    log_rdma_dev_port_gid(dev, port_num, best_gid_idx, port_mtu, gidx);
-                }
-#endif
-                auto ctx = msgr_rdma_context_t::create(dev, portinfo, port_num, best_gid_idx, port_mtu, odp, log_level);
-                if (ctx)
-                    ret.push_back(ctx);
-            }
-        }
-cleanup_dev:
-        ibv_close_device(context);
-    }
-cleanup:
-    if (raw_dev_list)
-        ibv_free_device_list(raw_dev_list);
-    return ret;
-}
-msgr_rdma_context_t *msgr_rdma_context_t::create(ibv_device *dev, ibv_port_attr & portinfo, int ib_port, int gid_index, uint32_t mtu, bool odp, int log_level)
-{
-    msgr_rdma_context_t *ctx = new msgr_rdma_context_t();
-    ibv_context *context = ibv_open_device(dev);
-    if (!context)
-    {
-        fprintf(stderr, "Couldn't get RDMA context for %s\n", ibv_get_device_name(dev));
-        goto cleanup;
-    }
-    ctx->mtu = mtu;
-    ctx->context = context;
-    ctx->ib_port = ib_port;
-    ctx->portinfo = portinfo;
-    ctx->my_lid = ctx->portinfo.lid;
-    if (ctx->portinfo.link_layer != IBV_LINK_LAYER_ETHERNET && !ctx->my_lid)
-    {
-        fprintf(stderr, "RDMA device %s must have local LID because it's not Ethernet, but LID is zero\n", ibv_get_device_name(dev));
-        goto cleanup;
-    }
-    ctx->gid_index = gid_index;
-    if (ibv_query_gid(ctx->context, ib_port, gid_index, &ctx->my_gid))
-    {
-        fprintf(stderr, "Couldn't read RDMA device %s GID index %d\n", ibv_get_device_name(dev), gid_index);
-        goto cleanup;
-    }
+    if (ib_devname)
+    {
+        int i;
+        for (i = 0; dev_list[i]; ++i)
+            if (!strcmp(ibv_get_device_name(dev_list[i]), ib_devname))
+                break;
+        ctx->dev = dev_list[i];
+        if (!ctx->dev)
+        {
+            fprintf(stderr, "RDMA device %s not found\n", ib_devname);
+            goto cleanup;
+        }
+    }
+#ifdef IBV_ADVISE_MR_ADVICE_PREFETCH_NO_FAULT
+    else if (osd_networks.size())
+    {
+        std::vector<addr_mask_t> nets;
+        for (auto & netstr: osd_networks)
+        {
+            nets.push_back(cidr_parse(netstr));
+        }
+        auto best = match_device(dev_list, nets.data(), nets.size(), log_level);
+        if (best.dev == -2)
+        {
+            best.dev = 0;
+            if (log_level > 0)
+                fprintf(stderr, "No RoCE devices found, using first available RDMA device %s\n", ibv_get_device_name(*dev_list));
+        }
+        else if (best.dev < 0)
+        {
+            if (log_level > 0)
+                fprintf(stderr, "RDMA device matching osd_network is not found, disabling RDMA\n");
+            goto cleanup;
+        }
+        else
+        {
+            ib_port = best.port;
+            gid_index = best.gid;
+        }
+        ctx->dev = dev_list[best.dev];
+    }
+#endif
+    else
+    {
+        ctx->dev = *dev_list;
+    }
+    ctx->context = ibv_open_device(ctx->dev);
+    if (!ctx->context)
+    {
+        fprintf(stderr, "Couldn't get RDMA context for %s\n", ibv_get_device_name(ctx->dev));
+        goto cleanup;
+    }
+    ctx->ib_port = ib_port;
+    if ((res = ibv_query_port(ctx->context, ib_port, &ctx->portinfo)) != 0)
+    {
+        fprintf(stderr, "Couldn't get RDMA device %s port %d info: %s\n", ibv_get_device_name(ctx->dev), ib_port, strerror(res));
+        goto cleanup;
+    }
+    ctx->my_lid = ctx->portinfo.lid;
+    if (ctx->portinfo.link_layer != IBV_LINK_LAYER_ETHERNET && !ctx->my_lid)
+    {
+        fprintf(stderr, "RDMA device %s must have local LID because it's not Ethernet, but LID is zero\n", ibv_get_device_name(ctx->dev));
+        goto cleanup;
+    }
+#ifdef IBV_ADVISE_MR_ADVICE_PREFETCH_NO_FAULT
+    if (gid_index != -1)
+#endif
+    {
+        ctx->gid_index = gid_index < 0 ? 0 : gid_index;
+        if (ibv_query_gid(ctx->context, ib_port, gid_index, &ctx->my_gid))
+        {
+            fprintf(stderr, "Couldn't read RDMA device %s GID index %d\n", ibv_get_device_name(ctx->dev), gid_index);
+            goto cleanup;
+        }
+    }
+#ifdef IBV_ADVISE_MR_ADVICE_PREFETCH_NO_FAULT
+    else
+    {
+        // Auto-guess GID
+        ibv_gid_entry best_gidx;
+        for (int k = 0; k < ctx->portinfo.gid_tbl_len; k++)
+        {
+            ibv_gid_entry gidx;
+            if (ibv_query_gid_ex(ctx->context, ib_port, k, &gidx, 0) != 0)
+            {
+                fprintf(stderr, "Couldn't read RDMA device %s GID index %d\n", ibv_get_device_name(ctx->dev), k);
+                goto cleanup;
+            }
+            // Skip empty GID
+            if (((uint64_t*)gidx.gid.raw)[0] == 0 &&
+                ((uint64_t*)gidx.gid.raw)[1] == 0)
+            {
+                continue;
+            }
+            // Prefer IPv4 RoCEv2 -> IPv6 RoCEv2 -> IPv4 RoCEv1 -> IPv6 RoCEv1 -> IB
+            if (gid_index == -1 ||
+                gidx.gid_type == IBV_GID_TYPE_ROCE_V2 && best_gidx.gid_type != IBV_GID_TYPE_ROCE_V2 ||
+                gidx.gid_type == IBV_GID_TYPE_ROCE_V1 && best_gidx.gid_type == IBV_GID_TYPE_IB ||
+                gidx.gid_type == best_gidx.gid_type && is_ipv4_gid(&gidx))
+            {
+                gid_index = k;
+                best_gidx = gidx;
+            }
+        }
+        ctx->gid_index = gid_index = (gid_index == -1 ? 0 : gid_index);
+        if (log_level > 0)
+        {
+            log_rdma_dev_port_gid(ctx->dev, ctx->ib_port, ctx->gid_index, best_gidx);
+        }
+        ctx->my_gid = best_gidx.gid;
+    }
+#endif
     ctx->pd = ibv_alloc_pd(ctx->context);
     if (!ctx->pd)
@@ -346,19 +351,18 @@ msgr_rdma_context_t *msgr_rdma_context_t::create(ibv_device *dev, ibv_port_attr & portinfo,
         goto cleanup;
     }
-    if (ibv_query_device_ex(ctx->context, NULL, &ctx->attrx))
-    {
-        fprintf(stderr, "Couldn't query RDMA device for its features\n");
-        goto cleanup;
-    }
-    ctx->odp = odp;
-    if (ctx->odp)
-    {
-        if (!(ctx->attrx.odp_caps.general_caps & IBV_ODP_SUPPORT) ||
+    {
+        if (ibv_query_device_ex(ctx->context, NULL, &ctx->attrx))
+        {
+            fprintf(stderr, "Couldn't query RDMA device for its features\n");
+            goto cleanup;
+        }
+        ctx->odp = odp;
+        if (ctx->odp &&
+            (!(ctx->attrx.odp_caps.general_caps & IBV_ODP_SUPPORT) ||
             !(ctx->attrx.odp_caps.general_caps & IBV_ODP_SUPPORT_IMPLICIT) ||
             !(ctx->attrx.odp_caps.per_transport_caps.rc_odp_caps & IBV_ODP_SUPPORT_SEND) ||
-            !(ctx->attrx.odp_caps.per_transport_caps.rc_odp_caps & IBV_ODP_SUPPORT_RECV))
+            !(ctx->attrx.odp_caps.per_transport_caps.rc_odp_caps & IBV_ODP_SUPPORT_RECV)))
         {
             ctx->odp = false;
             if (log_level > 0)
@@ -391,43 +395,20 @@ msgr_rdma_context_t *msgr_rdma_context_t::create(ibv_device *dev, ibv_port_attr & portinfo,
             goto cleanup;
     }
-    if (dev_list)
-        ibv_free_device_list(dev_list);
     return ctx;
 cleanup:
-    if (context)
-        ibv_close_device(context);
     delete ctx;
+    if (dev_list)
+        ibv_free_device_list(dev_list);
     return NULL;
 }
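
[Editor's note] The v1.11.0 auto-guess above encodes the preference order IPv4 RoCEv2 > IPv6 RoCEv2 > IPv4 RoCEv1 > IPv6 RoCEv1 > plain IB. The same ordering expressed as a comparable score (sketch only; requires a libibverbs recent enough to provide ibv_gid_entry):

    #include <infiniband/verbs.h>

    static int gid_preference(const ibv_gid_entry & g, bool is_ipv4)
    {
        if (g.gid_type == IBV_GID_TYPE_ROCE_V2) return is_ipv4 ? 4 : 3;
        if (g.gid_type == IBV_GID_TYPE_ROCE_V1) return is_ipv4 ? 2 : 1;
        return 0; // IBV_GID_TYPE_IB
    }

A candidate GID then replaces the current best whenever its score is higher.
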
-bool msgr_rdma_context_t::reserve_cqe(int n)
-{
-    this->used_max_cqe += n;
-    if (this->used_max_cqe > this->max_cqe)
-    {
-        // Resize CQ
-        // Mellanox ConnectX-4 supports up to 4194303 CQEs, so it's fine to put everything into a single CQ
-        int new_max_cqe = this->max_cqe;
-        while (this->used_max_cqe > new_max_cqe)
-        {
-            new_max_cqe *= 2;
-        }
-        if (ibv_resize_cq(this->cq, new_max_cqe) != 0)
-        {
-            fprintf(stderr, "Couldn't resize RDMA completion queue to %d entries\n", new_max_cqe);
-            return false;
-        }
-        this->max_cqe = new_max_cqe;
-    }
-    return true;
-}
 msgr_rdma_connection_t *msgr_rdma_connection_t::create(msgr_rdma_context_t *ctx, uint32_t max_send,
     uint32_t max_recv, uint32_t max_sge, uint32_t max_msg)
 {
-    if (!ctx->reserve_cqe(max_send+max_recv))
-        return NULL;
     msgr_rdma_connection_t *conn = new msgr_rdma_connection_t;
     max_sge = max_sge > ctx->attrx.orig_attr.max_sge ? ctx->attrx.orig_attr.max_sge : max_sge;
@@ -438,6 +419,25 @@ msgr_rdma_connection_t *msgr_rdma_connection_t::create(msgr_rdma_context_t *ctx,
     conn->max_sge = max_sge;
     conn->max_msg = max_msg;
+    ctx->used_max_cqe += max_send+max_recv;
+    if (ctx->used_max_cqe > ctx->max_cqe)
+    {
+        // Resize CQ
+        // Mellanox ConnectX-4 supports up to 4194303 CQEs, so it's fine to put everything into a single CQ
+        int new_max_cqe = ctx->max_cqe;
+        while (ctx->used_max_cqe > new_max_cqe)
+        {
+            new_max_cqe *= 2;
+        }
+        if (ibv_resize_cq(ctx->cq, new_max_cqe) != 0)
+        {
+            fprintf(stderr, "Couldn't resize RDMA completion queue to %d entries\n", new_max_cqe);
+            delete conn;
+            return NULL;
+        }
+        ctx->max_cqe = new_max_cqe;
+    }
     ibv_qp_init_attr init_attr = {
         .send_cq = ctx->cq,
         .recv_cq = ctx->cq,
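
[Editor's note] Both branches size the shared completion queue the same way: each connection reserves max_send+max_recv CQ entries, and the CQ grows geometrically when the reservation no longer fits (the master branch factors this into reserve_cqe(), v1.11.0 keeps it inline). The growth step in isolation, as a sketch:

    // Double until all reserved completions fit; a single large CQ is fine,
    // e.g. Mellanox ConnectX-4 allows up to 4194303 CQEs
    static int grow_cqe_target(int cur_max_cqe, int used_max_cqe)
    {
        int new_max_cqe = cur_max_cqe;
        while (used_max_cqe > new_max_cqe)
            new_max_cqe *= 2;
        return new_max_cqe;
    }
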
@@ -469,10 +469,9 @@ msgr_rdma_connection_t *msgr_rdma_connection_t::create(msgr_rdma_context_t *ctx,
         .port_num = ctx->ib_port,
     };
-    int r = 0;
-    if ((r = ibv_modify_qp(conn->qp, &attr, IBV_QP_STATE | IBV_QP_PKEY_INDEX | IBV_QP_PORT | IBV_QP_ACCESS_FLAGS)) != 0)
+    if (ibv_modify_qp(conn->qp, &attr, IBV_QP_STATE | IBV_QP_PKEY_INDEX | IBV_QP_PORT | IBV_QP_ACCESS_FLAGS))
     {
-        fprintf(stderr, "Failed to switch RDMA queue pair to INIT state: %s (code %d)\n", strerror(r), r);
+        fprintf(stderr, "Failed to switch RDMA queue pair to INIT state\n");
         delete conn;
         return NULL;
     }
@@ -480,12 +479,25 @@ msgr_rdma_connection_t *msgr_rdma_connection_t::create(msgr_rdma_context_t *ctx,
     return conn;
 }
+static ibv_mtu mtu_to_ibv_mtu(uint32_t mtu)
+{
+    switch (mtu)
+    {
+        case 256: return IBV_MTU_256;
+        case 512: return IBV_MTU_512;
+        case 1024: return IBV_MTU_1024;
+        case 2048: return IBV_MTU_2048;
+        case 4096: return IBV_MTU_4096;
+    }
+    return IBV_MTU_4096;
+}
 int msgr_rdma_connection_t::connect(msgr_rdma_address_t *dest)
 {
     auto conn = this;
     ibv_qp_attr attr = {
         .qp_state = IBV_QPS_RTR,
-        .path_mtu = bytes_to_ibv_mtu(conn->ctx->mtu),
+        .path_mtu = mtu_to_ibv_mtu(conn->ctx->mtu),
         .rq_psn = dest->psn,
         .sq_psn = conn->addr.psn,
         .dest_qp_num = dest->qpn,
@@ -510,19 +522,18 @@ int msgr_rdma_connection_t::connect(msgr_rdma_address_t *dest)
         .rnr_retry = 7,
     };
     // FIXME No idea if ibv_modify_qp is a blocking operation or not. No idea if it has a timeout and what it is.
-    int r;
-    if ((r = ibv_modify_qp(conn->qp, &attr, IBV_QP_STATE | IBV_QP_AV | IBV_QP_PATH_MTU |
-        IBV_QP_DEST_QPN | IBV_QP_RQ_PSN | IBV_QP_MAX_DEST_RD_ATOMIC | IBV_QP_MIN_RNR_TIMER)) != 0)
+    if (ibv_modify_qp(conn->qp, &attr, IBV_QP_STATE | IBV_QP_AV | IBV_QP_PATH_MTU |
+        IBV_QP_DEST_QPN | IBV_QP_RQ_PSN | IBV_QP_MAX_DEST_RD_ATOMIC | IBV_QP_MIN_RNR_TIMER))
     {
-        fprintf(stderr, "Failed to switch RDMA queue pair to RTR (ready-to-receive) state: %s (code %d)\n", strerror(r), r);
-        return -r;
+        fprintf(stderr, "Failed to switch RDMA queue pair to RTR (ready-to-receive) state\n");
+        return 1;
     }
     attr.qp_state = IBV_QPS_RTS;
-    if ((r = ibv_modify_qp(conn->qp, &attr, IBV_QP_STATE | IBV_QP_TIMEOUT |
-        IBV_QP_RETRY_CNT | IBV_QP_RNR_RETRY | IBV_QP_SQ_PSN | IBV_QP_MAX_QP_RD_ATOMIC)) != 0)
+    if (ibv_modify_qp(conn->qp, &attr, IBV_QP_STATE | IBV_QP_TIMEOUT |
+        IBV_QP_RETRY_CNT | IBV_QP_RNR_RETRY | IBV_QP_SQ_PSN | IBV_QP_MAX_QP_RD_ATOMIC))
     {
-        fprintf(stderr, "Failed to switch RDMA queue pair to RTS (ready-to-send) state: %s (code %d)\n", strerror(r), r);
-        return -r;
+        fprintf(stderr, "Failed to switch RDMA queue pair to RTS (ready-to-send) state\n");
+        return 1;
     }
     return 0;
 }
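
[Editor's note] An RC queue pair has to be walked through the verbs state machine (RESET -> INIT -> RTR -> RTS) before it can exchange data; the two ibv_modify_qp() calls above perform the RTR and RTS steps. The master side also reports the real error instead of a bare "return 1"; that pattern in isolation, as a sketch:

    #include <infiniband/verbs.h>
    #include <stdio.h>
    #include <string.h>

    // Sketch of the master-side error-reporting pattern
    static int modify_qp_checked(ibv_qp *qp, ibv_qp_attr *attr, int mask, const char *state)
    {
        int r = ibv_modify_qp(qp, attr, mask);
        if (r != 0)
            fprintf(stderr, "Failed to switch RDMA queue pair to %s state: %s (code %d)\n", state, strerror(r), r);
        return -r; // 0 on success, negated errno on failure
    }
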
@@ -537,15 +548,7 @@ bool osd_messenger_t::connect_rdma(int peer_fd, std::string rdma_address, uint64_t client_max_msg)
     {
         client_max_msg = rdma_max_msg;
     }
-    auto cl = clients.at(peer_fd);
-    msgr_rdma_context_t *selected_ctx = choose_rdma_context(cl);
-    if (!selected_ctx)
-    {
-        if (log_level > 0)
-            fprintf(stderr, "No RDMA context for peer %d, using only TCP\n", cl->peer_fd);
-        return false;
-    }
-    msgr_rdma_connection_t *rdma_conn = msgr_rdma_connection_t::create(selected_ctx, rdma_max_send, rdma_max_recv, rdma_max_sge, client_max_msg);
+    auto rdma_conn = msgr_rdma_connection_t::create(rdma_context, rdma_max_send, rdma_max_recv, rdma_max_sge, client_max_msg);
     if (rdma_conn)
     {
         int r = rdma_conn->connect(&addr);
@@ -583,7 +586,7 @@ static void try_send_rdma_wr(osd_client_t *cl, ibv_sge *sge, int op_sge)
     int err = ibv_post_send(cl->rdma_conn->qp, &wr, &bad_wr);
     if (err || bad_wr)
     {
-        fprintf(stderr, "RDMA send failed: %s (code %d)\n", strerror(err), err);
+        fprintf(stderr, "RDMA send failed: %s\n", strerror(err));
         exit(1);
     }
     cl->rdma_conn->cur_send++;
@@ -664,9 +667,9 @@ void osd_messenger_t::try_send_rdma_nodp(osd_client_t *cl)
         // Allocate send ring buffer, if not yet
         rc->send_out_size = rc->max_msg*rdma_max_send;
         rc->send_out.buf = malloc_or_die(rc->send_out_size);
-        if (!rc->ctx->odp)
+        if (!rdma_context->odp)
        {
-            rc->send_out.mr = ibv_reg_mr(rc->ctx->pd, rc->send_out.buf, rc->send_out_size, 0);
+            rc->send_out.mr = ibv_reg_mr(rdma_context->pd, rc->send_out.buf, rc->send_out_size, 0);
             if (!rc->send_out.mr)
             {
                 fprintf(stderr, "Failed to register RDMA memory region: %s\n", strerror(errno));
@@ -696,7 +699,7 @@ void osd_messenger_t::try_send_rdma_nodp(osd_client_t *cl)
     ibv_sge sge = {
         .addr = (uintptr_t)dst,
         .length = (uint32_t)copied,
-        .lkey = rc->ctx->odp ? rc->ctx->mr->lkey : rc->send_out.mr->lkey,
+        .lkey = rdma_context->odp ? rdma_context->mr->lkey : rc->send_out.mr->lkey,
     };
     try_send_rdma_wr(cl, &sge, 1);
     rc->send_sizes.push_back(copied);
@@ -706,7 +709,7 @@ void osd_messenger_t::try_send_rdma_nodp(osd_client_t *cl)
 void osd_messenger_t::try_send_rdma(osd_client_t *cl)
 {
-    if (cl->rdma_conn->ctx->odp)
+    if (rdma_context->odp)
         try_send_rdma_odp(cl);
     else
         try_send_rdma_nodp(cl);
{ {
msgr_rdma_buf_t b; msgr_rdma_buf_t b;
b.buf = malloc_or_die(rc->max_msg); b.buf = malloc_or_die(rc->max_msg);
if (!rc->ctx->odp) if (!rdma_context->odp)
{ {
b.mr = ibv_reg_mr(rc->ctx->pd, b.buf, rc->max_msg, IBV_ACCESS_LOCAL_WRITE); b.mr = ibv_reg_mr(rdma_context->pd, b.buf, rc->max_msg, IBV_ACCESS_LOCAL_WRITE);
if (!b.mr) if (!b.mr)
{ {
fprintf(stderr, "Failed to register RDMA memory region: %s\n", strerror(errno)); fprintf(stderr, "Failed to register RDMA memory region: %s\n", strerror(errno));
@ -758,7 +761,7 @@ bool osd_messenger_t::try_recv_rdma(osd_client_t *cl)
#define RDMA_EVENTS_AT_ONCE 32 #define RDMA_EVENTS_AT_ONCE 32
void osd_messenger_t::handle_rdma_events(msgr_rdma_context_t *rdma_context) void osd_messenger_t::handle_rdma_events()
{ {
// Request next notification // Request next notification
ibv_cq *ev_cq; ibv_cq *ev_cq;
@ -803,9 +806,6 @@ void osd_messenger_t::handle_rdma_events(msgr_rdma_context_t *rdma_context)
} }
if (!is_send) if (!is_send)
{ {
// Reset OSD ping state - client is obviously alive
cl->ping_time_remaining = 0;
cl->idle_time_remaining = osd_idle_timeout;
rc->cur_recv--; rc->cur_recv--;
if (!handle_read_buffer(cl, rc->recv_buffers[rc->next_recv_buf].buf, wc[i].byte_len)) if (!handle_read_buffer(cl, rc->recv_buffers[rc->next_recv_buf].buf, wc[i].byte_len))
{ {
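
[Editor's note] The three master-only lines removed here mirror the TCP read path (handle_finished_read() in the msgr_receive.cpp diff further below): any received RDMA message proves the peer is alive, so the ping/idle timers are reset. As a sketch, assuming the osd_client_t fields from messenger.h above:

    static inline void reset_liveness(osd_client_t *cl, int osd_idle_timeout)
    {
        cl->ping_time_remaining = 0;                // cancel the outstanding ping wait
        cl->idle_time_remaining = osd_idle_timeout; // restart the idle countdown
    }
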

View File

@@ -2,13 +2,9 @@
 // License: VNPL-1.1 or GNU GPL-2.0+ (see README.md for details)
 #pragma once
-#ifdef WITH_RDMACM
-#include <rdma/rdma_cma.h>
-#endif
 #include <infiniband/verbs.h>
 #include <string>
 #include <vector>
-#include "addr_util.h"
 struct msgr_rdma_address_t
 {
@@ -24,6 +20,7 @@ struct msgr_rdma_address_t
 struct msgr_rdma_context_t
 {
     ibv_context *context = NULL;
+    ibv_device *dev = NULL;
     ibv_device_attr_ex attrx;
     ibv_pd *pd = NULL;
     bool odp = false;
@@ -38,17 +35,8 @@ struct msgr_rdma_context_t
     uint32_t mtu;
     int max_cqe = 0;
     int used_max_cqe = 0;
-    addr_mask_t net_mask = {};
-    bool is_cm = false;
-    int cm_refs = 0;
-    static std::vector<msgr_rdma_context_t*> create_all(const std::vector<addr_mask_t> & osd_network_masks,
-        const char *sel_dev_name, int sel_port_num, int sel_gid_index, uint32_t sel_mtu, bool odp, int log_level);
-    static msgr_rdma_context_t *create(ibv_device *dev, ibv_port_attr & portinfo,
-        int ib_port, int gid_index, uint32_t mtu, bool odp, int log_level);
-    static msgr_rdma_context_t* create_cm(ibv_context *ctx);
-    bool reserve_cqe(int n);
+    static msgr_rdma_context_t *create(std::vector<std::string> osd_networks, const char *ib_devname, uint8_t ib_port, int gid_index, uint32_t mtu, bool odp, int log_level);
     ~msgr_rdma_context_t();
 };
@@ -62,14 +50,11 @@ struct msgr_rdma_connection_t
 {
     msgr_rdma_context_t *ctx = NULL;
     ibv_qp *qp = NULL;
-#ifdef WITH_RDMACM
-    rdma_cm_id *cmid = NULL;
-#endif
     msgr_rdma_address_t addr;
     int max_send = 0, max_recv = 0, max_sge = 0;
-    int cur_send = 0, cur_recv = 0;
     uint64_t max_msg = 0;
+    int cur_send = 0, cur_recv = 0;
     int send_pos = 0, send_buf_pos = 0;
     int next_recv_buf = 0;
     std::vector<msgr_rdma_buf_t> recv_buffers;

View File

@ -1,539 +0,0 @@
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 or GNU GPL-2.0+ (see README.md for details)
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include "msgr_rdma.h"
#include "messenger.h"
struct rdmacm_connecting_t
{
rdma_cm_id *cmid = NULL;
int peer_fd = -1;
osd_num_t peer_osd = 0;
std::string addr;
sockaddr_storage parsed_addr = {};
int rdmacm_port = 0;
int tcp_port = 0;
int timeout_ms = 0;
int timeout_id = -1;
msgr_rdma_context_t *rdma_context = NULL;
};
rdma_cm_id *osd_messenger_t::rdmacm_listen(const std::string & bind_address, int rdmacm_port, int *bound_port, int log_level)
{
sockaddr_storage addr = {};
rdma_cm_id *listener = NULL;
int r = rdma_create_id(rdmacm_evch, &listener, NULL, RDMA_PS_TCP);
if (r != 0)
{
fprintf(stderr, "Failed to create RDMA-CM ID: %s (code %d)\n", strerror(errno), errno);
goto fail;
}
if (!string_to_addr(bind_address, 0, rdmacm_port, &addr))
{
fprintf(stderr, "Server address: %s is not valid\n", bind_address.c_str());
goto fail;
}
r = rdma_bind_addr(listener, (sockaddr*)&addr);
if (r != 0)
{
fprintf(stderr, "Failed to bind RDMA-CM to %s:%d: %s (code %d)\n", bind_address.c_str(), rdmacm_port, strerror(errno), errno);
goto fail;
}
r = rdma_listen(listener, 128);
if (r != 0)
{
fprintf(stderr, "Failed to listen to RDMA-CM address %s:%d: %s (code %d)\n", bind_address.c_str(), rdmacm_port, strerror(errno), errno);
goto fail;
}
if (bound_port)
{
*bound_port = ntohs(rdma_get_src_port(listener));
}
if (log_level > 0)
{
fprintf(stderr, "Listening to RDMA-CM address %s port %d\n", bind_address.c_str(), *bound_port);
}
return listener;
fail:
rdma_destroy_id(listener);
return NULL;
}
void osd_messenger_t::rdmacm_destroy_listener(rdma_cm_id *listener)
{
rdma_destroy_id(listener);
}
void osd_messenger_t::handle_rdmacm_events()
{
// rdma_destroy_id infinitely waits for pthread_cond if called before all events are acked :-(...
std::vector<rdma_cm_event> events_copy;
while (1)
{
rdma_cm_event *ev = NULL;
int r = rdma_get_cm_event(rdmacm_evch, &ev);
if (r != 0)
{
if (errno == EAGAIN || errno == EINTR)
break;
fprintf(stderr, "Failed to get RDMA-CM event: %s (code %d)\n", strerror(errno), errno);
exit(1);
}
// ...so we save a copy of all events EXCEPT connection requests, otherwise they sometimes fail with EVENT_DISCONNECT
if (ev->event == RDMA_CM_EVENT_CONNECT_REQUEST)
{
rdmacm_accept(ev);
}
else
{
events_copy.push_back(*ev);
}
r = rdma_ack_cm_event(ev);
if (r != 0)
{
fprintf(stderr, "Failed to ack (free) RDMA-CM event: %s (code %d)\n", strerror(errno), errno);
exit(1);
}
}
for (auto & evl: events_copy)
{
auto ev = &evl;
if (ev->event == RDMA_CM_EVENT_CONNECT_REQUEST)
{
// Do nothing, handled above
}
else if (ev->event == RDMA_CM_EVENT_CONNECT_ERROR ||
ev->event == RDMA_CM_EVENT_REJECTED ||
ev->event == RDMA_CM_EVENT_DISCONNECTED ||
ev->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
{
auto event_type_name = ev->event == RDMA_CM_EVENT_CONNECT_ERROR ? "RDMA_CM_EVENT_CONNECT_ERROR" : (
ev->event == RDMA_CM_EVENT_REJECTED ? "RDMA_CM_EVENT_REJECTED" : (
ev->event == RDMA_CM_EVENT_DISCONNECTED ? "RDMA_CM_EVENT_DISCONNECTED" : "RDMA_CM_EVENT_DEVICE_REMOVAL"));
auto cli_it = rdmacm_connections.find(ev->id);
if (cli_it != rdmacm_connections.end())
{
fprintf(stderr, "Received %s event for peer %d, closing connection\n",
event_type_name, cli_it->second->peer_fd);
stop_client(cli_it->second->peer_fd);
}
else if (rdmacm_connecting.find(ev->id) != rdmacm_connecting.end())
{
fprintf(stderr, "Received %s event for RDMA-CM OSD %ju connection\n",
event_type_name, rdmacm_connecting[ev->id]->peer_osd);
rdmacm_established(ev);
}
else
{
fprintf(stderr, "Received %s event for an unknown RDMA-CM connection 0x%jx - ignoring\n",
event_type_name, (uint64_t)ev->id);
}
}
else if (ev->event == RDMA_CM_EVENT_ADDR_RESOLVED || ev->event == RDMA_CM_EVENT_ADDR_ERROR)
{
rdmacm_address_resolved(ev);
}
else if (ev->event == RDMA_CM_EVENT_ROUTE_RESOLVED || ev->event == RDMA_CM_EVENT_ROUTE_ERROR)
{
rdmacm_route_resolved(ev);
}
else if (ev->event == RDMA_CM_EVENT_CONNECT_RESPONSE)
{
// Just OK
}
else if (ev->event == RDMA_CM_EVENT_UNREACHABLE || ev->event == RDMA_CM_EVENT_REJECTED)
{
// Handle error
rdmacm_established(ev);
}
else if (ev->event == RDMA_CM_EVENT_ESTABLISHED)
{
rdmacm_established(ev);
}
else if (ev->event == RDMA_CM_EVENT_ADDR_CHANGE || ev->event == RDMA_CM_EVENT_TIMEWAIT_EXIT)
{
// Do nothing
}
else
{
// Other events are unexpected
fprintf(stderr, "Unexpected RDMA-CM event type: %d\n", ev->event);
}
}
}
msgr_rdma_context_t* msgr_rdma_context_t::create_cm(ibv_context *ctx)
{
auto rdma_context = new msgr_rdma_context_t;
rdma_context->is_cm = true;
rdma_context->context = ctx;
rdma_context->pd = ibv_alloc_pd(ctx);
if (!rdma_context->pd)
{
fprintf(stderr, "Couldn't allocate RDMA protection domain\n");
delete rdma_context;
return NULL;
}
rdma_context->odp = false;
rdma_context->channel = ibv_create_comp_channel(rdma_context->context);
if (!rdma_context->channel)
{
fprintf(stderr, "Couldn't create RDMA completion channel\n");
delete rdma_context;
return NULL;
}
rdma_context->max_cqe = 4096;
rdma_context->cq = ibv_create_cq(rdma_context->context, rdma_context->max_cqe, NULL, rdma_context->channel, 0);
if (!rdma_context->cq)
{
fprintf(stderr, "Couldn't create RDMA completion queue\n");
delete rdma_context;
return NULL;
}
if (ibv_query_device_ex(rdma_context->context, NULL, &rdma_context->attrx))
{
fprintf(stderr, "Couldn't query RDMA device for its features\n");
delete rdma_context;
return NULL;
}
return rdma_context;
}
msgr_rdma_context_t* osd_messenger_t::rdmacm_get_context(ibv_context *verbs)
{
// Find the context by device
// We assume that RDMA_CM ev->id->verbs is always the same for the same device (but PD for example isn't)
msgr_rdma_context_t *rdma_context = NULL;
for (auto ctx: rdma_contexts)
{
if (ctx->context == verbs)
{
rdma_context = ctx;
break;
}
}
if (!rdma_context)
{
// Wrap into a new msgr_rdma_context_t
rdma_context = msgr_rdma_context_t::create_cm(verbs);
if (!rdma_context)
return NULL;
fcntl(rdma_context->channel->fd, F_SETFL, fcntl(rdma_context->channel->fd, F_GETFL, 0) | O_NONBLOCK);
tfd->set_fd_handler(rdma_context->channel->fd, false, [this, rdma_context](int notify_fd, int epoll_events)
{
handle_rdma_events(rdma_context);
});
handle_rdma_events(rdma_context);
rdma_contexts.push_back(rdma_context);
}
return rdma_context;
}
msgr_rdma_context_t* osd_messenger_t::rdmacm_create_qp(rdma_cm_id *cmid)
{
auto rdma_context = rdmacm_get_context(cmid->verbs);
if (!rdma_context)
{
return NULL;
}
rdma_context->reserve_cqe(rdma_max_send+rdma_max_recv);
auto max_sge = rdma_max_sge > rdma_context->attrx.orig_attr.max_sge
? rdma_context->attrx.orig_attr.max_sge : rdma_max_sge;
ibv_qp_init_attr init_attr = {
.send_cq = rdma_context->cq,
.recv_cq = rdma_context->cq,
.cap = {
.max_send_wr = (uint32_t)rdma_max_send,
.max_recv_wr = (uint32_t)rdma_max_recv,
.max_send_sge = (uint32_t)max_sge,
.max_recv_sge = (uint32_t)max_sge,
},
.qp_type = IBV_QPT_RC,
};
int r = rdma_create_qp(cmid, rdma_context->pd, &init_attr);
if (r != 0)
{
fprintf(stderr, "Failed to create a queue pair via RDMA-CM: %s (code %d)\n", strerror(errno), errno);
rdma_context->reserve_cqe(-rdma_max_send-rdma_max_recv);
return NULL;
}
return rdma_context;
}
void osd_messenger_t::rdmacm_accept(rdma_cm_event *ev)
{
// Make a fake FD (FIXME: do not use FDs for identifying clients!)
int fake_fd = socket(AF_INET, SOCK_STREAM, 0);
if (fake_fd < 0)
{
fprintf(stderr, "Failed to allocate a fake socket for RDMA-CM client: %s (code %d)\n", strerror(errno), errno);
rdma_destroy_id(ev->id);
return;
}
auto rdma_context = rdmacm_create_qp(ev->id);
if (!rdma_context)
{
rdma_destroy_id(ev->id);
return;
}
// We don't need private_data, RDMA_READ or ATOMIC so use default 1
rdma_conn_param conn_params = {
.responder_resources = 1,
.initiator_depth = 1,
.retry_count = 7,
.rnr_retry_count = 7,
};
if (rdma_accept(ev->id, &conn_params) != 0)
{
fprintf(stderr, "Failed to accept RDMA-CM connection: %s (code %d)\n", strerror(errno), errno);
rdma_context->reserve_cqe(-rdma_max_send-rdma_max_recv);
rdma_destroy_qp(ev->id);
rdma_destroy_id(ev->id);
return;
}
// Wait for RDMA_CM_ESTABLISHED, and enable the connection only after it
auto conn = new rdmacm_connecting_t;
conn->cmid = ev->id;
conn->peer_fd = fake_fd;
conn->parsed_addr = *(sockaddr_storage*)rdma_get_peer_addr(ev->id);
conn->rdma_context = rdma_context;
rdmacm_set_conn_timeout(conn);
rdmacm_connecting[ev->id] = conn;
fprintf(stderr, "[OSD %ju] new client %d: connection from %s via RDMA-CM\n", this->osd_num, conn->peer_fd,
addr_to_string(conn->parsed_addr).c_str());
}
void osd_messenger_t::rdmacm_set_conn_timeout(rdmacm_connecting_t *conn)
{
conn->timeout_ms = peer_connect_timeout*1000;
if (peer_connect_timeout > 0)
{
conn->timeout_id = tfd->set_timer(1000*peer_connect_timeout, false, [this, cmid = conn->cmid](int timer_id)
{
auto conn = rdmacm_connecting.at(cmid);
conn->timeout_id = -1;
if (conn->peer_osd)
fprintf(stderr, "RDMA-CM connection to %s timed out\n", conn->addr.c_str());
else
fprintf(stderr, "Incoming RDMA-CM connection from %s timed out\n", addr_to_string(conn->parsed_addr).c_str());
rdmacm_on_connect_peer_error(cmid, -EPIPE);
});
}
}
void osd_messenger_t::rdmacm_on_connect_peer_error(rdma_cm_id *cmid, int res)
{
auto conn = rdmacm_connecting.at(cmid);
auto addr = conn->addr;
auto tcp_port = conn->tcp_port;
auto peer_osd = conn->peer_osd;
if (conn->timeout_id >= 0)
tfd->clear_timer(conn->timeout_id);
if (conn->peer_fd >= 0)
close(conn->peer_fd);
if (conn->rdma_context)
conn->rdma_context->reserve_cqe(-rdma_max_send-rdma_max_recv);
if (conn->cmid)
{
if (conn->cmid->qp)
rdma_destroy_qp(conn->cmid);
rdma_destroy_id(conn->cmid);
}
rdmacm_connecting.erase(cmid);
delete conn;
if (peer_osd)
{
if (!disable_tcp)
{
// Fall back to TCP instead of just reporting the error to on_connect_peer()
try_connect_peer_tcp(peer_osd, addr.c_str(), tcp_port);
}
else
{
// TCP is disabled
on_connect_peer(peer_osd, res == 0 ? -EINVAL : (res > 0 ? -res : res));
}
}
}
void osd_messenger_t::rdmacm_try_connect_peer(uint64_t peer_osd, const std::string & addr, int rdmacm_port, int fallback_tcp_port)
{
struct sockaddr_storage sa = {};
if (!string_to_addr(addr, false, rdmacm_port, &sa))
{
fprintf(stderr, "Address %s is invalid\n", addr.c_str());
on_connect_peer(peer_osd, -EINVAL);
return;
}
rdma_cm_id *cmid = NULL;
if (rdma_create_id(rdmacm_evch, &cmid, NULL, RDMA_PS_TCP) != 0)
{
int res = -errno;
fprintf(stderr, "Failed to create RDMA-CM ID: %s (code %d), using TCP\n", strerror(errno), errno);
if (!disable_tcp)
try_connect_peer_tcp(peer_osd, addr.c_str(), fallback_tcp_port);
else
on_connect_peer(peer_osd, res);
return;
}
// Make a fake FD (FIXME: do not use FDs for identifying clients!)
int fake_fd = socket(AF_INET, SOCK_STREAM, 0);
if (fake_fd < 0)
{
int res = -errno;
rdma_destroy_id(cmid);
// Can't create socket, pointless to try TCP
on_connect_peer(peer_osd, res);
return;
}
if (log_level > 0)
fprintf(stderr, "Trying to connect to OSD %ju at %s:%d via RDMA-CM\n", peer_osd, addr.c_str(), rdmacm_port);
auto conn = new rdmacm_connecting_t;
rdmacm_connecting[cmid] = conn;
conn->cmid = cmid;
conn->peer_fd = fake_fd;
conn->peer_osd = peer_osd;
conn->addr = addr;
conn->parsed_addr = sa;
conn->rdmacm_port = rdmacm_port;
conn->tcp_port = fallback_tcp_port;
rdmacm_set_conn_timeout(conn);
if (rdma_resolve_addr(cmid, NULL, (sockaddr*)&conn->parsed_addr, conn->timeout_ms) != 0)
{
auto res = -errno;
// ENODEV means that the client doesn't have an RDMA device for this address
if (res != -ENODEV || log_level > 0)
fprintf(stderr, "Failed to resolve address %s via RDMA-CM: %s (code %d)\n", addr.c_str(), strerror(errno), errno);
rdmacm_on_connect_peer_error(cmid, res);
return;
}
}
void osd_messenger_t::rdmacm_address_resolved(rdma_cm_event *ev)
{
auto cmid = ev->id;
auto conn_it = rdmacm_connecting.find(cmid);
if (conn_it == rdmacm_connecting.end())
{
// Silently ignore unknown IDs
return;
}
auto conn = conn_it->second;
if (ev->event != RDMA_CM_EVENT_ADDR_RESOLVED || ev->status != 0)
{
fprintf(stderr, "Failed to resolve address %s via RDMA-CM: %s (code %d)\n", conn->addr.c_str(),
ev->status > 0 ? "unknown error" : strerror(-ev->status), ev->status);
rdmacm_on_connect_peer_error(cmid, ev->status);
return;
}
auto rdma_context = rdmacm_create_qp(cmid);
if (!rdma_context)
{
rdmacm_on_connect_peer_error(cmid, -EIO);
return;
}
conn->rdma_context = rdma_context;
if (rdma_resolve_route(cmid, conn->timeout_ms) != 0)
{
int res = -errno;
fprintf(stderr, "Failed to resolve route to %s via RDMA-CM: %s (code %d)\n", conn->addr.c_str(), strerror(errno), errno);
rdmacm_on_connect_peer_error(cmid, res);
return;
}
}
void osd_messenger_t::rdmacm_route_resolved(rdma_cm_event *ev)
{
auto cmid = ev->id;
auto conn_it = rdmacm_connecting.find(cmid);
if (conn_it == rdmacm_connecting.end())
{
// Silently ignore unknown IDs
return;
}
auto conn = conn_it->second;
if (ev->event != RDMA_CM_EVENT_ROUTE_RESOLVED || ev->status != 0)
{
fprintf(stderr, "Failed to resolve route to %s via RDMA-CM: %s (code %d)\n", conn->addr.c_str(),
ev->status > 0 ? "unknown error" : strerror(-ev->status), ev->status);
rdmacm_on_connect_peer_error(cmid, ev->status);
return;
}
// We don't need private_data, RDMA_READ or ATOMIC so use default 1
rdma_conn_param conn_params = {
.responder_resources = 1,
.initiator_depth = 1,
.retry_count = 7,
.rnr_retry_count = 7,
};
if (rdma_connect(cmid, &conn_params) != 0)
{
int res = -errno;
fprintf(stderr, "Failed to connect to %s:%d via RDMA-CM: %s (code %d)\n", conn->addr.c_str(), conn->rdmacm_port, strerror(errno), errno);
rdmacm_on_connect_peer_error(cmid, res);
return;
}
}
void osd_messenger_t::rdmacm_established(rdma_cm_event *ev)
{
auto cmid = ev->id;
auto conn_it = rdmacm_connecting.find(cmid);
if (conn_it == rdmacm_connecting.end())
{
// Silently ignore unknown IDs
return;
}
auto conn = conn_it->second;
auto peer_osd = conn->peer_osd;
if (ev->event != RDMA_CM_EVENT_ESTABLISHED || ev->status != 0)
{
fprintf(stderr, "Failed to connect to %s:%d via RDMA-CM: %s (code %d)\n", conn->addr.c_str(), conn->rdmacm_port,
ev->status > 0 ? "unknown error" : strerror(-ev->status), ev->status);
rdmacm_on_connect_peer_error(cmid, ev->status);
return;
}
// Wrap into a new msgr_rdma_connection_t
msgr_rdma_connection_t *rc = new msgr_rdma_connection_t;
rc->ctx = conn->rdma_context;
    rc->ctx->cm_refs++; // FIXME: now unused; also count connecting_t's when this is used
rc->max_send = rdma_max_send;
rc->max_recv = rdma_max_recv;
rc->max_sge = rdma_max_sge > rc->ctx->attrx.orig_attr.max_sge
? rc->ctx->attrx.orig_attr.max_sge : rdma_max_sge;
rc->max_msg = rdma_max_msg;
rc->cmid = conn->cmid;
rc->qp = conn->cmid->qp;
// And an osd_client_t
auto cl = new osd_client_t();
cl->peer_addr = conn->parsed_addr;
cl->peer_port = conn->rdmacm_port;
cl->peer_fd = conn->peer_fd;
cl->peer_state = PEER_RDMA;
cl->connect_timeout_id = -1;
cl->osd_num = peer_osd;
cl->in_buf = malloc_or_die(receive_buffer_size);
cl->rdma_conn = rc;
clients[conn->peer_fd] = cl;
if (conn->timeout_id >= 0)
{
tfd->clear_timer(conn->timeout_id);
}
delete conn;
rdmacm_connecting.erase(cmid);
rdmacm_connections[cmid] = cl;
if (log_level > 0 && peer_osd)
{
fprintf(stderr, "Successfully connected with OSD %ju using RDMA-CM\n", peer_osd);
}
// Add initial receive request(s)
try_recv_rdma(cl);
if (peer_osd)
{
check_peer_config(cl);
}
}
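
The four functions above implement the client half of the usual librdmacm state machine:
rdma_resolve_addr() -> ADDR_RESOLVED -> rdma_resolve_route() -> ROUTE_RESOLVED -> rdma_connect() -> ESTABLISHED.
A minimal sketch of the same flow driven by a blocking event channel (illustrative only, not
Vitastor code: Vitastor instead polls its rdmacm_evch from the epoll loop, and pump_events and
the 2000 ms timeout are hypothetical names):

#include <rdma/rdma_cma.h>
#include <errno.h>
#include <stdio.h>

static int pump_events(rdma_event_channel *evch)
{
    rdma_cm_event *ev = NULL;
    while (rdma_get_cm_event(evch, &ev) == 0)
    {
        // Copy what we need, then ack: rdma_ack_cm_event() frees the event,
        // but the cm_id itself stays valid
        auto type = ev->event;
        auto id = ev->id;
        rdma_ack_cm_event(ev);
        if (type == RDMA_CM_EVENT_ADDR_RESOLVED)
        {
            // A real client would create the protection domain, CQ and QP here
            if (rdma_resolve_route(id, 2000 /* ms */) != 0)
                return -errno;
        }
        else if (type == RDMA_CM_EVENT_ROUTE_RESOLVED)
        {
            rdma_conn_param conn_params = {
                .responder_resources = 1,
                .initiator_depth = 1,
                .retry_count = 7,
                .rnr_retry_count = 7,
            };
            if (rdma_connect(id, &conn_params) != 0)
                return -errno;
        }
        else if (type == RDMA_CM_EVENT_ESTABLISHED)
        {
            return 0; // connected, ready for ibv_post_send()/ibv_post_recv()
        }
        else
        {
            fprintf(stderr, "unexpected RDMA-CM event: %s\n", rdma_event_str(type));
            return -EIO;
        }
    }
    return -errno;
}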

View File

@@ -214,7 +214,6 @@ bool osd_messenger_t::handle_read_buffer(osd_client_t *cl, void *curbuf, int rem
 bool osd_messenger_t::handle_finished_read(osd_client_t *cl)
 {
-    // Reset OSD ping state
     cl->ping_time_remaining = 0;
     cl->idle_time_remaining = osd_idle_timeout;
     cl->recv_list.reset();
@@ -223,19 +222,7 @@ bool osd_messenger_t::handle_finished_read(osd_client_t *cl)
     if (cl->read_op->req.hdr.magic == SECONDARY_OSD_REPLY_MAGIC)
         return handle_reply_hdr(cl);
     else if (cl->read_op->req.hdr.magic == SECONDARY_OSD_OP_MAGIC)
-    {
-        if (cl->check_sequencing)
-        {
-            if (cl->read_op->req.hdr.id != cl->read_op_id)
-            {
-                fprintf(stderr, "Warning: operation sequencing is broken on client %d, stopping client\n", cl->peer_fd);
-                stop_client(cl->peer_fd);
-                return false;
-            }
-            cl->read_op_id++;
-        }
         handle_op_hdr(cl);
-    }
     else
     {
         fprintf(stderr, "Received garbage: magic=%jx id=%ju opcode=%jx from %d\n", cl->read_op->req.hdr.magic, cl->read_op->req.hdr.id, cl->read_op->req.hdr.opcode, cl->peer_fd);

View File

@@ -14,7 +14,6 @@ void osd_messenger_t::outbox_push(osd_op_t *cur_op)
     if (cur_op->op_type == OSD_OP_OUT)
     {
         clock_gettime(CLOCK_REALTIME, &cur_op->tv_begin);
-        cur_op->req.hdr.id = ++cl->send_op_id;
     }
     else
     {
@@ -188,7 +187,6 @@ bool osd_messenger_t::try_send(osd_client_t *cl)
     {
         return true;
     }
-    assert(cl->peer_state != PEER_RDMA);
     if (ringloop && !use_sync_send_recv)
     {
         auto iothread = iothreads.size() ? iothreads[peer_fd % iothreads.size()] : NULL;
@@ -204,24 +202,8 @@ bool osd_messenger_t::try_send(osd_client_t *cl)
         cl->write_msg.msg_iovlen = cl->send_list.size() < IOV_MAX ? cl->send_list.size() : IOV_MAX;
         cl->refs++;
         ring_data_t* data = ((ring_data_t*)sqe->user_data);
-        data->callback = [this, cl](ring_data_t *data) { handle_send(data->res, data->prev, data->more, cl); };
-        bool use_zc = has_sendmsg_zc && min_zerocopy_send_size >= 0;
-        if (use_zc && min_zerocopy_send_size > 0)
-        {
-            size_t avg_size = 0;
-            for (size_t i = 0; i < cl->write_msg.msg_iovlen; i++)
-                avg_size += cl->write_msg.msg_iov[i].iov_len;
-            if (avg_size/cl->write_msg.msg_iovlen < min_zerocopy_send_size)
-                use_zc = false;
-        }
-        if (use_zc)
-        {
-            my_uring_prep_sendmsg_zc(sqe, peer_fd, &cl->write_msg, MSG_WAITALL);
-        }
-        else
-        {
-            my_uring_prep_sendmsg(sqe, peer_fd, &cl->write_msg, MSG_WAITALL);
-        }
+        data->callback = [this, cl](ring_data_t *data) { handle_send(data->res, cl); };
+        my_uring_prep_sendmsg(sqe, peer_fd, &cl->write_msg, 0);
         if (iothread)
         {
             iothread->add_sqe(sqe_local);
@@ -237,7 +219,7 @@ bool osd_messenger_t::try_send(osd_client_t *cl)
         {
             result = -errno;
         }
-        handle_send(result, false, false, cl);
+        handle_send(result, cl);
     }
     return true;
 }
@@ -257,16 +239,10 @@ void osd_messenger_t::send_replies()
     write_ready_clients.clear();
 }
 
-void osd_messenger_t::handle_send(int result, bool prev, bool more, osd_client_t *cl)
+void osd_messenger_t::handle_send(int result, osd_client_t *cl)
 {
-    if (!prev)
-    {
-        cl->write_msg.msg_iovlen = 0;
-    }
-    if (!more)
-    {
-        cl->refs--;
-    }
+    cl->write_msg.msg_iovlen = 0;
+    cl->refs--;
     if (cl->peer_state == PEER_STOPPED)
     {
         if (cl->refs <= 0)
@@ -284,16 +260,6 @@ void osd_messenger_t::handle_send(int result, bool prev, bool more, osd_client_t
     }
     if (result >= 0)
     {
-        if (prev)
-        {
-            // Second notification - only free a batch of postponed ops
-            int i = 0;
-            for (; i < cl->zc_free_list.size() && cl->zc_free_list[i]; i++)
-                delete cl->zc_free_list[i];
-            if (i > 0)
-                cl->zc_free_list.erase(cl->zc_free_list.begin(), cl->zc_free_list.begin()+i+1);
-            return;
-        }
         int done = 0;
         while (result > 0 && done < cl->send_list.size())
         {
@@ -303,10 +269,7 @@ void osd_messenger_t::handle_send(int result, bool prev, bool more, osd_client_t
             if (cl->outbox[done].flags & MSGR_SENDP_FREE)
             {
                 // Reply fully sent
-                if (more)
-                    cl->zc_free_list.push_back(cl->outbox[done].op);
-                else
-                    delete cl->outbox[done].op;
+                delete cl->outbox[done].op;
             }
             result -= iov.iov_len;
             done++;
@@ -318,12 +281,6 @@ void osd_messenger_t::handle_send(int result, bool prev, bool more, osd_client_t
                 break;
             }
         }
-        if (more)
-        {
-            auto expected = cl->send_list.size() < IOV_MAX ? cl->send_list.size() : IOV_MAX;
-            assert(done == expected);
-            cl->zc_free_list.push_back(NULL); // end marker
-        }
        if (done > 0)
        {
            cl->send_list.erase(cl->send_list.begin(), cl->send_list.begin()+done);

View File

@@ -1194,7 +1194,7 @@ protected:
     }
     else
     {
-        if (cur_op->opcode == OSD_OP_WRITE && !inode && watch->cfg.readonly)
+        if (cur_op->opcode == OSD_OP_WRITE && watch->cfg.readonly)
         {
             cur_op->retval = -EROFS;
             std::function<void(cluster_op_t*)>(cur_op->callback)(cur_op);

View File

@@ -294,9 +294,7 @@ static void coroutine_fn vitastor_co_get_metadata(VitastorRPC *task)
     qemu_mutex_lock(&client->mutex);
     vitastor_c_watch_inode(client->proxy, client->image, vitastor_co_generic_cb, task);
-#if !defined VITASTOR_C_API_VERSION || VITASTOR_C_API_VERSION < 5
     vitastor_schedule_uring_handler(client);
-#endif
     qemu_mutex_unlock(&client->mutex);
 
     while (!task->complete)
@@ -568,22 +566,6 @@ static int vitastor_file_open(BlockDriverState *bs, QDict *options, int flags, E
 static void vitastor_close(BlockDriverState *bs)
 {
     VitastorClient *client = bs->opaque;
-    if (client->uring_eventfd >= 0)
-    {
-        // clear the eventfd handler
-        universal_aio_set_fd_handler(client->ctx, client->uring_eventfd, NULL, NULL, NULL);
-        int wait_bh = 0;
-        qemu_mutex_lock(&client->mutex);
-        // clear uring_eventfd itself to prevent future scheduling of new B/H
-        client->uring_eventfd = -1;
-        wait_bh = client->bh_uring_scheduled;
-        qemu_mutex_unlock(&client->mutex);
-        if (wait_bh)
-        {
-            // wait until existing scheduled B/H is ran
-            BDRV_POLL_WHILE(bs, client->bh_uring_scheduled);
-        }
-    }
     vitastor_c_destroy(client->proxy);
     if (client->fds)
     {
@@ -767,9 +749,7 @@ static int coroutine_fn vitastor_co_preadv(BlockDriverState *bs,
     uint64_t inode = client->watch ? vitastor_c_inode_get_num(client->watch) : client->inode;
     qemu_mutex_lock(&client->mutex);
     vitastor_c_read(client->proxy, inode, offset, bytes, iov->iov, iov->niov, vitastor_co_read_cb, &task);
-#if !defined VITASTOR_C_API_VERSION || VITASTOR_C_API_VERSION < 5
     vitastor_schedule_uring_handler(client);
-#endif
     qemu_mutex_unlock(&client->mutex);
 
     while (!task.complete)
@@ -803,9 +783,7 @@ static int coroutine_fn vitastor_co_pwritev(BlockDriverState *bs,
     uint64_t inode = client->watch ? vitastor_c_inode_get_num(client->watch) : client->inode;
     qemu_mutex_lock(&client->mutex);
     vitastor_c_write(client->proxy, inode, offset, bytes, 0, iov->iov, iov->niov, vitastor_co_generic_cb, &task);
-#if !defined VITASTOR_C_API_VERSION || VITASTOR_C_API_VERSION < 5
     vitastor_schedule_uring_handler(client);
-#endif
     qemu_mutex_unlock(&client->mutex);
 
     while (!task.complete)
@@ -885,9 +863,7 @@ static int coroutine_fn vitastor_co_block_status(
     task.bitmap = client->last_bitmap = NULL;
     qemu_mutex_lock(&client->mutex);
     vitastor_c_read_bitmap(client->proxy, task.inode, task.offset, task.len, !client->skip_parents, vitastor_co_read_bitmap_cb, &task);
-#if !defined VITASTOR_C_API_VERSION || VITASTOR_C_API_VERSION < 5
     vitastor_schedule_uring_handler(client);
-#endif
     qemu_mutex_unlock(&client->mutex);
     while (!task.complete)
     {
@@ -974,9 +950,7 @@ static int coroutine_fn vitastor_co_flush(BlockDriverState *bs)
     qemu_mutex_lock(&client->mutex);
     vitastor_c_sync(client->proxy, vitastor_co_generic_cb, &task);
-#if !defined VITASTOR_C_API_VERSION || VITASTOR_C_API_VERSION < 5
     vitastor_schedule_uring_handler(client);
-#endif
     qemu_mutex_unlock(&client->mutex);
 
     while (!task.complete)

View File

@@ -6,7 +6,7 @@ includedir=${prefix}/@CMAKE_INSTALL_INCLUDEDIR@
 Name: Vitastor
 Description: Vitastor client library
-Version: 2.1.0
+Version: 1.11.0
 Libs: -L${libdir} -lvitastor_client
 Cflags: -I${includedir}

View File

@@ -127,7 +127,6 @@ vitastor_c *vitastor_c_create_qemu_uring(QEMUSetFDHandler *aio_set_fd_handler, v
     auto self = vitastor_c_create_qemu_common(aio_set_fd_handler, aio_context);
     self->ringloop = ringloop;
     self->cli = new cluster_client_t(self->ringloop, self->tfd, cfg_json);
-    ringloop->loop();
     return self;
 }
 
@@ -151,7 +150,6 @@ vitastor_c *vitastor_c_create_uring(const char *config_path, const char *etcd_ho
     self->ringloop = ringloop;
     self->epmgr = new epoll_manager_t(self->ringloop);
     self->cli = new cluster_client_t(self->ringloop, self->epmgr->tfd, cfg_json);
-    ringloop->loop();
     return self;
 }
 
@@ -185,7 +183,6 @@ vitastor_c *vitastor_c_create_uring_json(const char **options, int options_len)
     self->ringloop = ringloop;
     self->epmgr = new epoll_manager_t(self->ringloop);
     self->cli = new cluster_client_t(self->ringloop, self->epmgr->tfd, cfg_json);
-    ringloop->loop();
     return self;
 }
 
@@ -231,10 +228,6 @@ void vitastor_c_on_ready(vitastor_c *client, VitastorIOHandler cb, void *opaque)
     {
         cb(opaque, 0);
     });
-    if (client->ringloop)
-    {
-        client->ringloop->loop();
-    }
 }
 
 void vitastor_c_uring_wait_ready(vitastor_c *client)
@@ -291,10 +284,6 @@ void vitastor_c_read(vitastor_c *client, uint64_t inode, uint64_t offset, uint64
         delete op;
     };
     client->cli->execute(op);
-    if (client->ringloop)
-    {
-        client->ringloop->loop();
-    }
 }
 
 void vitastor_c_write(vitastor_c *client, uint64_t inode, uint64_t offset, uint64_t len, uint64_t check_version,
@@ -316,10 +305,6 @@ void vitastor_c_write(vitastor_c *client, uint64_t inode, uint64_t offset, uint6
         delete op;
     };
     client->cli->execute(op);
-    if (client->ringloop)
-    {
-        client->ringloop->loop();
-    }
 }
 
 void vitastor_c_delete(vitastor_c *client, uint64_t inode, uint64_t offset, uint64_t len, uint64_t check_version,
@@ -337,10 +322,6 @@ void vitastor_c_delete(vitastor_c *client, uint64_t inode, uint64_t offset, uint
         delete op;
     };
     client->cli->execute(op);
-    if (client->ringloop)
-    {
-        client->ringloop->loop();
-    }
 }
 
 void vitastor_c_read_bitmap(vitastor_c *client, uint64_t inode, uint64_t offset, uint64_t len,
@@ -363,10 +344,6 @@ void vitastor_c_read_bitmap(vitastor_c *client, uint64_t inode, uint64_t offset,
         delete op;
     };
     client->cli->execute(op);
-    if (client->ringloop)
-    {
-        client->ringloop->loop();
-    }
 }
 
 void vitastor_c_sync(vitastor_c *client, VitastorIOHandler cb, void *opaque)
@@ -379,10 +356,6 @@ void vitastor_c_sync(vitastor_c *client, VitastorIOHandler cb, void *opaque)
         delete op;
     };
     client->cli->execute(op);
-    if (client->ringloop)
-    {
-        client->ringloop->loop();
-    }
 }
 
 void vitastor_c_watch_inode(vitastor_c *client, char *image, VitastorIOHandler cb, void *opaque)
@@ -392,10 +365,6 @@ void vitastor_c_watch_inode(vitastor_c *client, char *image, VitastorIOHandler c
         auto watch = client->cli->st_cli.watch_inode(std::string(image));
         cb(opaque, (long)watch);
     });
-    if (client->ringloop)
-    {
-        client->ringloop->loop();
-    }
 }
 
 void vitastor_c_close_watch(vitastor_c *client, void *handle)

View File

@@ -7,7 +7,7 @@
 #define VITASTOR_QEMU_PROXY_H
 
 // C API wrapper version
-#define VITASTOR_C_API_VERSION 5
+#define VITASTOR_C_API_VERSION 4
 
 #ifndef POOL_ID_BITS
 #define POOL_ID_BITS 16
Some files were not shown because too many files have changed in this diff.