forked from vitalif/vitastor
Compare commits (1 commit): separate-d ... sec_osd_ms
Commit 68d4f2a481
@@ -2,6 +2,6 @@ cmake_minimum_required(VERSION 2.8)
 
 project(vitastor)
 
-set(VERSION "0.6.11")
+set(VERSION "0.6.10")
 
 add_subdirectory(src)

@@ -1,4 +1,4 @@
-VERSION ?= v0.6.11
+VERSION ?= v0.6.10
 
 all: build push
 

@@ -49,7 +49,7 @@ spec:
 capabilities:
 add: ["SYS_ADMIN"]
 allowPrivilegeEscalation: true
-image: vitalif/vitastor-csi:v0.6.11
+image: vitalif/vitastor-csi:v0.6.10
 args:
 - "--node=$(NODE_ID)"
 - "--endpoint=$(CSI_ENDPOINT)"

@@ -116,7 +116,7 @@ spec:
 privileged: true
 capabilities:
 add: ["SYS_ADMIN"]
-image: vitalif/vitastor-csi:v0.6.11
+image: vitalif/vitastor-csi:v0.6.10
 args:
 - "--node=$(NODE_ID)"
 - "--endpoint=$(CSI_ENDPOINT)"

@@ -5,7 +5,7 @@ package vitastor
 
 const (
 vitastorCSIDriverName = "csi.vitastor.io"
-vitastorCSIDriverVersion = "0.6.11"
+vitastorCSIDriverVersion = "0.6.10"
 )
 
 // Config struct fills the parameters of request or user input
debian/changelog (vendored, 2 changed lines)

@@ -1,4 +1,4 @@
-vitastor (0.6.11-1) unstable; urgency=medium
+vitastor (0.6.10-1) unstable; urgency=medium
 
 * RDMA support
 * Bugfixes
debian/vitastor.Dockerfile (vendored, 8 changed lines)

@@ -33,8 +33,8 @@ RUN set -e -x; \
 mkdir -p /root/packages/vitastor-$REL; \
 rm -rf /root/packages/vitastor-$REL/*; \
 cd /root/packages/vitastor-$REL; \
-cp -r /root/vitastor vitastor-0.6.11; \
-cd vitastor-0.6.11; \
+cp -r /root/vitastor vitastor-0.6.10; \
+cd vitastor-0.6.10; \
 ln -s /root/fio-build/fio-*/ ./fio; \
 FIO=$(head -n1 fio/debian/changelog | perl -pe 's/^.*\((.*?)\).*$/$1/'); \
 ls /usr/include/linux/raw.h || cp ./debian/raw.h /usr/include/linux/raw.h; \

@@ -47,8 +47,8 @@ RUN set -e -x; \
 rm -rf a b; \
 echo "dep:fio=$FIO" > debian/fio_version; \
 cd /root/packages/vitastor-$REL; \
-tar --sort=name --mtime='2020-01-01' --owner=0 --group=0 --exclude=debian -cJf vitastor_0.6.11.orig.tar.xz vitastor-0.6.11; \
-cd vitastor-0.6.11; \
+tar --sort=name --mtime='2020-01-01' --owner=0 --group=0 --exclude=debian -cJf vitastor_0.6.10.orig.tar.xz vitastor-0.6.10; \
+cd vitastor-0.6.10; \
 V=$(head -n1 debian/changelog | perl -pe 's/^.*\((.*?)\).*$/$1/'); \
 DEBFULLNAME="Vitaliy Filippov <vitalif@yourcmc.ru>" dch -D $REL -v "$V""$REL" "Rebuild for $REL"; \
 DEB_BUILD_OPTIONS=nocheck dpkg-buildpackage --jobs=auto -sa; \
@@ -50,7 +50,7 @@ from cinder.volume import configuration
 from cinder.volume import driver
 from cinder.volume import volume_utils
 
-VERSION = '0.6.11'
+VERSION = '0.6.10'
 
 LOG = logging.getLogger(__name__)
 
@@ -25,4 +25,4 @@ rm fio
 mv fio-copy fio
 FIO=`rpm -qi fio | perl -e 'while(<>) { /^Epoch[\s:]+(\S+)/ && print "$1:"; /^Version[\s:]+(\S+)/ && print $1; /^Release[\s:]+(\S+)/ && print "-$1"; }'`
 perl -i -pe 's/(Requires:\s*fio)([^\n]+)?/$1 = '$FIO'/' $VITASTOR/rpm/vitastor-el$EL.spec
-tar --transform 's#^#vitastor-0.6.11/#' --exclude 'rpm/*.rpm' -czf $VITASTOR/../vitastor-0.6.11$(rpm --eval '%dist').tar.gz *
+tar --transform 's#^#vitastor-0.6.10/#' --exclude 'rpm/*.rpm' -czf $VITASTOR/../vitastor-0.6.10$(rpm --eval '%dist').tar.gz *
@@ -34,7 +34,7 @@ ADD . /root/vitastor
 RUN set -e; \
 cd /root/vitastor/rpm; \
 sh build-tarball.sh; \
-cp /root/vitastor-0.6.11.el7.tar.gz ~/rpmbuild/SOURCES; \
+cp /root/vitastor-0.6.10.el7.tar.gz ~/rpmbuild/SOURCES; \
 cp vitastor-el7.spec ~/rpmbuild/SPECS/vitastor.spec; \
 cd ~/rpmbuild/SPECS/; \
 rpmbuild -ba vitastor.spec; \

@@ -1,11 +1,11 @@
 Name: vitastor
-Version: 0.6.11
+Version: 0.6.10
 Release: 1%{?dist}
 Summary: Vitastor, a fast software-defined clustered block storage
 
 License: Vitastor Network Public License 1.1
 URL: https://vitastor.io/
-Source0: vitastor-0.6.11.el7.tar.gz
+Source0: vitastor-0.6.10.el7.tar.gz
 
 BuildRequires: liburing-devel >= 0.6
 BuildRequires: gperftools-devel
@@ -33,7 +33,7 @@ ADD . /root/vitastor
 RUN set -e; \
 cd /root/vitastor/rpm; \
 sh build-tarball.sh; \
-cp /root/vitastor-0.6.11.el8.tar.gz ~/rpmbuild/SOURCES; \
+cp /root/vitastor-0.6.10.el8.tar.gz ~/rpmbuild/SOURCES; \
 cp vitastor-el8.spec ~/rpmbuild/SPECS/vitastor.spec; \
 cd ~/rpmbuild/SPECS/; \
 rpmbuild -ba vitastor.spec; \

@@ -1,11 +1,11 @@
 Name: vitastor
-Version: 0.6.11
+Version: 0.6.10
 Release: 1%{?dist}
 Summary: Vitastor, a fast software-defined clustered block storage
 
 License: Vitastor Network Public License 1.1
 URL: https://vitastor.io/
-Source0: vitastor-0.6.11.el8.tar.gz
+Source0: vitastor-0.6.10.el8.tar.gz
 
 BuildRequires: liburing-devel >= 0.6
 BuildRequires: gperftools-devel
@@ -15,7 +15,7 @@ if("${CMAKE_INSTALL_PREFIX}" MATCHES "^/usr/local/?$")
 set(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}")
 endif()
 
-add_definitions(-DVERSION="0.6.11")
+add_definitions(-DVERSION="0.6.10")
 add_definitions(-Wall -Wno-sign-compare -Wno-comment -Wno-parentheses -Wno-pointer-arith -fdiagnostics-color=always -I ${CMAKE_SOURCE_DIR}/src)
 if (${WITH_ASAN})
 add_definitions(-fsanitize=address -fno-omit-frame-pointer)

@@ -111,11 +111,12 @@ if (${WITH_FIO})
 # libfio_vitastor_sec.so
 add_library(fio_vitastor_sec SHARED
 fio_sec_osd.cpp
-rw_blocking.cpp
-addr_util.cpp
 )
 target_link_libraries(fio_vitastor_sec
+vitastor_common
 tcmalloc_minimal
+${LIBURING_LIBRARIES}
+${IBVERBS_LIBRARIES}
 )
 endif (${WITH_FIO})
 
@@ -29,22 +29,23 @@
 #include <unordered_map>
 
 #include "addr_util.h"
-#include "rw_blocking.h"
+#include "epoll_manager.h"
+#include "ringloop.h"
+#include "messenger.h"
 #include "osd_ops.h"
 #include "fio_headers.h"
 
-struct op_buf_t
-{
-osd_any_op_t buf;
-io_u* fio_op;
-};
-
 struct sec_data
 {
+epoll_manager_t *epmgr;
+ring_loop_t *ringloop;
+osd_messenger_t *msgr;
+ring_consumer_t looper;
+void *bitmap_buf;
 int connect_fd;
+uint64_t op_id;
 /* block_size = 1 << block_order (128KB by default) */
 uint64_t block_order = 17, block_size = 1 << 17;
-std::unordered_map<uint64_t, op_buf_t*> queue;
 bool last_sync = false;
 /* The list of completed io_u structs. */
 std::vector<io_u*> completed;

@@ -59,7 +60,6 @@ struct sec_options
 int single_primary = 0;
 int trace = 0;
 int block_order = 17;
-int zerocopy_send = 0;
 };
 
 static struct fio_option options[] = {
@@ -110,16 +110,6 @@ static struct fio_option options[] = {
 .category = FIO_OPT_C_ENGINE,
 .group = FIO_OPT_G_FILENAME,
 },
-{
-.name = "zerocopy_send",
-.lname = "Use zero-copy send",
-.type = FIO_OPT_BOOL,
-.off1 = offsetof(struct sec_options, zerocopy_send),
-.help = "Use zero-copy send (MSG_ZEROCOPY)",
-.def = "0",
-.category = FIO_OPT_C_ENGINE,
-.group = FIO_OPT_G_FILENAME,
-},
 {
 .name = NULL,
 },

@@ -128,9 +118,6 @@ static struct fio_option options[] = {
 static int sec_setup(struct thread_data *td)
 {
 sec_data *bsd;
-//fio_file *f;
-//int r;
-//int64_t size;
 
 bsd = new sec_data;
 if (!bsd)

@@ -147,8 +134,6 @@ static int sec_setup(struct thread_data *td)
 td->o.open_files++;
 }
 
-//f = td->files[0];
-//f->real_file_size = size;
 return 0;
 }
 

@@ -157,6 +142,10 @@ static void sec_cleanup(struct thread_data *td)
 sec_data *bsd = (sec_data*)td->io_ops_data;
 if (bsd)
 {
+delete bsd->msgr;
+delete bsd->epmgr;
+delete bsd->ringloop;
+free(bsd->bitmap_buf);
 close(bsd->connect_fd);
 delete bsd;
 }
@@ -190,14 +179,45 @@ static int sec_init(struct thread_data *td)
 }
 int one = 1;
 setsockopt(bsd->connect_fd, SOL_TCP, TCP_NODELAY, &one, sizeof(one));
-if (o->zerocopy_send)
-{
-if (setsockopt(bsd->connect_fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one)) < 0)
-{
-perror("setsockopt zerocopy");
-return 1;
-}
-}
+fcntl(bsd->connect_fd, F_SETFL, fcntl(bsd->connect_fd, F_GETFL, 0) | O_NONBLOCK);
+
+json11::Json cfg = json11::Json::object{ { "use_rdma", 0 } };
+
+bsd->bitmap_buf = malloc(4096);
+
+bsd->ringloop = new ring_loop_t(512);
+bsd->epmgr = new epoll_manager_t(bsd->ringloop);
+bsd->msgr = new osd_messenger_t();
+bsd->msgr->tfd = bsd->epmgr->tfd;
+bsd->msgr->ringloop = bsd->ringloop;
+bsd->msgr->repeer_pgs = [](osd_num_t){};
+bsd->msgr->parse_config(cfg);
+bsd->msgr->init();
+
+bsd->looper.loop = [bsd]()
+{
+bsd->msgr->read_requests();
+bsd->msgr->send_replies();
+bsd->ringloop->submit();
+};
+bsd->ringloop->register_consumer(&bsd->looper);
+
+int peer_fd = bsd->connect_fd;
+bsd->msgr->clients[peer_fd] = new osd_client_t();
+bsd->msgr->clients[peer_fd]->peer_addr = addr;
+bsd->msgr->clients[peer_fd]->peer_port = ntohs(((sockaddr_in*)&addr)->sin_port);
+bsd->msgr->clients[peer_fd]->peer_fd = peer_fd;
+bsd->msgr->clients[peer_fd]->peer_state = PEER_CONNECTED;
+bsd->msgr->clients[peer_fd]->connect_timeout_id = -1;
+bsd->msgr->clients[peer_fd]->osd_num = 1;
+bsd->msgr->clients[peer_fd]->in_buf = malloc_or_die(bsd->msgr->receive_buffer_size);
+bsd->epmgr->tfd->set_fd_handler(peer_fd, true, [msgr = bsd->msgr](int peer_fd, int epoll_events)
+{
+// Either OUT (connected) or HUP
+msgr->handle_peer_epoll(peer_fd, epoll_events);
+});
+bsd->msgr->osd_peer_fds[1] = peer_fd;
 
 // FIXME: read config (block size) from OSD
 
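For orientation: the block added above replaces the old hand-rolled socket setup with the same messenger stack the rest of Vitastor uses. The sketch below condenses those added lines into one place with indentation restored. All types and calls (ring_loop_t, epoll_manager_t, osd_messenger_t, ring_consumer_t, malloc_or_die, PEER_CONNECTED) come from the hunk itself and assume the Vitastor source tree; the wrapper function name is illustrative only, not code from the repository.

```cpp
// Condensed sketch of the new sec_init() wiring (reassembled from the '+' lines above).
#include "epoll_manager.h"
#include "ringloop.h"
#include "messenger.h"

static void init_messenger(sec_data *bsd, int connect_fd, sockaddr addr)   // illustrative wrapper
{
    json11::Json cfg = json11::Json::object{ { "use_rdma", 0 } };
    bsd->bitmap_buf = malloc(4096);

    // One io_uring loop + epoll manager + messenger, exactly as in the diff
    bsd->ringloop = new ring_loop_t(512);
    bsd->epmgr = new epoll_manager_t(bsd->ringloop);
    bsd->msgr = new osd_messenger_t();
    bsd->msgr->tfd = bsd->epmgr->tfd;
    bsd->msgr->ringloop = bsd->ringloop;
    bsd->msgr->repeer_pgs = [](osd_num_t){};   // no PG repeering needed in a benchmark client
    bsd->msgr->parse_config(cfg);
    bsd->msgr->init();

    // The ring consumer pumps the messenger on every loop iteration
    bsd->looper.loop = [bsd]()
    {
        bsd->msgr->read_requests();
        bsd->msgr->send_replies();
        bsd->ringloop->submit();
    };
    bsd->ringloop->register_consumer(&bsd->looper);

    // The already-connected socket is registered as OSD peer number 1
    int peer_fd = connect_fd;
    bsd->msgr->clients[peer_fd] = new osd_client_t();
    bsd->msgr->clients[peer_fd]->peer_addr = addr;
    bsd->msgr->clients[peer_fd]->peer_port = ntohs(((sockaddr_in*)&addr)->sin_port);
    bsd->msgr->clients[peer_fd]->peer_fd = peer_fd;
    bsd->msgr->clients[peer_fd]->peer_state = PEER_CONNECTED;
    bsd->msgr->clients[peer_fd]->connect_timeout_id = -1;
    bsd->msgr->clients[peer_fd]->osd_num = 1;
    bsd->msgr->clients[peer_fd]->in_buf = malloc_or_die(bsd->msgr->receive_buffer_size);
    bsd->epmgr->tfd->set_fd_handler(peer_fd, true, [msgr = bsd->msgr](int fd, int epoll_events)
    {
        // Either OUT (connected) or HUP
        msgr->handle_peer_epoll(fd, epoll_events);
    });
    bsd->msgr->osd_peer_fds[1] = peer_fd;
}
```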
@@ -218,9 +238,12 @@ static enum fio_q_status sec_queue(struct thread_data *td, struct io_u *io)
 }
 
 io->engine_data = bsd;
-op_buf_t *op_buf = new op_buf_t;
-op_buf->fio_op = io;
-osd_any_op_t &op = op_buf->buf;
+osd_op_t *oo = new osd_op_t();
+oo->op_type = OSD_OP_OUT;
+oo->peer_fd = bsd->connect_fd;
+
+osd_any_op_t & op = oo->req;
 
 op.hdr.magic = SECONDARY_OSD_OP_MAGIC;
 op.hdr.id = n;

@@ -237,6 +260,9 @@ static enum fio_q_status sec_queue(struct thread_data *td, struct io_u *io)
 op.sec_rw.version = UINT64_MAX; // last unstable
 op.sec_rw.offset = io->offset % bsd->block_size;
 op.sec_rw.len = io->xfer_buflen;
+op.sec_rw.attr_len = 4;
+oo->bitmap = bsd->bitmap_buf;
+oo->bitmap_len = 4;
 }
 else
 {

@@ -245,6 +271,7 @@ static enum fio_q_status sec_queue(struct thread_data *td, struct io_u *io)
 op.rw.offset = io->offset;
 op.rw.len = io->xfer_buflen;
 }
+oo->iov.push_back(io->xfer_buf, io->xfer_buflen);
 bsd->last_sync = false;
 break;
 case DDIR_WRITE:

@@ -266,6 +293,7 @@ static enum fio_q_status sec_queue(struct thread_data *td, struct io_u *io)
 op.rw.offset = io->offset;
 op.rw.len = io->xfer_buflen;
 }
+oo->iov.push_back(io->xfer_buf, io->xfer_buflen);
 bsd->last_sync = false;
 break;
 case DDIR_SYNC:

@@ -287,6 +315,21 @@ static enum fio_q_status sec_queue(struct thread_data *td, struct io_u *io)
 return FIO_Q_COMPLETED;
 }
 
+oo->callback = [td, io](osd_op_t *oo)
+{
+sec_options *opt = (sec_options*)td->eo;
+sec_data *bsd = (sec_data*)td->io_ops_data;
+if (opt->trace)
+{
+printf("--- %s # %ld %ld\n", io->ddir == DDIR_READ ? "READ" :
+(io->ddir == DDIR_WRITE ? "WRITE" : "SYNC"), oo->reply.hdr.id, oo->reply.hdr.retval);
+}
+io->error = oo->reply.hdr.retval < 0 ? -oo->reply.hdr.retval : 0;
+bsd->completed.push_back(io);
+delete oo;
+};
+bsd->msgr->outbox_push(oo);
+
 if (opt->trace)
 {
 printf("+++ %s # %d\n", io->ddir == DDIR_READ ? "READ" :

@@ -296,20 +339,6 @@ static enum fio_q_status sec_queue(struct thread_data *td, struct io_u *io)
 io->error = 0;
 bsd->inflight++;
 bsd->op_n++;
-bsd->queue[n] = op_buf;
-
-iovec iov[2] = { { .iov_base = op.buf, .iov_len = OSD_PACKET_SIZE } };
-int iovcnt = 1, wtotal = OSD_PACKET_SIZE;
-if (io->ddir == DDIR_WRITE)
-{
-iov[iovcnt++] = { .iov_base = io->xfer_buf, .iov_len = io->xfer_buflen };
-wtotal += io->xfer_buflen;
-}
-if (sendv_blocking(bsd->connect_fd, iov, iovcnt, opt->zerocopy_send ? MSG_ZEROCOPY : 0) != wtotal)
-{
-perror("sendmsg");
-exit(1);
-}
 
 if (io->error != 0)
 return FIO_Q_COMPLETED;
@@ -318,74 +347,13 @@ static enum fio_q_status sec_queue(struct thread_data *td, struct io_u *io)
 
 static int sec_getevents(struct thread_data *td, unsigned int min, unsigned int max, const struct timespec *t)
 {
-sec_options *opt = (sec_options*)td->eo;
 sec_data *bsd = (sec_data*)td->io_ops_data;
-// FIXME timeout, at least poll. Now it's the stupidest implementation possible
-osd_any_reply_t reply;
 while (bsd->completed.size() < min)
 {
-read_blocking(bsd->connect_fd, reply.buf, OSD_PACKET_SIZE);
-if (reply.hdr.magic != SECONDARY_OSD_REPLY_MAGIC)
-{
-fprintf(stderr, "bad reply: magic = %lx instead of %lx\n", reply.hdr.magic, SECONDARY_OSD_REPLY_MAGIC);
-exit(1);
-}
-auto it = bsd->queue.find(reply.hdr.id);
-if (it == bsd->queue.end())
-{
-fprintf(stderr, "bad reply: op id %lx missing in local queue\n", reply.hdr.id);
-exit(1);
-}
-io_u* io = it->second->fio_op;
-delete it->second;
-bsd->queue.erase(it);
-if (io->ddir == DDIR_READ)
-{
-if (reply.hdr.retval != io->xfer_buflen)
-{
-fprintf(stderr, "Short read: retval = %ld instead of %llu\n", reply.hdr.retval, io->xfer_buflen);
-exit(1);
-}
-// Support bitmap
-uint64_t bitmap = 0;
-int iovcnt = 0;
-iovec iov[2];
-if (reply.sec_rw.attr_len > 0)
-{
-if (reply.sec_rw.attr_len <= 8)
-iov[iovcnt++] = { .iov_base = &bitmap, .iov_len = reply.sec_rw.attr_len };
-else
-iov[iovcnt++] = { .iov_base = (void*)(bitmap = (uint64_t)malloc(reply.sec_rw.attr_len)), .iov_len = reply.sec_rw.attr_len };
-}
-iov[iovcnt++] = { .iov_base = io->xfer_buf, .iov_len = io->xfer_buflen };
-readv_blocking(bsd->connect_fd, iov, iovcnt);
-if (reply.sec_rw.attr_len > 8)
-{
-free((void*)bitmap);
-}
-}
-else if (io->ddir == DDIR_WRITE)
-{
-if (reply.hdr.retval != io->xfer_buflen)
-{
-fprintf(stderr, "Short write: retval = %ld instead of %llu\n", reply.hdr.retval, io->xfer_buflen);
-exit(1);
-}
-}
-else if (io->ddir == DDIR_SYNC)
-{
-if (reply.hdr.retval != 0)
-{
-fprintf(stderr, "Sync failed: retval = %ld\n", reply.hdr.retval);
-exit(1);
-}
-}
-if (opt->trace)
-{
-printf("--- %s # %ld\n", io->ddir == DDIR_READ ? "READ" :
-(io->ddir == DDIR_WRITE ? "WRITE" : "SYNC"), reply.hdr.id);
-}
-bsd->completed.push_back(io);
+bsd->ringloop->loop();
+if (bsd->completed.size() >= min)
+break;
+bsd->ringloop->wait();
 }
 return bsd->completed.size();
 }
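The submission and completion side changes in the same spirit: instead of writing the request with sendv_blocking() and then parsing replies with read_blocking(), each request is packaged as an osd_op_t with a completion callback and handed to the messenger, and getevents simply drives the event loop until enough callbacks have fired. A condensed sketch follows, assembled only from calls that appear in the added lines above; the helper function names are illustrative, and the op header is assumed to be filled exactly as in sec_queue().

```cpp
// Sketch: queue + getevents after the rework. Messenger/ringloop calls are taken from the diff.
static void submit_io(sec_data *bsd, struct thread_data *td, io_u *io, osd_op_t *oo)  // illustrative helper
{
    oo->callback = [td, io](osd_op_t *oo)
    {
        sec_data *bsd = (sec_data*)td->io_ops_data;
        // retval < 0 carries a negative error code; success leaves io->error == 0
        io->error = oo->reply.hdr.retval < 0 ? -oo->reply.hdr.retval : 0;
        bsd->completed.push_back(io);
        delete oo;
    };
    bsd->msgr->outbox_push(oo);   // the ring consumer registered in sec_init() actually sends it
}

static int wait_for_events(sec_data *bsd, unsigned min)   // illustrative helper
{
    // Replaces the old blocking read loop: run the ring loop until enough ops have completed
    while (bsd->completed.size() < min)
    {
        bsd->ringloop->loop();
        if (bsd->completed.size() >= min)
            break;
        bsd->ringloop->wait();
    }
    return bsd->completed.size();
}
```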
@@ -4,12 +4,10 @@
 #include <unistd.h>
 #include <fcntl.h>
 #include <sys/socket.h>
-#include <sys/random.h>
 #include <sys/epoll.h>
 #include <netinet/tcp.h>
 #include <stdexcept>
 
-#include "base64.h"
 #include "addr_util.h"
 #include "messenger.h"
 

@@ -196,7 +194,7 @@ void osd_messenger_t::connect_peer(uint64_t peer_osd, json11::Json peer_state)
 try_connect_peer(peer_osd);
 }
 
-void osd_messenger_t::try_connect_peer(osd_num_t peer_osd)
+void osd_messenger_t::try_connect_peer(uint64_t peer_osd)
 {
 auto wp_it = wanted_peers.find(peer_osd);
 if (wp_it == wanted_peers.end() || wp_it->second.connecting ||
@@ -217,75 +215,40 @@ void osd_messenger_t::try_connect_peer(osd_num_t peer_osd)
 wp.cur_addr = wp.address_list[wp.address_index].string_value();
 wp.cur_port = wp.port;
 wp.connecting = true;
-try_connect_peer_addr(peer_osd, wp.cur_addr.c_str(), wp.cur_port, NULL, [this](osd_num_t peer_osd, int peer_fd)
-{
-if (peer_fd >= 0)
-osd_peer_fds[peer_osd] = peer_fd;
-on_connect_peer(peer_osd, peer_fd);
-});
+try_connect_peer_addr(peer_osd, wp.cur_addr.c_str(), wp.cur_port);
 }
 
-static std::string urandom_str(int bytes)
-{
-std::string str;
-str.resize(bytes);
-char *buf = (char*)str.data();
-while (bytes > 0)
-{
-int r = getrandom(buf, bytes, 0);
-if (r < 0)
-throw std::runtime_error(std::string("getrandom: ") + strerror(errno));
-buf += r;
-bytes -= r;
-}
-return str;
-}
-
-void osd_messenger_t::try_connect_peer_addr(osd_num_t peer_osd, const char *peer_host, int peer_port,
-osd_client_t *meta_cl, std::function<void(osd_num_t, int)> connect_callback)
+void osd_messenger_t::try_connect_peer_addr(osd_num_t peer_osd, const char *peer_host, int peer_port)
 {
 assert(peer_osd != this->osd_num);
 struct sockaddr addr;
-if (!meta_cl)
+if (!string_to_addr(peer_host, 0, peer_port, &addr))
 {
-if (!string_to_addr(peer_host, 0, peer_port, &addr))
-{
-connect_callback(peer_osd, -EINVAL);
-return;
-}
-}
-else
-{
-addr = meta_cl->peer_addr;
+on_connect_peer(peer_osd, -EINVAL);
+return;
 }
 int peer_fd = socket(addr.sa_family, SOCK_STREAM, 0);
-if (peer_fd >= 0)
-{
-fcntl(peer_fd, F_SETFL, fcntl(peer_fd, F_GETFL, 0) | O_NONBLOCK);
-int r = connect(peer_fd, (sockaddr*)&addr, sizeof(addr));
-if (r < 0 && errno != EINPROGRESS)
-{
-close(peer_fd);
-peer_fd = -1;
-}
-}
 if (peer_fd < 0)
 {
-connect_callback(peer_osd, -errno);
+on_connect_peer(peer_osd, -errno);
+return;
+}
+fcntl(peer_fd, F_SETFL, fcntl(peer_fd, F_GETFL, 0) | O_NONBLOCK);
+int r = connect(peer_fd, (sockaddr*)&addr, sizeof(addr));
+if (r < 0 && errno != EINPROGRESS)
+{
+close(peer_fd);
+on_connect_peer(peer_osd, -errno);
 return;
 }
 clients[peer_fd] = new osd_client_t();
 clients[peer_fd]->peer_addr = addr;
-clients[peer_fd]->peer_port = ((struct sockaddr_in*)&addr)->sin_port;
+clients[peer_fd]->peer_port = peer_port;
 clients[peer_fd]->peer_fd = peer_fd;
 clients[peer_fd]->peer_state = PEER_CONNECTING;
 clients[peer_fd]->connect_timeout_id = -1;
-clients[peer_fd]->connect_callback = connect_callback;
 clients[peer_fd]->osd_num = peer_osd;
 clients[peer_fd]->in_buf = malloc_or_die(receive_buffer_size);
-clients[peer_fd]->data_for = meta_cl ? addr_to_string(meta_cl->peer_addr) : "";
-clients[peer_fd]->data_connection_cookie = meta_cl
-? meta_cl->data_connection_cookie : base64_encode(urandom_str(12));
 tfd->set_fd_handler(peer_fd, true, [this](int peer_fd, int epoll_events)
 {
 // Either OUT (connected) or HUP
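Because the hunk above interleaves removed and added lines heavily, here is the new try_connect_peer_addr() re-assembled from its '+' lines only, with indentation restored; it is offered purely as a reading aid, and the epoll/timeout registration that follows it is left out because those lines are unchanged apart from the callback removal shown in the next hunk.

```cpp
// Reading aid: the simplified connect path after dropping per-client connect callbacks.
// Failures and timeouts are now always reported through on_connect_peer().
void osd_messenger_t::try_connect_peer_addr(osd_num_t peer_osd, const char *peer_host, int peer_port)
{
    assert(peer_osd != this->osd_num);
    struct sockaddr addr;
    if (!string_to_addr(peer_host, 0, peer_port, &addr))
    {
        on_connect_peer(peer_osd, -EINVAL);
        return;
    }
    int peer_fd = socket(addr.sa_family, SOCK_STREAM, 0);
    if (peer_fd < 0)
    {
        on_connect_peer(peer_osd, -errno);
        return;
    }
    fcntl(peer_fd, F_SETFL, fcntl(peer_fd, F_GETFL, 0) | O_NONBLOCK);
    int r = connect(peer_fd, (sockaddr*)&addr, sizeof(addr));
    if (r < 0 && errno != EINPROGRESS)
    {
        close(peer_fd);
        on_connect_peer(peer_osd, -errno);
        return;
    }
    clients[peer_fd] = new osd_client_t();
    clients[peer_fd]->peer_addr = addr;
    clients[peer_fd]->peer_port = peer_port;
    clients[peer_fd]->peer_fd = peer_fd;
    clients[peer_fd]->peer_state = PEER_CONNECTING;
    clients[peer_fd]->connect_timeout_id = -1;
    clients[peer_fd]->osd_num = peer_osd;
    clients[peer_fd]->in_buf = malloc_or_die(receive_buffer_size);
    // ... epoll registration and connect timeout follow, unchanged apart from the
    // connect_callback removal visible in the next hunk
}
```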
@@ -295,12 +258,10 @@ void osd_messenger_t::try_connect_peer_addr(osd_num_t peer_osd, const char *peer
 {
 clients[peer_fd]->connect_timeout_id = tfd->set_timer(1000*peer_connect_timeout, false, [this, peer_fd](int timer_id)
 {
-auto cl = clients.at(peer_fd);
-auto connect_callback = cl->connect_callback;
-cl->connect_callback = NULL;
-osd_num_t peer_osd = cl->osd_num;
+osd_num_t peer_osd = clients.at(peer_fd)->osd_num;
 stop_client(peer_fd, true);
-connect_callback(peer_osd, -EPIPE);
+on_connect_peer(peer_osd, -EPIPE);
+return;
 });
 }
 }

@@ -322,10 +283,8 @@ void osd_messenger_t::handle_connect_epoll(int peer_fd)
 }
 if (result != 0)
 {
-auto connect_callback = cl->connect_callback;
-cl->connect_callback = NULL;
 stop_client(peer_fd, true);
-connect_callback(peer_osd, -result);
+on_connect_peer(peer_osd, -result);
 return;
 }
 int one = 1;

@@ -405,11 +364,6 @@ void osd_messenger_t::on_connect_peer(osd_num_t peer_osd, int peer_fd)
 
 void osd_messenger_t::check_peer_config(osd_client_t *cl)
 {
-json11::Json::object payload;
-if (cl->data_connection_cookie != "")
-{
-payload["data_cookie"] = cl->data_connection_cookie;
-}
 osd_op_t *op = new osd_op_t();
 op->op_type = OSD_OP_OUT;
 op->peer_fd = cl->peer_fd;
@@ -422,33 +376,24 @@ void osd_messenger_t::check_peer_config(osd_client_t *cl)
 },
 },
 };
-if (cl->data_for == "")
-{
 #ifdef WITH_RDMA
 if (rdma_context)
+{
+cl->rdma_conn = msgr_rdma_connection_t::create(rdma_context, rdma_max_send, rdma_max_recv, rdma_max_sge, rdma_max_msg);
+if (cl->rdma_conn)
 {
-cl->rdma_conn = msgr_rdma_connection_t::create(rdma_context, rdma_max_send, rdma_max_recv, rdma_max_sge, rdma_max_msg);
-if (cl->rdma_conn)
-{
-payload["connect_rdma"] = cl->rdma_conn->addr.to_string();
-payload["rdma_max_msg"] = cl->rdma_conn->max_msg;
-}
+json11::Json payload = json11::Json::object {
+{ "connect_rdma", cl->rdma_conn->addr.to_string() },
+{ "rdma_max_msg", cl->rdma_conn->max_msg },
+};
+std::string payload_str = payload.dump();
+op->req.show_conf.json_len = payload_str.size();
+op->buf = malloc_or_die(payload_str.size());
+op->iov.push_back(op->buf, payload_str.size());
+memcpy(op->buf, payload_str.c_str(), payload_str.size());
 }
+}
 #endif
-}
-else
-{
-// Mark it as a data connection
-payload["data_for"] = cl->data_for;
-}
-if (payload.size())
-{
-std::string payload_str = json11::Json(payload).dump();
-op->req.show_conf.json_len = payload_str.size();
-op->buf = malloc_or_die(payload_str.size());
-op->iov.push_back(op->buf, payload_str.size());
-memcpy(op->buf, payload_str.c_str(), payload_str.size());
-}
 op->callback = [this, cl](osd_op_t *op)
 {
 std::string json_err;

@@ -481,30 +426,18 @@ void osd_messenger_t::check_peer_config(osd_client_t *cl)
 cl->osd_num, config["protocol_version"].uint64_value(), OSD_PROTOCOL_VERSION
 );
 }
-else if (cl->data_for != "" && config["data_for"] != cl->data_for)
-{
-err = true;
-fprintf(
-stderr, "OSD %lu does not support separate data connections."
-" Proceeding with a single connection\n", cl->osd_num
-);
-}
 }
 if (err)
 {
 osd_num_t peer_osd = cl->osd_num;
-auto connect_callback = cl->connect_callback;
-cl->connect_callback = NULL;
 stop_client(op->peer_fd);
-connect_callback(peer_osd, -EINVAL);
+on_connect_peer(peer_osd, -1);
 delete op;
 return;
 }
 #ifdef WITH_RDMA
-if (rdma_context && cl->rdma_conn && config["rdma_address"].is_string())
+if (config["rdma_address"].is_string())
 {
-// Prevent creating data connection - we are trying RDMA
-cl->data_connection_cookie = "";
 msgr_rdma_address_t addr;
 if (!msgr_rdma_address_t::from_string(config["rdma_address"].string_value().c_str(), &addr) ||
 cl->rdma_conn->connect(&addr) != 0)

@@ -517,10 +450,8 @@ void osd_messenger_t::check_peer_config(osd_client_t *cl)
 cl->rdma_conn = NULL;
 // FIXME: Keep TCP connection in this case
 osd_num_t peer_osd = cl->osd_num;
-auto connect_callback = cl->connect_callback;
-cl->connect_callback = NULL;
 stop_client(cl->peer_fd);
-connect_callback(peer_osd, -EPIPE);
+on_connect_peer(peer_osd, -1);
 delete op;
 return;
 }

@@ -542,37 +473,8 @@ void osd_messenger_t::check_peer_config(osd_client_t *cl)
 }
 }
 #endif
-if (cl->data_connection_cookie != "")
-{
-// Try to open second connection to the same address
-try_connect_peer_addr(cl->osd_num, NULL, 0, cl, [this, peer_fd = cl->peer_fd](osd_num_t data_peer, int data_peer_fd)
-{
-auto cl_it = clients.find(peer_fd);
-if (cl_it != clients.end())
-{
-// Proceed with or without the data connection
-auto cl = cl_it->second;
-if (data_peer_fd >= 0)
-{
-cl->data_connection_fd = data_peer_fd;
-auto data_cl = clients.at(data_peer_fd);
-data_cl->meta_connection_fd = cl->peer_fd;
-}
-osd_peer_fds[cl->osd_num] = cl->peer_fd;
-on_connect_peer(cl->osd_num, cl->peer_fd);
-}
-else if (data_peer_fd >= 0)
-{
-stop_client(data_peer_fd);
-}
-});
-}
-else
-{
-auto connect_callback = cl->connect_callback;
-cl->connect_callback = NULL;
-connect_callback(cl->osd_num, cl->peer_fd);
-}
+osd_peer_fds[cl->osd_num] = cl->peer_fd;
+on_connect_peer(cl->osd_num, cl->peer_fd);
 delete op;
 };
 outbox_push(op);

@@ -598,7 +500,6 @@ void osd_messenger_t::accept_connections(int listen_fd)
 clients[peer_fd]->peer_fd = peer_fd;
 clients[peer_fd]->peer_state = PEER_CONNECTED;
 clients[peer_fd]->in_buf = malloc_or_die(receive_buffer_size);
-clients_by_addr[addr_to_string(addr)] = peer_fd;
 // Add FD to epoll
 tfd->set_fd_handler(peer_fd, false, [this](int peer_fd, int epoll_events)
 {
@@ -57,10 +57,6 @@ struct osd_client_t
 int ping_time_remaining = 0;
 int idle_time_remaining = 0;
 osd_num_t osd_num = 0;
-std::function<void(osd_num_t, int)> connect_callback;
-
-int data_connection_fd = -1, meta_connection_fd = -1;
-std::string data_connection_cookie, data_for;
 
 void *in_buf = NULL;
 

@@ -124,7 +120,6 @@ struct osd_messenger_t
 protected:
 int keepalive_timer_id = -1;
 
-uint32_t receive_buffer_size = 0;
 int peer_connect_interval = 0;
 int peer_connect_timeout = 0;
 int osd_idle_timeout = 0;

@@ -146,13 +141,14 @@ protected:
 std::vector<std::function<void()>> set_immediate;
 
 public:
+uint32_t receive_buffer_size = 0;
+
 timerfd_manager_t *tfd;
 ring_loop_t *ringloop;
 // osd_num_t is only for logging and asserts
 osd_num_t osd_num;
 uint64_t next_subop_id = 1;
 std::map<int, osd_client_t*> clients;
-std::map<std::string, int> clients_by_addr;
 std::map<osd_num_t, osd_wanted_peer_t> wanted_peers;
 std::map<uint64_t, int> osd_peer_fds;
 // op statistics

@@ -162,7 +158,6 @@ public:
 void parse_config(const json11::Json & config);
 void connect_peer(uint64_t osd_num, json11::Json peer_state);
 void stop_client(int peer_fd, bool force = false, bool force_delete = false);
-void break_data_client_pair(osd_client_t *cl);
 void outbox_push(osd_op_t *cur_op);
 std::function<void(osd_op_t*)> exec_op;
 std::function<void(osd_num_t)> repeer_pgs;

@@ -178,11 +173,11 @@ public:
 bool connect_rdma(int peer_fd, std::string rdma_address, uint64_t client_max_msg);
 #endif
 
+void handle_peer_epoll(int peer_fd, int epoll_events);
+
 protected:
 void try_connect_peer(uint64_t osd_num);
-void try_connect_peer_addr(osd_num_t peer_osd, const char *peer_host, int peer_port,
-osd_client_t *meta_cl, std::function<void(osd_num_t, int)> connect_callback);
-void handle_peer_epoll(int peer_fd, int epoll_events);
+void try_connect_peer_addr(osd_num_t peer_osd, const char *peer_host, int peer_port);
 void handle_connect_epoll(int peer_fd);
 void on_connect_peer(osd_num_t peer_osd, int peer_fd);
 void check_peer_config(osd_client_t *cl);
@@ -4,7 +4,6 @@
 #include <unistd.h>
 #include <assert.h>
 
-#include "addr_util.h"
 #include "messenger.h"
 
 void osd_messenger_t::cancel_osd_ops(osd_client_t *cl)

@@ -59,8 +58,7 @@ void osd_messenger_t::stop_client(int peer_fd, bool force, bool force_delete)
 {
 if (cl->osd_num)
 {
-fprintf(stderr, "[OSD %lu] Stopping client %d (OSD %speer %lu)\n",
-osd_num, peer_fd, cl->meta_connection_fd >= 0 ? " data" : "", cl->osd_num);
+fprintf(stderr, "[OSD %lu] Stopping client %d (OSD peer %lu)\n", osd_num, peer_fd, cl->osd_num);
 }
 else
 {

@@ -70,7 +68,7 @@ void osd_messenger_t::stop_client(int peer_fd, bool force, bool force_delete)
 // First set state to STOPPED so another stop_client() call doesn't try to free it again
 cl->refs++;
 cl->peer_state = PEER_STOPPED;
-if (cl->osd_num && cl->meta_connection_fd < 0)
+if (cl->osd_num)
 {
 // ...and forget OSD peer
 osd_peer_fds.erase(cl->osd_num);

@@ -102,17 +100,9 @@ void osd_messenger_t::stop_client(int peer_fd, bool force, bool force_delete)
 #endif
 if (cl->osd_num)
 {
-if (cl->meta_connection_fd < 0)
-{
-// Then repeer PGs because cancel_op() callbacks can try to perform
-// some actions and we need correct PG states to not do something silly
-repeer_pgs(cl->osd_num);
-}
-else
-{
-// FIXME Try to re-establish data connection
-// Only when the connection is outbound, but here it's always outbound
-}
+// Then repeer PGs because cancel_op() callbacks can try to perform
+// some actions and we need correct PG states to not do something silly
+repeer_pgs(cl->osd_num);
 }
 // Then cancel all operations
 if (cl->read_op)

@@ -138,7 +128,6 @@ void osd_messenger_t::stop_client(int peer_fd, bool force, bool force_delete)
 delete cl->rdma_conn;
 }
 #endif
-clients_by_addr.erase(addr_to_string(cl->peer_addr));
 #endif
 // Find the item again because it can be invalidated at this point
 it = clients.find(peer_fd);

@@ -146,40 +135,9 @@ void osd_messenger_t::stop_client(int peer_fd, bool force, bool force_delete)
 {
 clients.erase(it);
 }
-// Break metadata/data connection pair
-if (cl->data_connection_fd >= 0)
-{
-// No sense to keep data connection when metadata connection is stopped
-auto dc_it = clients.find(cl->data_connection_fd);
-cl->data_connection_fd = -1;
-if (dc_it != clients.end() && dc_it->second->meta_connection_fd == cl->peer_fd)
-{
-stop_client(dc_it->second->peer_fd);
-}
-}
-break_data_client_pair(cl);
-// Refcount and delete
 cl->refs--;
 if (cl->refs <= 0 || force_delete)
 {
 delete cl;
 }
 }
-
-void osd_messenger_t::break_data_client_pair(osd_client_t *cl)
-{
-if (cl->meta_connection_fd >= 0)
-{
-auto dc_it = clients.find(cl->meta_connection_fd);
-if (dc_it != clients.end() && dc_it->second->data_connection_fd == cl->peer_fd)
-dc_it->second->data_connection_fd = -1;
-cl->meta_connection_fd = -1;
-}
-if (cl->data_connection_fd >= 0)
-{
-auto dc_it = clients.find(cl->data_connection_fd);
-if (dc_it != clients.end() && dc_it->second->meta_connection_fd == cl->peer_fd)
-dc_it->second->meta_connection_fd = -1;
-cl->data_connection_fd = -1;
-}
-}
@@ -102,7 +102,7 @@ class osd_t
 bool no_rebalance = false;
 bool no_recovery = false;
 std::string bind_address;
-int bind_port, listen_backlog = 128;
+int bind_port, listen_backlog;
 // FIXME: Implement client queue depth limit
 int client_queue_depth = 128;
 bool allow_test_ops = false;

@@ -178,37 +178,6 @@ void osd_t::exec_show_config(osd_op_t *cur_op)
 }
 }
 }
-else
-{
-#endif
-if (req_json["data_for"].is_string())
-{
-auto cli = msgr.clients.at(cur_op->peer_fd);
-auto md_it = msgr.clients_by_addr.find(req_json["data_for"].string_value());
-if (md_it != msgr.clients_by_addr.end())
-{
-int md_peer_fd = md_it->second;
-auto md_it = msgr.clients.find(md_peer_fd);
-if (md_it != msgr.clients.end() && md_it->second->data_connection_cookie != "" &&
-req_json["data_cookie"].string_value() == md_it->second->data_connection_cookie)
-{
-// Break previous metadata/data connections for both FDs, if present
-msgr.break_data_client_pair(cli);
-msgr.break_data_client_pair(md_it->second);
-// And setup the new pair
-cli->meta_connection_fd = md_it->second->peer_fd;
-md_it->second->data_connection_fd = cli->peer_fd;
-wire_config["data_for"] = req_json["data_for"];
-}
-}
-}
-else if (req_json["data_cookie"].is_string())
-{
-auto cli = msgr.clients.at(cur_op->peer_fd);
-cli->data_connection_cookie = req_json["data_cookie"].string_value();
-}
-#ifdef WITH_RDMA
-}
 #endif
 if (cur_op->buf)
 free(cur_op->buf);
@@ -4,8 +4,6 @@
 #include <errno.h>
 #include <stdlib.h>
 #include <stdio.h>
-#include <sys/types.h>
-#include <sys/socket.h>
 
 #include "rw_blocking.h"
 

@@ -125,41 +123,3 @@ int writev_blocking(int fd, iovec *iov, int iovcnt)
 }
 return done;
 }
-
-int sendv_blocking(int fd, iovec *iov, int iovcnt, int flags)
-{
-struct msghdr msg = { 0 };
-int v = 0;
-int done = 0;
-while (v < iovcnt)
-{
-msg.msg_iov = iov+v;
-msg.msg_iovlen = iovcnt-v;
-ssize_t r = sendmsg(fd, &msg, flags);
-if (r < 0)
-{
-if (errno != EAGAIN && errno != EPIPE)
-{
-perror("sendmsg");
-exit(1);
-}
-continue;
-}
-done += r;
-while (v < iovcnt)
-{
-if (iov[v].iov_len > r)
-{
-iov[v].iov_len -= r;
-iov[v].iov_base += r;
-break;
-}
-else
-{
-r -= iov[v].iov_len;
-v++;
-}
-}
-}
-return done;
-}

@@ -10,4 +10,3 @@ int read_blocking(int fd, void *read_buf, size_t remaining);
 int write_blocking(int fd, void *write_buf, size_t remaining);
 int readv_blocking(int fd, iovec *iov, int iovcnt);
 int writev_blocking(int fd, iovec *iov, int iovcnt);
-int sendv_blocking(int fd, iovec *iov, int iovcnt, int flags);
@@ -110,10 +110,8 @@ void stub_exec_op(osd_messenger_t *msgr, osd_op_t *op)
 if (op->req.hdr.opcode == OSD_OP_SEC_READ)
 {
 op->reply.hdr.retval = op->req.sec_rw.len;
-op->buf = memalign_or_die(MEM_ALIGNMENT, op->req.sec_rw.len);
+op->buf = malloc(op->req.sec_rw.len);
 op->iov.push_back(op->buf, op->req.sec_rw.len);
-op->reply.sec_rw.attr_len = 4;
-op->bitmap = op->buf;
 }
 else if (op->req.hdr.opcode == OSD_OP_SEC_WRITE || op->req.hdr.opcode == OSD_OP_SEC_WRITE_STABLE)
 {
@@ -6,7 +6,7 @@ includedir=${prefix}/@CMAKE_INSTALL_INCLUDEDIR@
 
 Name: Vitastor
 Description: Vitastor client library
-Version: 0.6.11
+Version: 0.6.10
 Libs: -L${libdir} -lvitastor_client
 Cflags: -I${includedir}
 

@@ -5,7 +5,6 @@
 OSD_SIZE=${OSD_SIZE:-1024}
 PG_COUNT=${PG_COUNT:-1}
 PG_SIZE=${PG_SIZE:-3}
-PG_MINSIZE=${PG_MINSIZE:-2}
 OSD_COUNT=${OSD_COUNT:-3}
 SCHEME=${SCHEME:-ec}
 

@@ -26,9 +25,9 @@ if [ -n "$GLOBAL_CONF" ]; then
 fi
 
 if [ "$SCHEME" = "replicated" ]; then
-$ETCDCTL put /vitastor/config/pools '{"1":{"name":"testpool","scheme":"replicated","pg_size":'$PG_SIZE',"pg_minsize":'$PG_MINSIZE',"pg_count":'$PG_COUNT',"failure_domain":"osd"}}'
+$ETCDCTL put /vitastor/config/pools '{"1":{"name":"testpool","scheme":"replicated","pg_size":'$PG_SIZE',"pg_minsize":'$((PG_SIZE-1))',"pg_count":'$PG_COUNT',"failure_domain":"osd"}}'
 else
-$ETCDCTL put /vitastor/config/pools '{"1":{"name":"testpool","scheme":"xor","pg_size":'$PG_SIZE',"pg_minsize":'$PG_MINSIZE',"parity_chunks":1,"pg_count":'$PG_COUNT',"failure_domain":"osd"}}'
+$ETCDCTL put /vitastor/config/pools '{"1":{"name":"testpool","scheme":"xor","pg_size":'$PG_SIZE',"pg_minsize":'$((PG_SIZE-1))',"parity_chunks":1,"pg_count":'$PG_COUNT',"failure_domain":"osd"}}'
 fi
 
 sleep 2
@@ -1,17 +0,0 @@
-#!/bin/bash -ex
-
-PG_MINSIZE=1
-SCHEME=replicated
-
-. `dirname $0`/run_3osds.sh
-
-kill -INT $OSD1_PID
-kill -INT $OSD2_PID
-
-sleep 5
-
-if ! ($ETCDCTL get /vitastor/pg/state/1/ --prefix --print-value-only | jq -s -e '[ .[] | select(.state == ["active", "degraded"]) ] | length == '$PG_COUNT); then
-format_error "FAILED: $PG_COUNT PG(s) NOT ACTIVE+DEGRADED"
-fi
-
-format_green OK