Compare commits

4 Commits

SHA1 Message Date
5fab6fc5ed WIP Separate data connections 2021-12-30 01:57:13 +03:00
ec2852c598 Add minsize_1 test 2021-12-28 10:54:36 +03:00
b9f5c2a823 Support zero-copy send in fio_sec_osd to allow testing it
Preliminary results:
- CPU usage drops significantly. For example, in a T1Q8 128K write test against
  stub_uring_osd with a 10G network and an Athlon X4 860K CPU it drops from 100% to 30%.
- Latency becomes slightly worse. In a T1Q1 4K write test in the same environment
  it increases from 56 to 63 us.
- Small-write throughput also becomes slightly worse. In a T1Q128 4K write test
  against the stub, IOPS drop from 138k to ~110k (unstable, fluctuating between 100k and 120k).
  Note that this is without io_uring, of course.
2021-12-27 02:12:44 +03:00
e9d2f79aa7 Support reading bitmaps in fio_sec_osd 2021-12-27 02:12:44 +03:00
9 changed files with 337 additions and 56 deletions
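
As background for the zero-copy commit above: MSG_ZEROCOPY (Linux 4.14+) makes the kernel pin and transmit the caller's pages instead of copying them into socket buffers, which is where the CPU savings come from; the cost is per-page pinning overhead and asynchronous completion handling, which fits the slightly worse latency and small-write IOPS. A minimal sketch of the send side, with illustrative names (not code from this diff):

#include <sys/types.h>
#include <sys/socket.h>

// Minimal sketch (illustrative, not from this diff): enable zero-copy
// once per socket, then request it on each send. The kernel may still
// fall back to copying, e.g. for small sends or unsupported NICs.
static ssize_t send_zerocopy(int fd, const void *buf, size_t len)
{
    int one = 1;
    // SO_ZEROCOPY only needs to be enabled once per socket
    if (setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one)) < 0)
        return -1; // kernel too old or socket type unsupported
    return send(fd, buf, len, MSG_ZEROCOPY);
}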

View File

@@ -33,12 +33,18 @@
#include "osd_ops.h"
#include "fio_headers.h"
struct op_buf_t
{
osd_any_op_t buf;
io_u* fio_op;
};
struct sec_data
{
int connect_fd;
/* block_size = 1 << block_order (128KB by default) */
uint64_t block_order = 17, block_size = 1 << 17;
std::unordered_map<uint64_t, io_u*> queue;
std::unordered_map<uint64_t, op_buf_t*> queue;
bool last_sync = false;
/* The list of completed io_u structs. */
std::vector<io_u*> completed;
@@ -53,6 +59,7 @@ struct sec_options
int single_primary = 0;
int trace = 0;
int block_order = 17;
int zerocopy_send = 0;
};
static struct fio_option options[] = {
@@ -103,6 +110,16 @@ static struct fio_option options[] = {
.category = FIO_OPT_C_ENGINE,
.group = FIO_OPT_G_FILENAME,
},
{
.name = "zerocopy_send",
.lname = "Use zero-copy send",
.type = FIO_OPT_BOOL,
.off1 = offsetof(struct sec_options, zerocopy_send),
.help = "Use zero-copy send (MSG_ZEROCOPY)",
.def = "0",
.category = FIO_OPT_C_ENGINE,
.group = FIO_OPT_G_FILENAME,
},
{
.name = NULL,
},
@@ -173,6 +190,14 @@ static int sec_init(struct thread_data *td)
}
int one = 1;
setsockopt(bsd->connect_fd, SOL_TCP, TCP_NODELAY, &one, sizeof(one));
if (o->zerocopy_send)
{
if (setsockopt(bsd->connect_fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one)) < 0)
{
perror("setsockopt zerocopy");
return 1;
}
}
// FIXME: read config (block size) from OSD
@@ -193,7 +218,9 @@ static enum fio_q_status sec_queue(struct thread_data *td, struct io_u *io)
}
io->engine_data = bsd;
osd_any_op_t op = { 0 };
op_buf_t *op_buf = new op_buf_t;
op_buf->fio_op = io;
osd_any_op_t &op = op_buf->buf;
op.hdr.magic = SECONDARY_OSD_OP_MAGIC;
op.hdr.id = n;
@@ -269,19 +296,18 @@ static enum fio_q_status sec_queue(struct thread_data *td, struct io_u *io)
io->error = 0;
bsd->inflight++;
bsd->op_n++;
bsd->queue[n] = io;
bsd->queue[n] = op_buf;
iovec iov[2] = { { .iov_base = op.buf, .iov_len = OSD_PACKET_SIZE } };
int iovcnt = 1, wtotal = OSD_PACKET_SIZE;
if (io->ddir == DDIR_WRITE)
{
iov[1] = { .iov_base = io->xfer_buf, .iov_len = io->xfer_buflen };
iov[iovcnt++] = { .iov_base = io->xfer_buf, .iov_len = io->xfer_buflen };
wtotal += io->xfer_buflen;
iovcnt++;
}
if (writev_blocking(bsd->connect_fd, iov, iovcnt) != wtotal)
if (sendv_blocking(bsd->connect_fd, iov, iovcnt, opt->zerocopy_send ? MSG_ZEROCOPY : 0) != wtotal)
{
perror("writev");
perror("sendmsg");
exit(1);
}
@@ -310,7 +336,8 @@ static int sec_getevents(struct thread_data *td, unsigned int min, unsigned int
fprintf(stderr, "bad reply: op id %lx missing in local queue\n", reply.hdr.id);
exit(1);
}
io_u* io = it->second;
io_u* io = it->second->fio_op;
delete it->second;
bsd->queue.erase(it);
if (io->ddir == DDIR_READ)
{
@@ -319,7 +346,23 @@ static int sec_getevents(struct thread_data *td, unsigned int min, unsigned int
fprintf(stderr, "Short read: retval = %ld instead of %llu\n", reply.hdr.retval, io->xfer_buflen);
exit(1);
}
read_blocking(bsd->connect_fd, io->xfer_buf, io->xfer_buflen);
// Support bitmap
uint64_t bitmap = 0;
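// NB: 'bitmap' holds the bitmap value inline when attr_len <= 8; for longer
// attributes it is reused to store a pointer to a malloc'ed buffer (freed below)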
int iovcnt = 0;
iovec iov[2];
if (reply.sec_rw.attr_len > 0)
{
if (reply.sec_rw.attr_len <= 8)
iov[iovcnt++] = { .iov_base = &bitmap, .iov_len = reply.sec_rw.attr_len };
else
iov[iovcnt++] = { .iov_base = (void*)(bitmap = (uint64_t)malloc(reply.sec_rw.attr_len)), .iov_len = reply.sec_rw.attr_len };
}
iov[iovcnt++] = { .iov_base = io->xfer_buf, .iov_len = io->xfer_buflen };
readv_blocking(bsd->connect_fd, iov, iovcnt);
if (reply.sec_rw.attr_len > 8)
{
free((void*)bitmap);
}
}
else if (io->ddir == DDIR_WRITE)
{
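
One caveat in the engine code above: each MSG_ZEROCOPY send queues a completion notification on the socket's error queue, and strictly speaking the send buffer must not be reused until that notification arrives. A benchmark stub can get away with ignoring them, but a real implementation should drain the error queue. A hypothetical helper, assuming Linux and TCP over IPv4/IPv6 (not part of this diff):

#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/errqueue.h>

// Hypothetical helper (not in this diff): read MSG_ZEROCOPY completion
// notifications from the socket error queue. Each one covers a range
// [ee_info, ee_data] of zero-copy send sequence numbers.
static void drain_zerocopy_completions(int fd)
{
    char control[100];
    while (true)
    {
        msghdr msg = {};
        msg.msg_control = control;
        msg.msg_controllen = sizeof(control);
        // Error-queue reads never block; -1/EAGAIN means nothing pending
        if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
            break;
        for (cmsghdr *cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
        {
            if ((cm->cmsg_level == SOL_IP && cm->cmsg_type == IP_RECVERR) ||
                (cm->cmsg_level == SOL_IPV6 && cm->cmsg_type == IPV6_RECVERR))
            {
                auto serr = (const sock_extended_err*)CMSG_DATA(cm);
                if (serr->ee_origin == SO_EE_ORIGIN_ZEROCOPY)
                {
                    // Sends numbered serr->ee_info..serr->ee_data completed;
                    // their buffers may be reused from this point on
                }
            }
        }
    }
}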

View File

@@ -4,10 +4,12 @@
#include <unistd.h>
#include <fcntl.h>
#include <sys/socket.h>
#include <sys/random.h>
#include <sys/epoll.h>
#include <netinet/tcp.h>
#include <stdexcept>
#include "base64.h"
#include "addr_util.h"
#include "messenger.h"
@@ -194,7 +196,7 @@ void osd_messenger_t::connect_peer(uint64_t peer_osd, json11::Json peer_state)
try_connect_peer(peer_osd);
}
void osd_messenger_t::try_connect_peer(uint64_t peer_osd)
void osd_messenger_t::try_connect_peer(osd_num_t peer_osd)
{
auto wp_it = wanted_peers.find(peer_osd);
if (wp_it == wanted_peers.end() || wp_it->second.connecting ||
@@ -215,40 +217,75 @@ void osd_messenger_t::try_connect_peer(uint64_t peer_osd)
wp.cur_addr = wp.address_list[wp.address_index].string_value();
wp.cur_port = wp.port;
wp.connecting = true;
try_connect_peer_addr(peer_osd, wp.cur_addr.c_str(), wp.cur_port);
try_connect_peer_addr(peer_osd, wp.cur_addr.c_str(), wp.cur_port, NULL, [this](osd_num_t peer_osd, int peer_fd)
{
if (peer_fd >= 0)
osd_peer_fds[peer_osd] = peer_fd;
on_connect_peer(peer_osd, peer_fd);
});
}
void osd_messenger_t::try_connect_peer_addr(osd_num_t peer_osd, const char *peer_host, int peer_port)
static std::string urandom_str(int bytes)
{
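// Fill the string from the kernel CSPRNG; getrandom() may return fewer
// bytes than requested, so loop until the buffer is full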
std::string str;
str.resize(bytes);
char *buf = (char*)str.data();
while (bytes > 0)
{
int r = getrandom(buf, bytes, 0);
if (r < 0)
throw std::runtime_error(std::string("getrandom: ") + strerror(errno));
buf += r;
bytes -= r;
}
return str;
}
void osd_messenger_t::try_connect_peer_addr(osd_num_t peer_osd, const char *peer_host, int peer_port,
osd_client_t *meta_cl, std::function<void(osd_num_t, int)> connect_callback)
{
assert(peer_osd != this->osd_num);
struct sockaddr addr;
if (!string_to_addr(peer_host, 0, peer_port, &addr))
if (!meta_cl)
{
on_connect_peer(peer_osd, -EINVAL);
return;
if (!string_to_addr(peer_host, 0, peer_port, &addr))
{
connect_callback(peer_osd, -EINVAL);
return;
}
}
else
{
addr = meta_cl->peer_addr;
}
int peer_fd = socket(addr.sa_family, SOCK_STREAM, 0);
if (peer_fd >= 0)
{
fcntl(peer_fd, F_SETFL, fcntl(peer_fd, F_GETFL, 0) | O_NONBLOCK);
int r = connect(peer_fd, (sockaddr*)&addr, sizeof(addr));
if (r < 0 && errno != EINPROGRESS)
{
close(peer_fd);
peer_fd = -1;
}
}
if (peer_fd < 0)
{
on_connect_peer(peer_osd, -errno);
return;
}
fcntl(peer_fd, F_SETFL, fcntl(peer_fd, F_GETFL, 0) | O_NONBLOCK);
int r = connect(peer_fd, (sockaddr*)&addr, sizeof(addr));
if (r < 0 && errno != EINPROGRESS)
{
close(peer_fd);
on_connect_peer(peer_osd, -errno);
connect_callback(peer_osd, -errno);
return;
}
clients[peer_fd] = new osd_client_t();
clients[peer_fd]->peer_addr = addr;
clients[peer_fd]->peer_port = peer_port;
clients[peer_fd]->peer_port = ntohs(((struct sockaddr_in*)&addr)->sin_port);
clients[peer_fd]->peer_fd = peer_fd;
clients[peer_fd]->peer_state = PEER_CONNECTING;
clients[peer_fd]->connect_timeout_id = -1;
clients[peer_fd]->connect_callback = connect_callback;
clients[peer_fd]->osd_num = peer_osd;
clients[peer_fd]->in_buf = malloc_or_die(receive_buffer_size);
clients[peer_fd]->data_for = meta_cl ? addr_to_string(meta_cl->peer_addr) : "";
clients[peer_fd]->data_connection_cookie = meta_cl
? meta_cl->data_connection_cookie : base64_encode(urandom_str(12));
tfd->set_fd_handler(peer_fd, true, [this](int peer_fd, int epoll_events)
{
// Either OUT (connected) or HUP
@@ -258,10 +295,12 @@ void osd_messenger_t::try_connect_peer_addr(osd_num_t peer_osd, const char *peer
{
clients[peer_fd]->connect_timeout_id = tfd->set_timer(1000*peer_connect_timeout, false, [this, peer_fd](int timer_id)
{
osd_num_t peer_osd = clients.at(peer_fd)->osd_num;
auto cl = clients.at(peer_fd);
auto connect_callback = cl->connect_callback;
cl->connect_callback = NULL;
osd_num_t peer_osd = cl->osd_num;
stop_client(peer_fd, true);
on_connect_peer(peer_osd, -EPIPE);
return;
connect_callback(peer_osd, -EPIPE);
});
}
}
@@ -283,8 +322,10 @@ void osd_messenger_t::handle_connect_epoll(int peer_fd)
}
if (result != 0)
{
auto connect_callback = cl->connect_callback;
cl->connect_callback = NULL;
stop_client(peer_fd, true);
on_connect_peer(peer_osd, -result);
connect_callback(peer_osd, -result);
return;
}
int one = 1;
@@ -364,6 +405,11 @@ void osd_messenger_t::on_connect_peer(osd_num_t peer_osd, int peer_fd)
void osd_messenger_t::check_peer_config(osd_client_t *cl)
{
json11::Json::object payload;
if (cl->data_connection_cookie != "")
{
payload["data_cookie"] = cl->data_connection_cookie;
}
osd_op_t *op = new osd_op_t();
op->op_type = OSD_OP_OUT;
op->peer_fd = cl->peer_fd;
@@ -376,24 +422,33 @@ void osd_messenger_t::check_peer_config(osd_client_t *cl)
},
},
};
#ifdef WITH_RDMA
if (rdma_context)
if (cl->data_for == "")
{
cl->rdma_conn = msgr_rdma_connection_t::create(rdma_context, rdma_max_send, rdma_max_recv, rdma_max_sge, rdma_max_msg);
if (cl->rdma_conn)
#ifdef WITH_RDMA
if (rdma_context)
{
json11::Json payload = json11::Json::object {
{ "connect_rdma", cl->rdma_conn->addr.to_string() },
{ "rdma_max_msg", cl->rdma_conn->max_msg },
};
std::string payload_str = payload.dump();
op->req.show_conf.json_len = payload_str.size();
op->buf = malloc_or_die(payload_str.size());
op->iov.push_back(op->buf, payload_str.size());
memcpy(op->buf, payload_str.c_str(), payload_str.size());
cl->rdma_conn = msgr_rdma_connection_t::create(rdma_context, rdma_max_send, rdma_max_recv, rdma_max_sge, rdma_max_msg);
if (cl->rdma_conn)
{
payload["connect_rdma"] = cl->rdma_conn->addr.to_string();
payload["rdma_max_msg"] = cl->rdma_conn->max_msg;
}
}
}
#endif
}
else
{
// Mark it as a data connection
payload["data_for"] = cl->data_for;
}
if (payload.size())
{
std::string payload_str = json11::Json(payload).dump();
op->req.show_conf.json_len = payload_str.size();
op->buf = malloc_or_die(payload_str.size());
op->iov.push_back(op->buf, payload_str.size());
memcpy(op->buf, payload_str.c_str(), payload_str.size());
}
op->callback = [this, cl](osd_op_t *op)
{
std::string json_err;
@@ -426,18 +481,30 @@ void osd_messenger_t::check_peer_config(osd_client_t *cl)
cl->osd_num, config["protocol_version"].uint64_value(), OSD_PROTOCOL_VERSION
);
}
else if (cl->data_for != "" && config["data_for"] != cl->data_for)
{
err = true;
fprintf(
stderr, "OSD %lu does not support separate data connections."
" Proceeding with a single connection\n", cl->osd_num
);
}
}
if (err)
{
osd_num_t peer_osd = cl->osd_num;
auto connect_callback = cl->connect_callback;
cl->connect_callback = NULL;
stop_client(op->peer_fd);
on_connect_peer(peer_osd, -1);
connect_callback(peer_osd, -EINVAL);
delete op;
return;
}
#ifdef WITH_RDMA
if (config["rdma_address"].is_string())
if (rdma_context && cl->rdma_conn && config["rdma_address"].is_string())
{
// Prevent creating data connection - we are trying RDMA
cl->data_connection_cookie = "";
msgr_rdma_address_t addr;
if (!msgr_rdma_address_t::from_string(config["rdma_address"].string_value().c_str(), &addr) ||
cl->rdma_conn->connect(&addr) != 0)
@@ -450,8 +517,10 @@ void osd_messenger_t::check_peer_config(osd_client_t *cl)
cl->rdma_conn = NULL;
// FIXME: Keep TCP connection in this case
osd_num_t peer_osd = cl->osd_num;
auto connect_callback = cl->connect_callback;
cl->connect_callback = NULL;
stop_client(cl->peer_fd);
on_connect_peer(peer_osd, -1);
connect_callback(peer_osd, -EPIPE);
delete op;
return;
}
@@ -473,8 +542,37 @@ void osd_messenger_t::check_peer_config(osd_client_t *cl)
}
}
#endif
osd_peer_fds[cl->osd_num] = cl->peer_fd;
on_connect_peer(cl->osd_num, cl->peer_fd);
if (cl->data_connection_cookie != "")
{
// Try to open second connection to the same address
try_connect_peer_addr(cl->osd_num, NULL, 0, cl, [this, peer_fd = cl->peer_fd](osd_num_t data_peer, int data_peer_fd)
{
auto cl_it = clients.find(peer_fd);
if (cl_it != clients.end())
{
// Proceed with or without the data connection
auto cl = cl_it->second;
if (data_peer_fd >= 0)
{
cl->data_connection_fd = data_peer_fd;
auto data_cl = clients.at(data_peer_fd);
data_cl->meta_connection_fd = cl->peer_fd;
}
osd_peer_fds[cl->osd_num] = cl->peer_fd;
on_connect_peer(cl->osd_num, cl->peer_fd);
}
else if (data_peer_fd >= 0)
{
stop_client(data_peer_fd);
}
});
}
else
{
auto connect_callback = cl->connect_callback;
cl->connect_callback = NULL;
connect_callback(cl->osd_num, cl->peer_fd);
}
delete op;
};
outbox_push(op);
@@ -500,6 +598,7 @@ void osd_messenger_t::accept_connections(int listen_fd)
clients[peer_fd]->peer_fd = peer_fd;
clients[peer_fd]->peer_state = PEER_CONNECTED;
clients[peer_fd]->in_buf = malloc_or_die(receive_buffer_size);
clients_by_addr[addr_to_string(addr)] = peer_fd;
// Add FD to epoll
tfd->set_fd_handler(peer_fd, false, [this](int peer_fd, int epoll_events)
{
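
Stepping back, the separate-data-connection handshake implemented above works roughly as follows: the connecting messenger generates a random base64 cookie for the metadata connection, announces it via SHOW_CONFIG, and after the config exchange opens a second connection to the same address that names the connection it belongs to. A sketch of the two payloads as the code appears to build them (this is a WIP commit; the values are illustrative):

#include "json11/json11.hpp"

// 1) Metadata connection: announce a randomly generated cookie.
json11::Json meta_payload = json11::Json::object {
    { "data_cookie", "AbCdEf012345gHiJ" }, // base64_encode(urandom_str(12))
};
// 2) Second (data) connection to the same OSD: name the metadata
//    connection (data_for) and repeat its cookie as proof of ownership.
json11::Json data_payload = json11::Json::object {
    { "data_for", "10.115.0.10:6801" },    // addr_to_string(meta_cl->peer_addr)
    { "data_cookie", "AbCdEf012345gHiJ" },
};
// The accepting OSD (exec_show_config, later in this diff) looks data_for
// up in clients_by_addr, pairs the two FDs only if the cookies match, and
// echoes data_for back so the client knows pairing succeeded.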

View File

@@ -57,6 +57,10 @@ struct osd_client_t
int ping_time_remaining = 0;
int idle_time_remaining = 0;
osd_num_t osd_num = 0;
std::function<void(osd_num_t, int)> connect_callback;
int data_connection_fd = -1, meta_connection_fd = -1;
std::string data_connection_cookie, data_for;
void *in_buf = NULL;
@@ -148,6 +152,7 @@ public:
osd_num_t osd_num;
uint64_t next_subop_id = 1;
std::map<int, osd_client_t*> clients;
std::map<std::string, int> clients_by_addr;
std::map<osd_num_t, osd_wanted_peer_t> wanted_peers;
std::map<uint64_t, int> osd_peer_fds;
// op statistics
@@ -157,6 +162,7 @@ public:
void parse_config(const json11::Json & config);
void connect_peer(uint64_t osd_num, json11::Json peer_state);
void stop_client(int peer_fd, bool force = false, bool force_delete = false);
void break_data_client_pair(osd_client_t *cl);
void outbox_push(osd_op_t *cur_op);
std::function<void(osd_op_t*)> exec_op;
std::function<void(osd_num_t)> repeer_pgs;
@@ -174,7 +180,8 @@ public:
protected:
void try_connect_peer(uint64_t osd_num);
void try_connect_peer_addr(osd_num_t peer_osd, const char *peer_host, int peer_port);
void try_connect_peer_addr(osd_num_t peer_osd, const char *peer_host, int peer_port,
osd_client_t *meta_cl, std::function<void(osd_num_t, int)> connect_callback);
void handle_peer_epoll(int peer_fd, int epoll_events);
void handle_connect_epoll(int peer_fd);
void on_connect_peer(osd_num_t peer_osd, int peer_fd);

View File

@@ -4,6 +4,7 @@
#include <unistd.h>
#include <assert.h>
#include "addr_util.h"
#include "messenger.h"
void osd_messenger_t::cancel_osd_ops(osd_client_t *cl)
@@ -58,7 +59,8 @@ void osd_messenger_t::stop_client(int peer_fd, bool force, bool force_delete)
{
if (cl->osd_num)
{
fprintf(stderr, "[OSD %lu] Stopping client %d (OSD peer %lu)\n", osd_num, peer_fd, cl->osd_num);
fprintf(stderr, "[OSD %lu] Stopping client %d (OSD %speer %lu)\n",
osd_num, peer_fd, cl->meta_connection_fd >= 0 ? "data " : "", cl->osd_num);
}
else
{
@@ -68,7 +70,7 @@ void osd_messenger_t::stop_client(int peer_fd, bool force, bool force_delete)
// First set state to STOPPED so another stop_client() call doesn't try to free it again
cl->refs++;
cl->peer_state = PEER_STOPPED;
if (cl->osd_num)
if (cl->osd_num && cl->meta_connection_fd < 0)
{
// ...and forget OSD peer
osd_peer_fds.erase(cl->osd_num);
@@ -100,9 +102,17 @@ void osd_messenger_t::stop_client(int peer_fd, bool force, bool force_delete)
#endif
if (cl->osd_num)
{
// Then repeer PGs because cancel_op() callbacks can try to perform
// some actions and we need correct PG states to not do something silly
repeer_pgs(cl->osd_num);
if (cl->meta_connection_fd < 0)
{
// Then repeer PGs because cancel_op() callbacks can try to perform
// some actions and we need correct PG states to not do something silly
repeer_pgs(cl->osd_num);
}
else
{
// FIXME Try to re-establish data connection
// Only when the connection is outbound, but here it's always outbound
}
}
// Then cancel all operations
if (cl->read_op)
@@ -128,6 +138,7 @@ void osd_messenger_t::stop_client(int peer_fd, bool force, bool force_delete)
delete cl->rdma_conn;
}
#endif
clients_by_addr.erase(addr_to_string(cl->peer_addr));
// Find the item again because it can be invalidated at this point
it = clients.find(peer_fd);
@@ -135,9 +146,40 @@ void osd_messenger_t::stop_client(int peer_fd, bool force, bool force_delete)
{
clients.erase(it);
}
// Break metadata/data connection pair
if (cl->data_connection_fd >= 0)
{
// No sense to keep data connection when metadata connection is stopped
auto dc_it = clients.find(cl->data_connection_fd);
cl->data_connection_fd = -1;
if (dc_it != clients.end() && dc_it->second->meta_connection_fd == cl->peer_fd)
{
stop_client(dc_it->second->peer_fd);
}
}
break_data_client_pair(cl);
// Refcount and delete
cl->refs--;
if (cl->refs <= 0 || force_delete)
{
delete cl;
}
}
void osd_messenger_t::break_data_client_pair(osd_client_t *cl)
{
if (cl->meta_connection_fd >= 0)
{
auto dc_it = clients.find(cl->meta_connection_fd);
if (dc_it != clients.end() && dc_it->second->data_connection_fd == cl->peer_fd)
dc_it->second->data_connection_fd = -1;
cl->meta_connection_fd = -1;
}
if (cl->data_connection_fd >= 0)
{
auto dc_it = clients.find(cl->data_connection_fd);
if (dc_it != clients.end() && dc_it->second->meta_connection_fd == cl->peer_fd)
dc_it->second->meta_connection_fd = -1;
cl->data_connection_fd = -1;
}
}

View File

@@ -178,6 +178,37 @@ void osd_t::exec_show_config(osd_op_t *cur_op)
}
}
}
else
{
#endif
if (req_json["data_for"].is_string())
{
auto cli = msgr.clients.at(cur_op->peer_fd);
auto md_it = msgr.clients_by_addr.find(req_json["data_for"].string_value());
if (md_it != msgr.clients_by_addr.end())
{
int md_peer_fd = md_it->second;
auto mc_it = msgr.clients.find(md_peer_fd);
if (mc_it != msgr.clients.end() && mc_it->second->data_connection_cookie != "" &&
req_json["data_cookie"].string_value() == mc_it->second->data_connection_cookie)
{
// Break previous metadata/data connection pairs for both FDs, if present
msgr.break_data_client_pair(cli);
msgr.break_data_client_pair(mc_it->second);
// And set up the new pair
cli->meta_connection_fd = mc_it->second->peer_fd;
mc_it->second->data_connection_fd = cli->peer_fd;
wire_config["data_for"] = req_json["data_for"];
}
}
}
else if (req_json["data_cookie"].is_string())
{
auto cli = msgr.clients.at(cur_op->peer_fd);
cli->data_connection_cookie = req_json["data_cookie"].string_value();
}
#ifdef WITH_RDMA
}
#endif
if (cur_op->buf)
free(cur_op->buf);

View File

@@ -4,6 +4,8 @@
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/socket.h>
#include "rw_blocking.h"
@@ -123,3 +125,41 @@ int writev_blocking(int fd, iovec *iov, int iovcnt)
}
return done;
}
int sendv_blocking(int fd, iovec *iov, int iovcnt, int flags)
{
struct msghdr msg = { 0 };
int v = 0;
int done = 0;
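// 'v' indexes the first iovec that still has unsent bytes;
// 'done' counts the total bytes sent so far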
while (v < iovcnt)
{
msg.msg_iov = iov+v;
msg.msg_iovlen = iovcnt-v;
ssize_t r = sendmsg(fd, &msg, flags);
if (r < 0)
{
if (errno == EPIPE)
{
// Peer closed the connection: return the bytes sent so far so the
// caller detects a short write instead of retrying EPIPE forever
return done;
}
if (errno != EAGAIN)
{
perror("sendmsg");
exit(1);
}
continue;
}
done += r;
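// Advance past fully sent iovecs and shrink the first partially sent one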
while (v < iovcnt)
{
if (iov[v].iov_len > r)
{
iov[v].iov_len -= r;
iov[v].iov_base += r;
break;
}
else
{
r -= iov[v].iov_len;
v++;
}
}
}
return done;
}

View File

@@ -10,3 +10,4 @@ int read_blocking(int fd, void *read_buf, size_t remaining);
int write_blocking(int fd, void *write_buf, size_t remaining);
int readv_blocking(int fd, iovec *iov, int iovcnt);
int writev_blocking(int fd, iovec *iov, int iovcnt);
int sendv_blocking(int fd, iovec *iov, int iovcnt, int flags);

View File

@@ -5,6 +5,7 @@
OSD_SIZE=${OSD_SIZE:-1024}
PG_COUNT=${PG_COUNT:-1}
PG_SIZE=${PG_SIZE:-3}
PG_MINSIZE=${PG_MINSIZE:-2}
OSD_COUNT=${OSD_COUNT:-3}
SCHEME=${SCHEME:-ec}
@@ -25,9 +26,9 @@ if [ -n "$GLOBAL_CONF" ]; then
fi
if [ "$SCHEME" = "replicated" ]; then
$ETCDCTL put /vitastor/config/pools '{"1":{"name":"testpool","scheme":"replicated","pg_size":'$PG_SIZE',"pg_minsize":'$((PG_SIZE-1))',"pg_count":'$PG_COUNT',"failure_domain":"osd"}}'
$ETCDCTL put /vitastor/config/pools '{"1":{"name":"testpool","scheme":"replicated","pg_size":'$PG_SIZE',"pg_minsize":'$PG_MINSIZE',"pg_count":'$PG_COUNT',"failure_domain":"osd"}}'
else
$ETCDCTL put /vitastor/config/pools '{"1":{"name":"testpool","scheme":"xor","pg_size":'$PG_SIZE',"pg_minsize":'$((PG_SIZE-1))',"parity_chunks":1,"pg_count":'$PG_COUNT',"failure_domain":"osd"}}'
$ETCDCTL put /vitastor/config/pools '{"1":{"name":"testpool","scheme":"xor","pg_size":'$PG_SIZE',"pg_minsize":'$PG_MINSIZE',"parity_chunks":1,"pg_count":'$PG_COUNT',"failure_domain":"osd"}}'
fi
sleep 2

tests/test_minsize_1.sh (new executable file, 17 additions)
View File

@@ -0,0 +1,17 @@
#!/bin/bash -ex
PG_MINSIZE=1
SCHEME=replicated
. `dirname $0`/run_3osds.sh
kill -INT $OSD1_PID
kill -INT $OSD2_PID
sleep 5
if ! ($ETCDCTL get /vitastor/pg/state/1/ --prefix --print-value-only | jq -s -e '[ .[] | select(.state == ["active", "degraded"]) ] | length == '$PG_COUNT); then
format_error "FAILED: $PG_COUNT PG(s) NOT ACTIVE+DEGRADED"
fi
format_green OK