Compare commits
3 Commits
4cf6dceed7...aa79d1db1c

Author | SHA1 | Date
---|---|---
Vitaliy Filippov | aa79d1db1c |
Vitaliy Filippov | a1fecb7eff |
Vitaliy Filippov | ff74b19423 |
@@ -32,7 +32,7 @@ void blockstore_init_meta::handle_event(ring_data_t *data, int buf_num)
     if (data->res < 0)
     {
         throw std::runtime_error(
-            std::string("read metadata failed at offset ") + std::to_string(bufs[buf_num].offset) +
+            std::string("read metadata failed at offset ") + std::to_string(buf_num >= 0 ? bufs[buf_num].offset : last_read_offset) +
             std::string(": ") + strerror(-data->res)
         );
     }
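The hunk above is the user-visible part of the change: the old error text always read bufs[buf_num].offset, which is not a valid index for the requests that are submitted with a -1 buffer slot in the hunks below (superblock read/write, zeroing writes, fsync). The new code appears to treat buf_num == -1 as "no buffer slot" and falls back to a new last_read_offset field that is saved right before each such submission. A minimal sketch of that convention, reusing the names from the diff purely for illustration:

    // Hedged sketch: choose the offset to report when a metadata request fails.
    // buf_num >= 0  -> the request belongs to a tracked read buffer, use its offset.
    // buf_num == -1 -> a one-off request; last_read_offset was recorded at submit time.
    uint64_t failed_at = buf_num >= 0 ? bufs[buf_num].offset : last_read_offset;
    throw std::runtime_error(
        std::string("read metadata failed at offset ") + std::to_string(failed_at) +
        std::string(": ") + strerror(-data->res)
    );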
@@ -63,6 +63,7 @@ int blockstore_init_meta::loop()
         throw std::runtime_error("Failed to allocate metadata read buffer");
     // Read superblock
     GET_SQE();
+    last_read_offset = 0;
     data->iov = { metadata_buffer, (size_t)bs->dsk.meta_block_size };
     data->callback = [this](ring_data_t *data) { handle_event(data, -1); };
     my_uring_prep_readv(sqe, bs->dsk.meta_fd, &data->iov, 1, bs->dsk.meta_offset);
@@ -100,6 +101,7 @@ resume_1:
     {
         printf("Initializing metadata area\n");
         GET_SQE();
+        last_read_offset = 0;
         data->iov = (struct iovec){ metadata_buffer, (size_t)bs->dsk.meta_block_size };
         data->callback = [this](ring_data_t *data) { handle_event(data, -1); };
         my_uring_prep_writev(sqe, bs->dsk.meta_fd, &data->iov, 1, bs->dsk.meta_offset);
@@ -259,9 +261,11 @@ resume_2:
             next_offset = entries_to_zero[i]/entries_per_block;
             for (j = i; j < entries_to_zero.size() && entries_to_zero[j]/entries_per_block == next_offset; j++) {}
             GET_SQE();
+            last_read_offset = (1+next_offset)*bs->dsk.meta_block_size;
             data->iov = { metadata_buffer, (size_t)bs->dsk.meta_block_size };
             data->callback = [this](ring_data_t *data) { handle_event(data, -1); };
             my_uring_prep_readv(sqe, bs->dsk.meta_fd, &data->iov, 1, bs->dsk.meta_offset + (1+next_offset)*bs->dsk.meta_block_size);
             bs->ringloop->submit();
             submitted++;
resume_5:
             if (submitted > 0)
@@ -278,6 +282,7 @@ resume_5:
             data->iov = { metadata_buffer, (size_t)bs->dsk.meta_block_size };
             data->callback = [this](ring_data_t *data) { handle_event(data, -1); };
             my_uring_prep_writev(sqe, bs->dsk.meta_fd, &data->iov, 1, bs->dsk.meta_offset + (1+next_offset)*bs->dsk.meta_block_size);
             bs->ringloop->submit();
             submitted++;
resume_6:
             if (submitted > 0)
@@ -299,6 +304,7 @@ resume_6:
     {
         GET_SQE();
         my_uring_prep_fsync(sqe, bs->dsk.meta_fd, IORING_FSYNC_DATASYNC);
+        last_read_offset = 0;
        data->iov = { 0 };
        data->callback = [this](ring_data_t *data) { handle_event(data, -1); };
        submitted++;
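All of the blockstore_init_meta hunks above follow the same submission idiom: take an SQE, remember where the request points (now also into last_read_offset), fill the iovec, attach the completion callback and submit. For readers unfamiliar with io_uring, the following is a minimal, self-contained liburing sketch of a positional read that reports failures together with the offset it was submitted at. It only illustrates the idiom; GET_SQE(), my_uring_prep_readv() and ring_data_t are Vitastor wrappers and are not used here:

    // Hedged sketch, not Vitastor code: one synchronous positional readv via liburing
    // that reports errors together with the offset being read.
    #include <liburing.h>
    #include <cstring>
    #include <stdexcept>
    #include <string>

    void read_block_or_throw(int fd, void *buf, size_t len, uint64_t offset)
    {
        io_uring ring;
        if (io_uring_queue_init(8, &ring, 0) < 0)
            throw std::runtime_error("io_uring_queue_init failed");
        iovec iov = { buf, len };
        io_uring_sqe *sqe = io_uring_get_sqe(&ring);    // analogue of GET_SQE()
        if (!sqe)
            throw std::runtime_error("submission queue is full");
        io_uring_prep_readv(sqe, fd, &iov, 1, offset);  // analogue of my_uring_prep_readv(...)
        io_uring_submit(&ring);                         // analogue of bs->ringloop->submit()
        io_uring_cqe *cqe = nullptr;
        io_uring_wait_cqe(&ring, &cqe);                 // the real code waits via callbacks instead
        int res = cqe->res;                             // what handle_event() sees as data->res
        io_uring_cqe_seen(&ring, cqe);
        io_uring_queue_exit(&ring);
        if (res < 0)
            throw std::runtime_error(
                std::string("read metadata failed at offset ") + std::to_string(offset) +
                ": " + strerror(-res));
    }

In the real code the completion is event-driven: the callback stored in ring_data_t is invoked later by the ring loop, which is presumably why the offset has to be stashed in a member field (last_read_offset) rather than kept on the stack.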
@@ -23,6 +23,7 @@ class blockstore_init_meta
     struct ring_data_t *data;
     uint64_t md_offset = 0;
     uint64_t next_offset = 0;
+    uint64_t last_read_offset = 0;
     uint64_t entries_loaded = 0;
     unsigned entries_per_block = 0;
     int i = 0, j = 0;
@@ -64,10 +64,6 @@ std::string validate_pool_config(json11::Json::object & new_cfg, json11::Json ol
                (new_cfg["parity_chunks"].uint64_value() > 1 ? 1 : 0);
         }
     }
-    if (new_cfg["scheme"] != "ec")
-    {
-        new_cfg.erase("parity_chunks");
-    }
 
     // Check integer values and unknown keys
     for (auto kv_it = new_cfg.begin(); kv_it != new_cfg.end(); )
@@ -118,6 +114,12 @@ std::string validate_pool_config(json11::Json::object & new_cfg, json11::Json ol
         }
     }
 
+    // Check after merging
+    if (new_cfg["scheme"] != "ec")
+    {
+        new_cfg.erase("parity_chunks");
+    }
+
     // Prevent autovivification of object keys. Now we don't modify the config, we just check it
     json11::Json cfg = new_cfg;
 
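The two validate_pool_config hunks above move the scheme != "ec" / erase("parity_chunks") cleanup from before to after the point where the submitted keys are merged with the stored pool configuration (hence the new "Check after merging" comment). A likely reason, offered as an interpretation rather than something stated in the diff: before the merge, new_cfg contains only the keys the caller sent, so a partial update to an EC pool that does not resend "scheme" would compare unequal to "ec" and silently drop parity_chunks. A hedged json11 sketch of the ordering problem; the merge loop below is a hypothetical stand-in for whatever the real function does between the two hunks:

    // Hedged sketch, not validate_pool_config(): why the erase must run after merging.
    #include "json11/json11.hpp"   // include path may differ depending on how json11 is vendored
    #include <cstdio>

    int main()
    {
        // Stored pool: EC with 2 parity chunks. The client sends a partial update
        // that omits "scheme".
        json11::Json old_cfg = json11::Json::object{ { "scheme", "ec" }, { "parity_chunks", 2 } };
        json11::Json::object new_cfg{ { "pg_count", 64 } };

        // Old order: checking before the merge. "scheme" is absent in new_cfg, so the
        // comparison with "ec" fails and parity_chunks would be erased by mistake.
        bool erase_before_merge = (json11::Json(new_cfg)["scheme"] != "ec");

        // New order: pull the stored keys in first (hypothetical merge), then check.
        for (auto & kv: old_cfg.object_items())
            if (new_cfg.find(kv.first) == new_cfg.end())
                new_cfg[kv.first] = kv.second;
        bool erase_after_merge = (json11::Json(new_cfg)["scheme"] != "ec");

        printf("erase before merge: %d, erase after merge: %d\n", erase_before_merge, erase_after_merge);
        // Prints: erase before merge: 1, erase after merge: 0
    }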
@@ -238,7 +238,8 @@ void cluster_client_t::erase_op(cluster_op_t *op)
         // which may continue following SYNCs, but these SYNCs
         // should know about the changed buffer state
         // This is ugly but this is the way we do it
-        std::function<void(cluster_op_t*)>(op->callback)(op);
+        auto cb = std::move(op->callback);
+        cb(op);
     }
     if (!(flags & OP_IMMEDIATE_COMMIT) || enable_writeback)
     {
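Every cluster_client_t hunk from here on makes the same substitution: instead of invoking a freshly constructed copy of op->callback, the callback is moved into a local and then called. Read as an interpretation (the compare view carries no commit message): moving leaves op->callback empty before the call, so if the callback frees the op or resubmits it with a new callback, nothing stale is left behind to be copied or invoked twice, and one std::function copy per completion is saved. A minimal sketch of the pattern with a hypothetical op type:

    // Hedged sketch of the "move out, then call" completion pattern.
    #include <functional>

    struct op_t    // hypothetical stand-in for cluster_op_t
    {
        int retval = 0;
        std::function<void(op_t*)> callback;
    };

    void finish(op_t *op, int retval)
    {
        op->retval = retval;
        // Detach the callback from the op before invoking it: the callback may
        // delete op or re-submit it with a different callback, and after the move
        // we never touch op->callback again.
        auto cb = std::move(op->callback);
        cb(op);
    }

The old form, std::function<void(cluster_op_t*)>(op->callback)(op), also shielded the call behind a temporary copy, but it paid for that copy on every completion and left the original callback stored in the op.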
@@ -248,7 +249,8 @@ void cluster_client_t::erase_op(cluster_op_t *op)
     {
         // Call callback at the end to avoid inconsistencies in prev_wait
         // if the callback adds more operations itself
-        std::function<void(cluster_op_t*)>(op->callback)(op);
+        auto cb = std::move(op->callback);
+        cb(op);
     }
     if (flags & OP_FLUSH_BUFFER)
     {
@@ -548,7 +550,8 @@ void cluster_client_t::execute(cluster_op_t *op)
         op->opcode != OSD_OP_READ_BITMAP && op->opcode != OSD_OP_READ_CHAIN_BITMAP && op->opcode != OSD_OP_WRITE)
     {
         op->retval = -EINVAL;
-        std::function<void(cluster_op_t*)>(op->callback)(op);
+        auto cb = std::move(op->callback);
+        cb(op);
         return;
     }
     if (!pgs_loaded)
@@ -586,7 +589,8 @@ void cluster_client_t::execute_internal(cluster_op_t *op)
             wb->start_writebacks(this, 1);
         }
         op->retval = op->len;
-        std::function<void(cluster_op_t*)>(op->callback)(op);
+        auto cb = std::move(op->callback);
+        cb(op);
         return;
     }
     if (op->opcode == OSD_OP_WRITE && !(op->flags & OP_IMMEDIATE_COMMIT))
@@ -655,7 +659,8 @@ bool cluster_client_t::check_rw(cluster_op_t *op)
     if (!pool_id)
     {
         op->retval = -EINVAL;
-        std::function<void(cluster_op_t*)>(op->callback)(op);
+        auto cb = std::move(op->callback);
+        cb(op);
         return false;
     }
     auto pool_it = st_cli.pool_config.find(pool_id);
@@ -663,7 +668,8 @@ bool cluster_client_t::check_rw(cluster_op_t *op)
     {
         // Pools are loaded, but this one is unknown
         op->retval = -EINVAL;
-        std::function<void(cluster_op_t*)>(op->callback)(op);
+        auto cb = std::move(op->callback);
+        cb(op);
         return false;
     }
     // Check alignment
@@ -671,7 +677,8 @@ bool cluster_client_t::check_rw(cluster_op_t *op)
         op->offset % pool_it->second.bitmap_granularity || op->len % pool_it->second.bitmap_granularity)
     {
         op->retval = -EINVAL;
-        std::function<void(cluster_op_t*)>(op->callback)(op);
+        auto cb = std::move(op->callback);
+        cb(op);
         return false;
     }
     if (pool_it->second.immediate_commit == IMMEDIATE_ALL)
@@ -684,7 +691,8 @@ bool cluster_client_t::check_rw(cluster_op_t *op)
         if (ino_it != st_cli.inode_config.end() && ino_it->second.readonly)
         {
             op->retval = -EROFS;
-            std::function<void(cluster_op_t*)>(op->callback)(op);
+            auto cb = std::move(op->callback);
+            cb(op);
             return false;
         }
     }