// vitastor/blockstore_write.cpp
#include "blockstore_impl.h"
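// Write path of the blockstore: enqueue_write() validates the request and registers
// the new object version in dirty_db, dequeue_write()/continue_write() submit the data
// and journal writes, and dequeue_del() journals object deletions.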
bool blockstore_impl_t::enqueue_write(blockstore_op_t *op)
{
// Check or assign version number
bool found = false, deleted = false, is_del = (op->opcode == BS_OP_DELETE);
bool is_inflight_big = false;
uint64_t version = 1;
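// Look for the latest dirty (unflushed) version of the object first,
// then fall back to the clean (flushed) version from clean_db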
if (dirty_db.size() > 0)
{
auto dirty_it = dirty_db.upper_bound((obj_ver_id){
.oid = op->oid,
.version = UINT64_MAX,
});
dirty_it--; // would segfault if dirty_db were empty, but it's checked to be non-empty above
if (dirty_it != dirty_db.end() && dirty_it->first.oid == op->oid)
{
found = true;
version = dirty_it->first.version + 1;
deleted = IS_DELETE(dirty_it->second.state);
is_inflight_big = (dirty_it->second.state >= ST_D_IN_FLIGHT &&
dirty_it->second.state < ST_D_SYNCED) ||
dirty_it->second.state == ST_J_WAIT_BIG;
}
}
if (!found)
{
auto clean_it = clean_db.find(op->oid);
if (clean_it != clean_db.end())
{
version = clean_it->second.version + 1;
}
else
{
deleted = true;
}
}
if (op->version == 0)
{
op->version = version;
}
else if (op->version < version)
{
// Invalid version requested
op->retval = -EEXIST;
return false;
}
if (deleted && is_del)
{
// Already deleted
op->retval = 0;
return false;
}
if (is_inflight_big && !is_del && !deleted && op->len < block_size &&
immediate_commit != IMMEDIATE_ALL)
{
// Issue an additional sync so that the previous big write can reach the journal
blockstore_op_t *sync_op = new blockstore_op_t;
sync_op->opcode = BS_OP_SYNC;
sync_op->callback = [this, op](blockstore_op_t *sync_op)
{
delete sync_op;
};
enqueue_op(sync_op);
}
#ifdef BLOCKSTORE_DEBUG
if (is_del)
printf("Delete %lu:%lu v%lu\n", op->oid.inode, op->oid.stripe, op->version);
else
printf("Write %lu:%lu v%lu offset=%u len=%u\n", op->oid.inode, op->oid.stripe, op->version, op->offset, op->len);
#endif
// No strict need to add it into dirty_db here, it's just left
// from the previous implementation where reads waited for writes
dirty_db.emplace((obj_ver_id){
.oid = op->oid,
.version = op->version,
}, (dirty_entry){
.state = (uint32_t)(
is_del
? ST_DEL_IN_FLIGHT
: (op->len == block_size || deleted ? ST_D_IN_FLIGHT : (is_inflight_big ? ST_J_WAIT_BIG : ST_J_IN_FLIGHT))
),
.flags = 0,
.location = 0,
.offset = is_del ? 0 : op->offset,
.len = is_del ? 0 : op->len,
.journal_sector = 0,
});
return true;
}

// First step of the write algorithm: dequeue operation and submit initial write(s)
int blockstore_impl_t::dequeue_write(blockstore_op_t *op)
{
if (PRIV(op)->op_state)
{
return continue_write(op);
}
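// The dirty_db entry for this version was created by enqueue_write(), so it must exist here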
auto dirty_it = dirty_db.find((obj_ver_id){
.oid = op->oid,
.version = op->version,
});
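// ST_J_WAIT_BIG: a previous big write to the same object isn't synced yet, this small write has to wait for it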
if (dirty_it->second.state == ST_J_WAIT_BIG)
{
return 0;
}
else if (dirty_it->second.state == ST_D_IN_FLIGHT)
{
blockstore_journal_check_t space_check(this);
if (!space_check.check_available(op, unsynced_big_writes.size() + 1, sizeof(journal_entry_big_write), JOURNAL_STABILIZE_RESERVATION))
{
return 0;
}
// Big (redirect) write
uint64_t loc = data_alloc->find_free();
if (loc == UINT64_MAX)
{
// no space
if (flusher->is_active())
{
// hope that some space will be available after flush
PRIV(op)->wait_for = WAIT_FREE;
return 0;
}
op->retval = -ENOSPC;
FINISH_OP(op);
return 1;
}
BS_SUBMIT_GET_SQE(sqe, data);
dirty_it->second.location = loc << block_order;
dirty_it->second.state = ST_D_SUBMITTED;
#ifdef BLOCKSTORE_DEBUG
printf("Allocate block %lu\n", loc);
#endif
data_alloc->set(loc, true);
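// Pad the unaligned head and tail of the write with zeroes (from zero_object)
// so that a whole multiple of bitmap_granularity is written to the data device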
uint64_t stripe_offset = (op->offset % bitmap_granularity);
uint64_t stripe_end = (op->offset + op->len) % bitmap_granularity;
// Zero fill up to bitmap_granularity
int vcnt = 0;
if (stripe_offset)
{
PRIV(op)->iov_zerofill[vcnt++] = (struct iovec){ zero_object, stripe_offset };
}
PRIV(op)->iov_zerofill[vcnt++] = (struct iovec){ op->buf, op->len };
if (stripe_end)
{
stripe_end = bitmap_granularity - stripe_end;
PRIV(op)->iov_zerofill[vcnt++] = (struct iovec){ zero_object, stripe_end };
}
data->iov.iov_len = op->len + stripe_offset + stripe_end; // to check it in the callback
data->callback = [this, op](ring_data_t *data) { handle_write_event(data, op); };
my_uring_prep_writev(
sqe, data_fd, PRIV(op)->iov_zerofill, vcnt, data_offset + (loc << block_order) + op->offset - stripe_offset
);
PRIV(op)->pending_ops = 1;
PRIV(op)->min_flushed_journal_sector = PRIV(op)->max_flushed_journal_sector = 0;
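// op_state: 1 = data write submitted, the big_write journal entry follows once it completes (immediate_commit);
// 2 = writing the big_write journal entry; 3 = waiting for submitted I/O; 4 = finalize and acknowledge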
if (immediate_commit != IMMEDIATE_ALL)
{
// Remember big write as unsynced
unsynced_big_writes.push_back((obj_ver_id){
.oid = op->oid,
.version = op->version,
});
PRIV(op)->op_state = 3;
}
else
{
PRIV(op)->op_state = 1;
}
}
else
{
// Small (journaled) write
// First check if the journal has sufficient space
blockstore_journal_check_t space_check(this);
if ((unsynced_big_writes.size() && !space_check.check_available(op, unsynced_big_writes.size(), sizeof(journal_entry_big_write), 0))
|| !space_check.check_available(op, 1, sizeof(journal_entry_small_write), op->len + JOURNAL_STABILIZE_RESERVATION))
{
return 0;
}
// There is sufficient space. Get SQE(s)
struct io_uring_sqe *sqe1 = NULL;
if (immediate_commit != IMMEDIATE_NONE ||
((journal_block_size - journal.in_sector_pos) < sizeof(journal_entry_small_write) &&
journal.sector_info[journal.cur_sector].dirty))
{
// Write current journal sector only if it's dirty and full, or in the immediate_commit mode
BS_SUBMIT_GET_SQE_DECL(sqe1);
}
struct io_uring_sqe *sqe2 = NULL;
if (op->len > 0)
{
BS_SUBMIT_GET_SQE_DECL(sqe2);
}
// Got SQEs. Prepare previous journal sector write if required
auto cb = [this, op](ring_data_t *data) { handle_write_event(data, op); };
if (immediate_commit == IMMEDIATE_NONE)
{
if (sqe1)
{
prepare_journal_sector_write(journal, journal.cur_sector, sqe1, cb);
PRIV(op)->min_flushed_journal_sector = PRIV(op)->max_flushed_journal_sector = 1 + journal.cur_sector;
PRIV(op)->pending_ops++;
}
else
{
PRIV(op)->min_flushed_journal_sector = PRIV(op)->max_flushed_journal_sector = 0;
}
}
// Then pre-fill journal entry
journal_entry_small_write *je = (journal_entry_small_write*)
prefill_single_journal_entry(journal, JE_SMALL_WRITE, sizeof(journal_entry_small_write));
dirty_it->second.journal_sector = journal.sector_info[journal.cur_sector].offset;
journal.used_sectors[journal.sector_info[journal.cur_sector].offset]++;
#ifdef BLOCKSTORE_DEBUG
printf("journal offset %lu is used by %lu:%lu v%lu\n", dirty_it->second.journal_sector, dirty_it->first.oid.inode, dirty_it->first.oid.stripe, dirty_it->first.version);
#endif
// Figure out where data will be
journal.next_free = (journal.next_free + op->len) <= journal.len ? journal.next_free : journal_block_size;
je->oid = op->oid;
je->version = op->version;
je->offset = op->offset;
je->len = op->len;
je->data_offset = journal.next_free;
je->crc32_data = crc32c(0, op->buf, op->len);
je->crc32 = je_crc32((journal_entry*)je);
journal.crc32_last = je->crc32;
if (immediate_commit != IMMEDIATE_NONE)
{
prepare_journal_sector_write(journal, journal.cur_sector, sqe1, cb);
PRIV(op)->min_flushed_journal_sector = PRIV(op)->max_flushed_journal_sector = 1 + journal.cur_sector;
PRIV(op)->pending_ops++;
}
if (op->len > 0)
{
// Prepare journal data write
if (journal.inmemory)
{
// Copy data
memcpy(journal.buffer + journal.next_free, op->buf, op->len);
}
ring_data_t *data2 = ((ring_data_t*)sqe2->user_data);
data2->iov = (struct iovec){ op->buf, op->len };
data2->callback = cb;
my_uring_prep_writev(
sqe2, journal.fd, &data2->iov, 1, journal.offset + journal.next_free
);
PRIV(op)->pending_ops++;
}
else
{
// Zero-length overwrite. Allowed to bump object version in EC placement groups without actually writing data
}
dirty_it->second.location = journal.next_free;
dirty_it->second.state = ST_J_SUBMITTED;
journal.next_free += op->len;
if (journal.next_free >= journal.len)
{
journal.next_free = journal_block_size;
}
if (immediate_commit == IMMEDIATE_NONE)
{
// Remember small write as unsynced
unsynced_small_writes.push_back((obj_ver_id){
.oid = op->oid,
.version = op->version,
});
}
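// A zero-length write may not submit any I/O at all - complete the operation right away in that case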
if (!PRIV(op)->pending_ops)
{
PRIV(op)->op_state = 4;
continue_write(op);
}
else
{
PRIV(op)->op_state = 3;
}
}
inflight_writes++;
return 1;
}

int blockstore_impl_t::continue_write(blockstore_op_t *op)
{
io_uring_sqe *sqe = NULL;
journal_entry_big_write *je;
auto dirty_it = dirty_db.find((obj_ver_id){
.oid = op->oid,
.version = op->version,
});
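// op_state 2: the big write data is on disk, write the big_write journal entry (immediate_commit mode)
// op_state 4: all submitted I/O has completed, set the final object state and acknowledge the operation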
if (PRIV(op)->op_state == 2)
goto resume_2;
else if (PRIV(op)->op_state == 4)
goto resume_4;
else
return 1;
resume_2:
// Only for the immediate_commit mode: prepare and submit big_write journal entry
sqe = get_sqe();
if (!sqe)
{
return 0;
}
je = (journal_entry_big_write*)prefill_single_journal_entry(journal, JE_BIG_WRITE, sizeof(journal_entry_big_write));
dirty_it->second.journal_sector = journal.sector_info[journal.cur_sector].offset;
journal.sector_info[journal.cur_sector].dirty = false;
journal.used_sectors[journal.sector_info[journal.cur_sector].offset]++;
#ifdef BLOCKSTORE_DEBUG
printf("journal offset %lu is used by %lu:%lu v%lu\n", journal.sector_info[journal.cur_sector].offset, op->oid.inode, op->oid.stripe, op->version);
#endif
je->oid = op->oid;
je->version = op->version;
je->offset = op->offset;
je->len = op->len;
je->location = dirty_it->second.location;
je->crc32 = je_crc32((journal_entry*)je);
journal.crc32_last = je->crc32;
prepare_journal_sector_write(journal, journal.cur_sector, sqe,
[this, op](ring_data_t *data) { handle_write_event(data, op); });
PRIV(op)->min_flushed_journal_sector = PRIV(op)->max_flushed_journal_sector = 1 + journal.cur_sector;
PRIV(op)->pending_ops = 1;
PRIV(op)->op_state = 3;
return 1;
resume_4:
// Switch object state
#ifdef BLOCKSTORE_DEBUG
printf("Ack write %lu:%lu v%lu = %d\n", op->oid.inode, op->oid.stripe, op->version, dirty_it->second.state);
#endif
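// If the commit was immediate, the object is already SYNCED on disk and only needs to be
// remembered as an unstable write; otherwise it stays WRITTEN until the next sync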
bool imm = dirty_it->second.state == ST_D_SUBMITTED
? (immediate_commit == IMMEDIATE_ALL)
: (immediate_commit != IMMEDIATE_NONE);
if (imm)
{
auto & unstab = unstable_writes[op->oid];
unstab = unstab < op->version ? op->version : unstab;
}
if (dirty_it->second.state == ST_J_SUBMITTED)
{
dirty_it->second.state = imm ? ST_J_SYNCED : ST_J_WRITTEN;
}
else if (dirty_it->second.state == ST_D_SUBMITTED)
{
dirty_it->second.state = imm ? ST_D_SYNCED : ST_D_WRITTEN;
}
else if (dirty_it->second.state == ST_DEL_SUBMITTED)
{
dirty_it->second.state = imm ? ST_DEL_SYNCED : ST_DEL_WRITTEN;
}
if (immediate_commit == IMMEDIATE_ALL)
{
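// The big write is now committed to the journal, so subsequent small writes
// to the same object no longer have to wait for it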
dirty_it++;
while (dirty_it != dirty_db.end() && dirty_it->first.oid == op->oid)
{
if (dirty_it->second.state == ST_J_WAIT_BIG)
{
dirty_it->second.state = ST_J_IN_FLIGHT;
}
dirty_it++;
}
}
inflight_writes--;
// Acknowledge write
op->retval = op->len;
FINISH_OP(op);
return 1;
}

void blockstore_impl_t::handle_write_event(ring_data_t *data, blockstore_op_t *op)
{
live = true;
if (data->res != data->iov.iov_len)
{
inflight_writes--;
// FIXME: our state becomes corrupted after a write error. maybe do something better than just die
throw std::runtime_error(
"write operation failed ("+std::to_string(data->res)+" != "+std::to_string(data->iov.iov_len)+
"). in-memory state is corrupted. AAAAAAAaaaaaaaaa!!!111"
);
}
PRIV(op)->pending_ops--;
if (PRIV(op)->pending_ops == 0)
{
release_journal_sectors(op);
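// Advance the operation's state machine; if continue_write() can't get an SQE
// right now it returns 0 and the operation is re-queued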
PRIV(op)->op_state++;
if (!continue_write(op))
{
submit_queue.push_front(op);
}
}
}

void blockstore_impl_t::release_journal_sectors(blockstore_op_t *op)
{
// Release flushed journal sectors
if (PRIV(op)->min_flushed_journal_sector > 0 &&
PRIV(op)->max_flushed_journal_sector > 0)
{
uint64_t s = PRIV(op)->min_flushed_journal_sector;
while (1)
{
journal.sector_info[s-1].usage_count--;
if (s != (1+journal.cur_sector) && journal.sector_info[s-1].usage_count == 0)
{
// We know for sure that we won't write into this sector anymore
uint64_t new_ds = journal.sector_info[s-1].offset + journal.block_size;
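// Advance dirty_start to new_ds if it lies further along the circular journal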
if ((journal.dirty_start + (journal.dirty_start >= journal.used_start ? 0 : journal.len)) <
(new_ds + (new_ds >= journal.used_start ? 0 : journal.len)))
{
journal.dirty_start = new_ds;
}
}
if (s == PRIV(op)->max_flushed_journal_sector)
break;
s = 1 + s % journal.sector_count;
}
PRIV(op)->min_flushed_journal_sector = PRIV(op)->max_flushed_journal_sector = 0;
}
}

int blockstore_impl_t::dequeue_del(blockstore_op_t *op)
{
auto dirty_it = dirty_db.find((obj_ver_id){
.oid = op->oid,
.version = op->version,
});
blockstore_journal_check_t space_check(this);
if (!space_check.check_available(op, 1, sizeof(journal_entry_del), 0))
{
return 0;
}
io_uring_sqe *sqe = NULL;
if (immediate_commit != IMMEDIATE_NONE ||
((journal_block_size - journal.in_sector_pos) < sizeof(journal_entry_del) &&
journal.sector_info[journal.cur_sector].dirty))
{
// Write current journal sector only if it's dirty and full, or in the immediate_commit mode
BS_SUBMIT_GET_SQE_DECL(sqe);
}
auto cb = [this, op](ring_data_t *data) { handle_write_event(data, op); };
// Prepare journal sector write
if (immediate_commit == IMMEDIATE_NONE)
{
if (sqe)
{
prepare_journal_sector_write(journal, journal.cur_sector, sqe, cb);
PRIV(op)->min_flushed_journal_sector = PRIV(op)->max_flushed_journal_sector = 1 + journal.cur_sector;
PRIV(op)->pending_ops++;
}
else
{
PRIV(op)->min_flushed_journal_sector = PRIV(op)->max_flushed_journal_sector = 0;
}
}
// Pre-fill journal entry
journal_entry_del *je = (journal_entry_del*)
prefill_single_journal_entry(journal, JE_DELETE, sizeof(struct journal_entry_del));
dirty_it->second.journal_sector = journal.sector_info[journal.cur_sector].offset;
journal.used_sectors[journal.sector_info[journal.cur_sector].offset]++;
#ifdef BLOCKSTORE_DEBUG
printf("journal offset %lu is used by %lu:%lu v%lu\n", dirty_it->second.journal_sector, dirty_it->first.oid.inode, dirty_it->first.oid.stripe, dirty_it->first.version);
#endif
je->oid = op->oid;
je->version = op->version;
je->crc32 = je_crc32((journal_entry*)je);
journal.crc32_last = je->crc32;
dirty_it->second.state = ST_DEL_SUBMITTED;
if (immediate_commit != IMMEDIATE_NONE)
{
prepare_journal_sector_write(journal, journal.cur_sector, sqe, cb);
PRIV(op)->min_flushed_journal_sector = PRIV(op)->max_flushed_journal_sector = 1 + journal.cur_sector;
PRIV(op)->pending_ops++;
}
else
{
// Remember delete as unsynced, like a small write
unsynced_small_writes.push_back((obj_ver_id){
.oid = op->oid,
.version = op->version,
});
}
if (!PRIV(op)->pending_ops)
{
PRIV(op)->op_state = 4;
continue_write(op);
}
else
{
PRIV(op)->op_state = 3;
}
return 1;
}