2019-11-07 02:24:12 +03:00
|
|
|
#include "blockstore.h"
|
|
|
|
|
2019-11-10 13:27:59 +03:00
|
|
|
// Assign a version number to an incoming write and register it in dirty_db.
//
// The new version is (latest known version of op->oid) + 1, where the latest
// version is taken from dirty_db if the object already has unsynced/in-flight
// writes, otherwise from clean_db; a completely new object gets version 1.
// The entry is added to dirty_db immediately so subsequent reads can see it,
// and the write is also remembered in the unsynced big/small lists so that
// external consumers can collect the set of objects to fsync before a SYNC.
void blockstore::enqueue_write(blockstore_operation *op)
{
    // Assign version number
    op->version = 1;
    bool dirty_found = false;
    if (dirty_db.size() > 0)
    {
        // upper_bound() with version == UINT64_MAX lands just past the last
        // dirty entry of this object (if any)
        auto dirty_it = dirty_db.upper_bound((obj_ver_id){
            .oid = op->oid,
            .version = UINT64_MAX,
        });
        // NOTE(fix): the previous code decremented the iterator unconditionally,
        // which is undefined behaviour when upper_bound() returns begin()
        // (including the empty-map case). Guard against it explicitly.
        if (dirty_it != dirty_db.begin())
        {
            dirty_it--;
            if (dirty_it->first.oid == op->oid)
            {
                op->version = dirty_it->first.version + 1;
                dirty_found = true;
            }
        }
    }
    if (!dirty_found)
    {
        auto clean_it = clean_db.find(op->oid);
        if (clean_it != clean_db.end())
        {
            op->version = clean_it->second.version + 1;
        }
        // else: object is new, op->version stays 1
    }
    // Immediately add the operation into dirty_db, so subsequent reads could see it
    dirty_db.emplace((obj_ver_id){
        .oid = op->oid,
        .version = op->version,
    }, (dirty_entry){
        .state = ST_IN_FLIGHT,
        .flags = 0,
        .location = 0,
        .offset = op->offset,
        .size = op->len,
    });
    // Remember write as unsynced here, so external consumers could get
    // the list of dirty objects to sync just before issuing a SYNC request
    if (op->len == block_size || op->version == 1)
    {
        // Remember big write as unsynced
        // (first writes also count as "big" — dequeue_write zero-fills them
        // to a full block and redirects them to the data area)
        unsynced_big_writes.push_back((obj_ver_id){
            .oid = op->oid,
            .version = op->version,
        });
    }
    else
    {
        // Remember small write as unsynced
        unsynced_small_writes.push_back((obj_ver_id){
            .oid = op->oid,
            .version = op->version,
        });
    }
}
|
|
|
|
|
2019-11-07 02:24:12 +03:00
|
|
|
// First step of the write algorithm: dequeue operation and submit initial write(s)
//
// Returns 1 when the operation was either submitted to io_uring or failed
// immediately (-ENOSPC reported via the callback), and 0 when it must stay
// queued because journal space / SQEs are not available yet.
//
// Writes are split into two flavours:
//  - "big" writes (full block, or the first version of an object) are
//    redirected to a freshly allocated block in the data area;
//  - "small" writes are appended to the journal (header entry + data).
int blockstore::dequeue_write(blockstore_operation *op)
{
    // The dirty entry was created by enqueue_write(), so it must exist
    auto dirty_it = dirty_db.find((obj_ver_id){
        .oid = op->oid,
        .version = op->version,
    });
    if (op->len == block_size || op->version == 1)
    {
        // Big (redirect) write
        uint64_t loc = allocator_find_free(data_alloc);
        if (loc == UINT64_MAX)
        {
            // no space
            op->retval = -ENOSPC;
            op->callback(op);
            return 1;
        }
        BS_SUBMIT_GET_SQE(sqe, data);
        // location stores the byte offset of the allocated block
        dirty_it->second.location = loc << block_order;
        dirty_it->second.state = ST_D_SUBMITTED;
        allocator_set(data_alloc, loc, true);
        int vcnt = 0;
        if (op->version == 1 && op->len != block_size)
        {
            // zero fill newly allocated object
            // This thing turns new small writes into big writes
            // So FIXME: consider writing an empty big_write as version 1 instead of zero-filling here
            if (op->offset > 0)
                op->iov_zerofill[vcnt++] = (struct iovec){ zero_object, op->offset };
            op->iov_zerofill[vcnt++] = (struct iovec){ op->buf, op->len };
            if (op->offset+op->len < block_size)
                op->iov_zerofill[vcnt++] = (struct iovec){ zero_object, block_size - (op->offset + op->len) };
        }
        else
        {
            // Full-block write: a single iovec, no padding needed
            vcnt = 1;
            op->iov_zerofill[0] = (struct iovec){ op->buf, op->len };
        }
        data->op = op;
        io_uring_prep_writev(
            sqe, data_fd, op->iov_zerofill, vcnt, data_offset + (loc << block_order)
        );
        // One data SQE to wait for; big writes don't pin journal sectors
        op->pending_ops = 1;
        op->min_used_journal_sector = op->max_used_journal_sector = 0;
    }
    else
    {
        // Small (journaled) write
        // First check if the journal has sufficient space
        // FIXME Always two SQEs for now. Although it's possible to send 1 sometimes
        //two_sqes = (512 - journal.in_sector_pos < sizeof(struct journal_entry_small_write)
        //    ? (journal.len - next_pos < op->len)
        //    : (journal.sector_info[journal.cur_sector].offset + 512 != journal.next_free ||
        //    journal.len - next_pos < op->len);
        blockstore_journal_check_t space_check(this);
        if (!space_check.check_available(op, 1, sizeof(journal_entry_small_write), op->len))
        {
            // Not enough journal space right now — leave the op queued
            return 0;
        }
        // There is sufficient space. Get SQE(s)
        BS_SUBMIT_GET_ONLY_SQE(sqe1);
        BS_SUBMIT_GET_SQE(sqe2, data2);
        // Got SQEs. Prepare journal sector write
        journal_entry_small_write *je = (journal_entry_small_write*)
            prefill_single_journal_entry(journal, JE_SMALL_WRITE, sizeof(struct journal_entry_small_write));
        je->oid = op->oid;
        je->version = op->version;
        je->offset = op->offset;
        je->len = op->len;
        je->crc32 = je_crc32((journal_entry*)je);
        journal.crc32_last = je->crc32;
        prepare_journal_sector_write(op, journal, sqe1);
        // Sector numbers are remembered 1-based so that 0 can mean "none"
        // (see handle_write_event, which subtracts 1 when releasing)
        op->min_used_journal_sector = op->max_used_journal_sector = 1 + journal.cur_sector;
        // Prepare journal data write
        // Wrap to offset 512 if the data wouldn't fit before journal end
        // (presumably the first 512 bytes hold the journal header — TODO confirm)
        journal.next_free = (journal.next_free + op->len) < journal.len ? journal.next_free : 512;
        data2->iov = (struct iovec){ op->buf, op->len };
        data2->op = op;
        io_uring_prep_writev(
            sqe2, journal.fd, &data2->iov, 1, journal.offset + journal.next_free
        );
        // For journaled writes, location is the data offset inside the journal
        dirty_it->second.location = journal.next_free;
        dirty_it->second.state = ST_J_SUBMITTED;
        journal.next_free += op->len;
        // Two SQEs to wait for: journal sector + journal data
        op->pending_ops = 2;
    }
    return 1;
}
|
2019-11-08 14:10:24 +03:00
|
|
|
|
2019-11-10 01:40:48 +03:00
|
|
|
// Completion handler for one io_uring write submitted by dequeue_write().
// Invoked once per completed SQE. When the last pending SQE of an operation
// completes, releases the journal sectors it pinned, advances the object's
// dirty state (SUBMITTED -> WRITTEN), and acknowledges the op to the caller.
void blockstore::handle_write_event(ring_data_t *data, blockstore_operation *op)
{
    if (data->res < 0)
    {
        // write error
        // FIXME: our state becomes corrupted after a write error. maybe do something better than just die
        // Fix: was `throw new std::runtime_error(...)`, which throws a raw
        // pointer — it leaks and is NOT caught by `catch (const std::exception&)`.
        // Throw the exception object by value instead.
        throw std::runtime_error("write operation failed. in-memory state is corrupted. AAAAAAAaaaaaaaaa!!!111");
    }
    op->pending_ops--;
    if (op->pending_ops == 0)
    {
        // Release used journal sectors
        if (op->min_used_journal_sector > 0)
        {
            // Sector numbers are stored 1-based (see dequeue_write), hence s-1
            for (uint64_t s = op->min_used_journal_sector; s <= op->max_used_journal_sector; s++)
            {
                journal.sector_info[s-1].usage_count--;
            }
            op->min_used_journal_sector = op->max_used_journal_sector = 0;
        }
        // Switch object state
        auto & dirty_entry = dirty_db[(obj_ver_id){
            .oid = op->oid,
            .version = op->version,
        }];
        if (dirty_entry.state == ST_J_SUBMITTED)
        {
            dirty_entry.state = ST_J_WRITTEN;
        }
        else if (dirty_entry.state == ST_D_SUBMITTED)
        {
            dirty_entry.state = ST_D_WRITTEN;
        }
        else if (dirty_entry.state == ST_DEL_SUBMITTED)
        {
            dirty_entry.state = ST_DEL_WRITTEN;
        }
        // Acknowledge write without sync
        op->retval = op->len;
        op->callback(op);
    }
}
|