#include "blockstore.h"

// Stabilize small write:
// 1) Copy data from the journal to the data device
//    Sync it before writing metadata if we want to keep the metadata consistent.
//    Overall this sync is optional, because the write can be replayed from the journal
//    until the journal is cleared, and reads are also fulfilled from the journal
// 2) Increase version on the metadata device and sync it
// 3) Advance clean_db entry's version, clear previous journal entries
//
// This makes 1 4K small write+sync look like:
// 512b+4K (journal) + sync + 512b (journal) + sync + 4K (data) [+ sync?] + 512b (metadata) + sync.
// WA = 2.375. It's not the best; an SSD FTL-like redirect-write scheme could probably
// achieve lower WA even with defragmentation. But ours is fixed, and it's still
// better than in Ceph. :)
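//
// Spelled out, that WA figure is just the listed transfers divided by the payload:
//   journal entry + data:  512 + 4096
//   stabilize entry:       512
//   data copy:             4096
//   metadata update:       512
//   total = 9728 bytes, WA = 9728 / 4096 = 2.375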

// Stabilize big write:
// 1) Copy metadata from the journal to the metadata device
// 2) Move dirty_db entry to clean_db and clear previous journal entries
//
// This makes 1 128K big write+sync look like:
// 128K (data) + sync + 512b (journal) + sync + 512b (journal) + sync + 512b (metadata) + sync.
// WA = 1.012. Very good :)
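// (Same accounting: (131072 + 3*512) / 131072 = 132608 / 131072 ≈ 1.0117, i.e. ~1.012.)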

// We must do it in batches to reduce the fsync call count.
// We must also know what we stabilize. The basic workflow is:
// 1) the primary OSD receives a sync request
// 2) it determines its own unsynced writes from blockstore's information
//    just before submitting the fsync
// 3) it submits syncs to the blockstore and to its peers
// 4) after everyone acks the sync, it takes the object list and sends stabilize
//    requests to everyone (see the sketch below)
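//
// A rough sketch of that flow from the primary OSD's side (hypothetical names -
// the OSD layer lives outside this file):
//
//   void osd_t::handle_sync(osd_op_t *sync_op)
//   {
//       // step 2: snapshot the unsynced writes *before* submitting the fsync
//       std::vector<obj_ver_id> unsynced = collect_unsynced_writes();
//       // step 3: fsync the local blockstore and ask peers to sync
//       submit_syncs(unsynced);
//       // step 4: once every participant acks, stabilize the same versions everywhere
//       on_all_syncs_acked([=]() { send_stabilize_requests(unsynced); });
//   }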

int blockstore::dequeue_stable(blockstore_operation *op)
{
    obj_ver_id* v;
    int i, todo = 0;
    for (i = 0, v = (obj_ver_id*)op->buf; i < op->len; i++, v++)
    {
        auto dirty_it = dirty_db.find(*v);
        if (dirty_it == dirty_db.end())
        {
            auto clean_it = clean_db.find(v->oid);
            if (clean_it == clean_db.end() || clean_it->second.version < v->version)
            {
                // No such object version
                op->retval = EINVAL;
                op->callback(op);
                return 1;
            }
            else
            {
                // Already stable
            }
        }
        else if (IS_UNSYNCED(dirty_it->second.state))
        {
            // Object not synced yet. Caller must sync it first
            op->retval = EAGAIN;
            op->callback(op);
            return 1;
        }
        else if (!IS_STABLE(dirty_it->second.state))
        {
            todo++;
        }
    }
    if (!todo)
    {
        // Already stable
        op->retval = 0;
        op->callback(op);
        return 1;
    }
    // Check journal space
    blockstore_journal_check_t space_check(this);
    if (!space_check.check_available(op, todo, sizeof(journal_entry_stable), 0))
    {
        return 0;
    }
    // There is sufficient space. Get SQEs
    struct io_uring_sqe *sqe[space_check.sectors_required];
    for (i = 0; i < space_check.sectors_required; i++)
    {
        BS_SUBMIT_GET_SQE_DECL(sqe[i]);
    }
    // Prepare and submit journal entries
    int s = 0, cur_sector = -1;
    for (i = 0, v = (obj_ver_id*)op->buf; i < op->len; i++, v++)
    {
        journal_entry_stable *je = (journal_entry_stable*)
            prefill_single_journal_entry(journal, JE_STABLE, sizeof(journal_entry_stable));
        je->oid = v->oid;
        je->version = v->version;
        je->crc32 = je_crc32((journal_entry*)je);
        journal.crc32_last = je->crc32;
        if (cur_sector != journal.cur_sector)
        {
            // Submit each journal sector only once, when the first entry lands in it
            if (cur_sector == -1)
                op->min_used_journal_sector = 1 + journal.cur_sector;
            cur_sector = journal.cur_sector;
            prepare_journal_sector_write(op, journal, sqe[s++]);
        }
    }
    op->max_used_journal_sector = 1 + journal.cur_sector;
    op->pending_ops = s;
    return 1;
}

int blockstore::continue_stable(blockstore_operation *op)
{
    return 0;
}

void blockstore::handle_stable_event(ring_data_t *data, blockstore_operation *op)
{
    if (data->res < 0)
    {
        // write error
        // FIXME: our state becomes corrupted after a write error. maybe do something better than just die
        throw std::runtime_error("write operation failed. in-memory state is corrupted. AAAAAAAaaaaaaaaa!!!111");
    }
    op->pending_ops--;
    if (op->pending_ops == 0)
    {
        // First step: mark dirty_db entries as stable, acknowledge op completion
        obj_ver_id* v;
        int i;
        for (i = 0, v = (obj_ver_id*)op->buf; i < op->len; i++, v++)
        {
            // Mark all dirty_db entries up to v->version as stable
            auto dirty_it = dirty_db.find(*v);
            if (dirty_it != dirty_db.end())
            {
                do
                {
                    if (dirty_it->second.state == ST_J_SYNCED)
                    {
                        dirty_it->second.state = ST_J_STABLE;
                    }
                    else if (dirty_it->second.state == ST_D_META_SYNCED)
                    {
                        dirty_it->second.state = ST_D_STABLE;
                    }
                    else if (IS_STABLE(dirty_it->second.state))
                    {
                        break;
                    }
                    dirty_it--;
                } while (dirty_it != dirty_db.begin() && dirty_it->first.oid == v->oid);
                flusher.flush_queue.push_back(*v);
            }
        }
        // Acknowledge op
        op->retval = 0;
        op->callback(op);
    }
}

struct copy_buffer_t
{
    uint64_t offset, len;
    void *buf;
};

class journal_flusher_t
{
    blockstore *bs;
    int wait_state, wait_count;
    struct io_uring_sqe *sqe;
    struct ring_data_t *data;
    bool skip_copy;
    obj_ver_id cur;
    std::map<obj_ver_id, dirty_entry>::iterator dirty_it;
    std::vector<copy_buffer_t> v;
    std::vector<copy_buffer_t>::iterator it;
    uint64_t offset, len, submit_len, clean_loc;

public:
    journal_flusher_t(int flusher_count);
    std::deque<obj_ver_id> flush_queue;
    void loop();
};

journal_flusher_t::journal_flusher_t(int flusher_count)
{
    // Make sure loop() doesn't start from a garbage resume point
    wait_state = 0;
    wait_count = 0;
}

void journal_flusher_t::loop()
{
    // Resuming via goto is much better than implementing the whole function as an FSM
    // Maybe I should consider a coroutine library like https://github.com/hnes/libaco ...
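    //
    // The pattern, distilled: each suspension point stores a wait_state and returns,
    // and the next call jumps straight back to the same statement:
    //
    //   if (wait_state == 1)
    //       goto resume_1;
    //   ...
    // resume_1:
    //   sqe = bs->get_sqe();
    //   if (!sqe)
    //   {
    //       wait_state = 1; // suspend: submission ring is full
    //       return;         // loop() will be called again later
    //   }
    //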
    if (wait_state == 1)
        goto resume_1;
    else if (wait_state == 3)
        goto resume_3;
    else if (wait_state == 4)
        goto resume_4;
    else if (wait_state == 5)
        goto resume_5;
    if (!flush_queue.size())
        return;
    cur = flush_queue.front();
    flush_queue.pop_front();
    dirty_it = bs->dirty_db.find(cur);
    if (dirty_it != bs->dirty_db.end())
    {
        v.clear();
        wait_count = 0;
        clean_loc = UINT64_MAX;
        skip_copy = false;
        do
        {
            if (dirty_it->second.state == ST_J_STABLE)
            {
                // First we submit all reads
                offset = dirty_it->second.offset;
                len = dirty_it->second.size;
                it = v.begin();
                while (1)
                {
                    for (; it != v.end(); it++)
                        if (it->offset >= offset)
                            break;
                    if (it == v.end() || it->offset > offset)
                    {
                        // Check for end() before dereferencing - it would be undefined behaviour
                        submit_len = (it == v.end() || it->offset >= offset+len) ? len : it->offset - offset;
                    resume_1:
                        sqe = bs->get_sqe();
                        if (!sqe)
                        {
                            // Can't submit read, ring is full
                            wait_state = 1;
                            return;
                        }
                        // Keep the iterator returned by insert() - the old one is invalidated
                        it = v.insert(it, (copy_buffer_t){ .offset = offset, .len = submit_len, .buf = memalign(512, submit_len) });
                        data = ((ring_data_t*)sqe->user_data);
                        data->iov = (struct iovec){ it->buf, (size_t)submit_len };
                        data->op = this;
                        io_uring_prep_readv(
                            sqe, bs->journal.fd, &data->iov, 1, bs->journal.offset + dirty_it->second.location + offset
                        );
                        wait_count++;
                    }
                    if (it == v.end() || it->offset+it->len >= offset+len)
                    {
                        break;
                    }
                }
                // So subsequent stabilizers don't flush the entry again
                dirty_it->second.state = ST_J_MOVE_READ_SUBMITTED;
            }
            else if (dirty_it->second.state == ST_D_STABLE)
            {
                // Remember the data location of the last stable big write
                if (!skip_copy)
                {
                    clean_loc = dirty_it->second.location;
                }
                skip_copy = true;
            }
            else if (IS_STABLE(dirty_it->second.state))
            {
                break;
            }
            dirty_it--;
        } while (dirty_it != bs->dirty_db.begin() && dirty_it->first.oid == cur.oid);
        if (clean_loc == UINT64_MAX)
        {
            // Find it in clean_db
            auto clean_it = bs->clean_db.find(cur.oid);
            if (clean_it == bs->clean_db.end())
            {
                // Object not present at all. This is a bug.
                throw std::runtime_error("BUG: object we are trying to flush is not allocated on the data device");
            }
            else
                clean_loc = clean_it->second.location;
        }
        wait_state = 3;
    resume_3:
        // After reads complete we submit writes
        if (wait_count == 0)
        {
            for (it = v.begin(); it != v.end(); it++)
            {
            resume_4:
                sqe = bs->get_sqe();
                if (!sqe)
                {
                    // Can't submit a write, ring is full
                    wait_state = 4;
                    return;
                }
                data = ((ring_data_t*)sqe->user_data);
                data->iov = (struct iovec){ it->buf, (size_t)it->len };
                data->op = this;
                io_uring_prep_writev(
                    sqe, bs->data_fd, &data->iov, 1, bs->data_offset + clean_loc + it->offset
                );
                wait_count++;
            }
            wait_state = 5;
        resume_5:
            // Done, free all buffers
            if (wait_count == 0)
            {
                for (it = v.begin(); it != v.end(); it++)
                {
                    free(it->buf);
                }
                v.clear();
                wait_state = 0;
            }
        }
    }
}