From 53f6aba3e6e63f7f993bf5c11cdaffab0dc24753 Mon Sep 17 00:00:00 2001
From: Vitaliy Filippov
Date: Sun, 24 May 2020 17:22:12 +0300
Subject: [PATCH] Die when journal_sector_buffer_count is too small

---
 blockstore_journal.cpp | 20 ++++++++++++++++----
 blockstore_journal.h   |  2 +-
 osd_primary_subops.cpp |  2 ++
 3 files changed, 19 insertions(+), 5 deletions(-)

diff --git a/blockstore_journal.cpp b/blockstore_journal.cpp
index c4684bb3..21e70f50 100644
--- a/blockstore_journal.cpp
+++ b/blockstore_journal.cpp
@@ -6,18 +6,24 @@ blockstore_journal_check_t::blockstore_journal_check_t(blockstore_impl_t *bs)
     sectors_required = 0;
     next_pos = bs->journal.next_free;
     next_sector = bs->journal.cur_sector;
+    first_sector = -1;
     next_in_pos = bs->journal.in_sector_pos;
     right_dir = next_pos >= bs->journal.used_start;
 }
 
 // Check if we can write <required> entries of <size> bytes and <data_after> data bytes after them to the journal
-int blockstore_journal_check_t::check_available(blockstore_op_t *op, int required, int size, int data_after)
+int blockstore_journal_check_t::check_available(blockstore_op_t *op, int entries_required, int size, int data_after)
 {
+    int required = entries_required;
     while (1)
     {
         int fits = (bs->journal.block_size - next_in_pos) / size;
         if (fits > 0)
         {
+            if (first_sector == -1)
+            {
+                first_sector = next_sector;
+            }
             required -= fits;
             next_in_pos += fits * size;
             sectors_required++;
@@ -38,10 +44,15 @@ int blockstore_journal_check_t::check_available(blockstore_op_t *op, int require
             right_dir = false;
         }
         next_in_pos = 0;
-        if (bs->journal.sector_info[next_sector].usage_count > 0 ||
-            bs->journal.sector_info[next_sector].dirty)
+        next_sector = ((next_sector + 1) % bs->journal.sector_count);
+        if (next_sector == first_sector)
         {
-            next_sector = ((next_sector + 1) % bs->journal.sector_count);
+            // next_sector may wrap when all sectors are flushed and the incoming batch is too big
+            // This is an error condition, we can't wait for anything in this case
+            throw std::runtime_error(
+                "Blockstore journal_sector_buffer_count="+std::to_string(bs->journal.sector_count)+
+                " is too small for a batch of "+std::to_string(entries_required)+" entries of "+std::to_string(size)+" bytes"
+            );
         }
         if (bs->journal.sector_info[next_sector].usage_count > 0 ||
             bs->journal.sector_info[next_sector].dirty)
@@ -107,6 +118,7 @@ journal_entry* prefill_single_journal_entry(journal_t & journal, uint16_t type,
     {
         // Also select next sector buffer in memory
         journal.cur_sector = ((journal.cur_sector + 1) % journal.sector_count);
+        assert(!journal.sector_info[journal.cur_sector].usage_count);
     }
     else
     {
diff --git a/blockstore_journal.h b/blockstore_journal.h
index c3cf6157..ef6af539 100644
--- a/blockstore_journal.h
+++ b/blockstore_journal.h
@@ -166,7 +166,7 @@ struct blockstore_journal_check_t
 {
     blockstore_impl_t *bs;
     uint64_t next_pos, next_sector, next_in_pos;
-    int sectors_required;
+    int sectors_required, first_sector;
     bool right_dir; // writing to the end or the beginning of the ring buffer
 
     blockstore_journal_check_t(blockstore_impl_t *bs);
diff --git a/osd_primary_subops.cpp b/osd_primary_subops.cpp
index 0e78cc4b..ec7ebe21 100644
--- a/osd_primary_subops.cpp
+++ b/osd_primary_subops.cpp
@@ -2,6 +2,8 @@
 
 void osd_t::autosync()
 {
+    // FIXME Autosync based on the number of unstable writes to prevent
+    // "journal_sector_buffer_count is too low for this batch" errors
     if (immediate_commit != IMMEDIATE_ALL && !autosync_op)
     {
         autosync_op = new osd_op_t();
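
Illustrative note (not part of the patch): the new check keys on first_sector. It remembers the first sector buffer a batch touches, and if advancing next_sector around the ring comes back to it, the batch alone would consume every journal sector buffer, so waiting for flushes cannot help and the only fix is a larger journal_sector_buffer_count. The sketch below models that idea in isolation, assuming made-up names (check_batch_fits, per_sector) and ignoring the per-sector usage_count/dirty bookkeeping of the real blockstore; it is a simplified illustration, not Vitastor code.

// Minimal standalone model of the wrap check added above: a batch of journal
// entries is laid out across a ring of sector buffers, and if the walk returns
// to the first buffer it touched, no amount of waiting can make the batch fit;
// only a larger journal_sector_buffer_count can.
#include <stdexcept>
#include <string>
#include <iostream>

static void check_batch_fits(int sector_count, int block_size, int entries, int entry_size)
{
    int per_sector = block_size / entry_size; // entries that fit into one sector buffer
    if (per_sector <= 0)
        throw std::runtime_error("entry does not fit into a single sector buffer");
    int next_sector = 0, first_sector = -1;
    while (entries > 0)
    {
        if (first_sector == -1)
            first_sector = next_sector;       // remember where this batch started
        entries -= per_sector;                // fill the current sector buffer
        if (entries <= 0)
            break;                            // the whole batch fits
        next_sector = (next_sector + 1) % sector_count;
        if (next_sector == first_sector)
        {
            // Wrapped around the whole ring: the batch itself is too big,
            // mirroring the exception thrown by the patched check_available()
            throw std::runtime_error(
                "journal_sector_buffer_count="+std::to_string(sector_count)+
                " is too small for this batch"
            );
        }
    }
}

int main()
{
    check_batch_fits(32, 4096, 1000, 56);     // fits into 32 buffers
    try
    {
        check_batch_fits(4, 4096, 1000, 56);  // cannot fit, throws
    }
    catch (std::exception & e)
    {
        std::cout << e.what() << std::endl;
    }
    return 0;
}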