diff --git a/src/blockstore_impl.cpp b/src/blockstore_impl.cpp
index 2e9f3086..d8ccff9c 100644
--- a/src/blockstore_impl.cpp
+++ b/src/blockstore_impl.cpp
@@ -384,6 +384,10 @@ void blockstore_impl_t::enqueue_op(blockstore_op_t *op)
         ringloop->set_immediate([op]() { std::function<void (blockstore_op_t*)>(op->callback)(op); });
         return;
     }
+    if (op->opcode == BS_OP_SYNC)
+    {
+        unsynced_queued_ops = 0;
+    }
     init_op(op);
     submit_queue.push_back(op);
     ringloop->wakeup();
diff --git a/src/blockstore_impl.h b/src/blockstore_impl.h
index d2d5e493..d833b8a8 100644
--- a/src/blockstore_impl.h
+++ b/src/blockstore_impl.h
@@ -262,6 +262,8 @@ class blockstore_impl_t
     int throttle_target_parallelism = 1;
     // Minimum difference in microseconds between target and real execution times to throttle the response
     int throttle_threshold_us = 50;
+    // Maximum writes between automatically added fsync operations
+    uint64_t autosync_writes = 128;
     /******* END OF OPTIONS *******/
 
     struct ring_consumer_t ring_consumer;
@@ -273,6 +275,7 @@ class blockstore_impl_t
     std::vector<blockstore_op_t*> submit_queue;
     std::vector<obj_ver_id> unsynced_big_writes, unsynced_small_writes;
     int unsynced_big_write_count = 0;
+    int unsynced_queued_ops = 0;
     allocator *data_alloc = NULL;
     uint8_t *zero_object;
 
diff --git a/src/blockstore_open.cpp b/src/blockstore_open.cpp
index eaead712..7c57dbde 100644
--- a/src/blockstore_open.cpp
+++ b/src/blockstore_open.cpp
@@ -19,6 +19,10 @@ void blockstore_impl_t::parse_config(blockstore_config_t & config, bool init)
     throttle_target_mbs = strtoull(config["throttle_target_mbs"].c_str(), NULL, 10);
     throttle_target_parallelism = strtoull(config["throttle_target_parallelism"].c_str(), NULL, 10);
     throttle_threshold_us = strtoull(config["throttle_threshold_us"].c_str(), NULL, 10);
+    if (config.find("autosync_writes") != config.end())
+    {
+        autosync_writes = strtoull(config["autosync_writes"].c_str(), NULL, 10);
+    }
     if (!max_flusher_count)
     {
         max_flusher_count = 256;
diff --git a/src/blockstore_write.cpp b/src/blockstore_write.cpp
index 88972377..4966da50 100644
--- a/src/blockstore_write.cpp
+++ b/src/blockstore_write.cpp
@@ -127,8 +127,9 @@ bool blockstore_impl_t::enqueue_write(blockstore_op_t *op)
             return false;
         }
     }
-    if (wait_big && !is_del && !deleted && op->len < dsk.data_block_size &&
-        immediate_commit != IMMEDIATE_ALL)
+    bool imm = (op->len < dsk.data_block_size ? (immediate_commit != IMMEDIATE_NONE) : (immediate_commit == IMMEDIATE_ALL));
+    if (wait_big && !is_del && !deleted && op->len < dsk.data_block_size && !imm ||
+        !imm && unsynced_queued_ops >= autosync_writes)
     {
         // Issue an additional sync so that the previous big write can reach the journal
         blockstore_op_t *sync_op = new blockstore_op_t;
@@ -139,6 +140,8 @@ bool blockstore_impl_t::enqueue_write(blockstore_op_t *op)
         };
         enqueue_op(sync_op);
     }
+    else if (!imm)
+        unsynced_queued_ops++;
 #ifdef BLOCKSTORE_DEBUG
     if (is_del)
         printf("Delete %lx:%lx v%lu\n", op->oid.inode, op->oid.stripe, op->version);
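
For context on the blockstore_write.cpp change: a write bypasses the unsynced-write accounting only when it is committed immediately, which the new imm flag captures. The snippet below is a standalone restatement of that check for illustration only; the IMMEDIATE_* constants and the 128 KiB data block size are local stubs, not the definitions from the blockstore headers.

// Standalone restatement of the new "imm" check (illustrative sketch).
// IMMEDIATE_* values and the block size are local stubs here, not the
// real definitions from the blockstore headers.
#include <cstdint>
#include <cstdio>

enum { IMMEDIATE_NONE, IMMEDIATE_SMALL, IMMEDIATE_ALL };

// A write is committed immediately (and therefore never counted towards
// autosync_writes) if it is a small journaled write under
// immediate_commit=small/all, or a full-block data-area write under
// immediate_commit=all.
static bool is_immediate(uint32_t len, uint32_t data_block_size, int immediate_commit)
{
    return len < data_block_size
        ? (immediate_commit != IMMEDIATE_NONE)
        : (immediate_commit == IMMEDIATE_ALL);
}

int main()
{
    const uint32_t block = 128*1024; // assumed data_block_size
    printf("4k write,  immediate_commit=small -> imm=%d\n", is_immediate(4096, block, IMMEDIATE_SMALL));
    printf("big write, immediate_commit=small -> imm=%d\n", is_immediate(block, block, IMMEDIATE_SMALL));
    printf("big write, immediate_commit=all   -> imm=%d\n", is_immediate(block, block, IMMEDIATE_ALL));
    return 0;
}

Non-immediate writes increment unsynced_queued_ops; once the counter reaches autosync_writes, enqueue_write() injects a BS_OP_SYNC instead, and enqueue_op() resets the counter when a sync is queued.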
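
And a hypothetical example of how the new option could be supplied, assuming blockstore_config_t is the string-keyed map that parse_config() above indexes with config["..."]; the device path and the surrounding OSD/ringloop setup are placeholders, not part of the patch.

// Hypothetical usage sketch (not part of the patch): supplying
// autosync_writes through the string-keyed blockstore config map.
#include <map>
#include <string>

// Assumption: blockstore_config_t is a std::map<std::string, std::string>,
// matching how parse_config() indexes it with config["..."] above.
typedef std::map<std::string, std::string> blockstore_config_t;

int main()
{
    blockstore_config_t config;
    config["data_device"] = "/dev/sdX"; // placeholder device path
    // Request an automatic fsync after at most 64 queued unsynced writes
    // instead of the new default of 128; parse_config() reads the value
    // with strtoull(), so it is passed as a decimal string.
    config["autosync_writes"] = "64";
    // The map would then be handed to the blockstore, which calls
    // parse_config(config, ...) as shown in blockstore_open.cpp above.
    return 0;
}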