From ba238245618e41ab37e48ab1f26d99c59f2a0f4e Mon Sep 17 00:00:00 2001
From: Vitaliy Filippov
Date: Mon, 6 Jan 2020 21:01:11 +0300
Subject: [PATCH] Allow to disable zero fill

---
 blockstore_impl.h    | 5 +++++
 blockstore_open.cpp  | 4 ++++
 blockstore_write.cpp | 9 +++++----
 3 files changed, 14 insertions(+), 4 deletions(-)

diff --git a/blockstore_impl.h b/blockstore_impl.h
index d3559e2b..62e87611 100644
--- a/blockstore_impl.h
+++ b/blockstore_impl.h
@@ -195,7 +195,12 @@ class blockstore_impl_t
     bool readonly = false;
     // FIXME: separate flags for data, metadata and journal
+    // It is safe to disable fsync() if drive write cache is writethrough
     bool disable_fsync = false;
+    // It is safe to disable zero fill if drive is zeroed before formatting.
+    // For example, with TRIM and Deterministic Read Zeroes after TRIM.
+    // FIXME: OP_DELETE should trim/zero out the block.
+    bool zerofill_enabled = false;
     bool inmemory_meta = false;
     void *metadata_buffer = NULL;
diff --git a/blockstore_open.cpp b/blockstore_open.cpp
index 52002171..4741ec8f 100644
--- a/blockstore_open.cpp
+++ b/blockstore_open.cpp
@@ -10,6 +10,10 @@ void blockstore_impl_t::calc_lengths(blockstore_config_t & config)
     {
         disable_fsync = true;
     }
+    if (config["zerofill"] == "true" || config["zerofill"] == "1" || config["zerofill"] == "yes")
+    {
+        zerofill_enabled = true;
+    }
     // data
     data_len = data_size - data_offset;
     if (data_fd == meta_fd && data_offset < meta_offset)
diff --git a/blockstore_write.cpp b/blockstore_write.cpp
index 3a85b556..7089519e 100644
--- a/blockstore_write.cpp
+++ b/blockstore_write.cpp
@@ -102,10 +102,10 @@ int blockstore_impl_t::dequeue_write(blockstore_op_t *op)
 #endif
         data_alloc->set(loc, true);
         int vcnt = 0;
-        if (op->version == 1 && op->len != block_size)
+        uint64_t stripe_offset = 0;
+        if (op->len != block_size && zerofill_enabled)
         {
-            // Zero fill newly allocated object. First write is always a big write
-            // FIXME: Add "no-zero-fill" mode which will just leave random garbage (insecure, but may be useful)
+            // Zero fill newly allocated object if required
             if (op->offset > 0)
                 PRIV(op)->iov_zerofill[vcnt++] = (struct iovec){ zero_object, op->offset };
             PRIV(op)->iov_zerofill[vcnt++] = (struct iovec){ op->buf, op->len };
@@ -118,10 +118,11 @@ int blockstore_impl_t::dequeue_write(blockstore_op_t *op)
             vcnt = 1;
             PRIV(op)->iov_zerofill[0] = (struct iovec){ op->buf, op->len };
             data->iov.iov_len = op->len; // to check it in the callback
+            stripe_offset = op->offset;
         }
         data->callback = [this, op](ring_data_t *data) { handle_write_event(data, op); };
         my_uring_prep_writev(
-            sqe, data_fd, PRIV(op)->iov_zerofill, vcnt, data_offset + (loc << block_order)
+            sqe, data_fd, PRIV(op)->iov_zerofill, vcnt, data_offset + (loc << block_order) + stripe_offset
         );
         PRIV(op)->pending_ops = 1;
         PRIV(op)->min_used_journal_sector = PRIV(op)->max_used_journal_sector = 0;
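
Editor's note on the resulting write layout (not part of the patch itself): the standalone sketch below illustrates what the changed dequeue_write() path now submits, assuming zero_object stands in for the blockstore's preallocated block of zeroes and that up to three iovecs are allowed per first write. With zero fill enabled, a short first write still covers the whole block starting at its base offset (leading zeroes, data, trailing zeroes); with zero fill disabled, only the data is written and the new stripe_offset shifts the submission to op->offset within the block. The build_write() helper, the block size and the sample op are hypothetical, chosen only to demonstrate the two code paths.

// Standalone sketch of the patched write layout (hypothetical helper, not vitastor code)
#include <sys/uio.h>
#include <cstdint>
#include <cstdio>
#include <vector>

static const uint64_t block_size = 128*1024;
static char zero_object[block_size] = {};   // stands in for the preallocated zero block

struct write_op
{
    uint64_t offset, len;
    char *buf;
};

// Build the iovec list and the on-disk offset for a first (big) write,
// mirroring the logic of the patched dequeue_write()
static uint64_t build_write(const write_op & op, bool zerofill_enabled,
    std::vector<iovec> & iov, uint64_t block_base)
{
    uint64_t stripe_offset = 0;
    if (op.len != block_size && zerofill_enabled)
    {
        // Pad the newly allocated object with zeroes around the data
        if (op.offset > 0)
            iov.push_back(iovec{ zero_object, (size_t)op.offset });
        iov.push_back(iovec{ op.buf, (size_t)op.len });
        if (op.offset + op.len < block_size)
            iov.push_back(iovec{ zero_object, (size_t)(block_size - op.offset - op.len) });
    }
    else
    {
        // No padding: submit only the data, shifted to its position inside the block
        iov.push_back(iovec{ op.buf, (size_t)op.len });
        stripe_offset = op.offset;
    }
    return block_base + stripe_offset;
}

int main()
{
    static char buf[4096] = {};
    write_op op = { 8192, sizeof(buf), buf };   // 4 KiB written at offset 8 KiB
    for (bool zf: { true, false })
    {
        std::vector<iovec> iov;
        uint64_t off = build_write(op, zf, iov, 0);
        printf("zerofill=%d: writev at offset %lu with %zu segment(s)\n",
            (int)zf, (unsigned long)off, iov.size());
    }
    return 0;
}

With this sample op, the zero fill path produces three segments written at the block base, while the non-zero-fill path produces a single 4 KiB segment written at base + 8192, which is exactly what the added stripe_offset term in my_uring_prep_writev() accounts for.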