diff --git a/blockstore_init.cpp b/blockstore_init.cpp
index 199ccc6c..7ba57655 100644
--- a/blockstore_init.cpp
+++ b/blockstore_init.cpp
@@ -310,7 +310,7 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t len)
             }, (dirty_entry){
                 .state = ST_D_META_SYNCED,
                 .flags = 0,
-                .location = je->big_write.block,
+                .location = je->big_write.location,
                 .offset = 0,
                 .size = bs->block_size,
             });
diff --git a/blockstore_journal.h b/blockstore_journal.h
index 3b9edf13..7afda812 100644
--- a/blockstore_journal.h
+++ b/blockstore_journal.h
@@ -49,7 +49,7 @@ struct __attribute__((__packed__)) journal_entry_big_write
     uint32_t crc32_prev;
     object_id oid;
     uint64_t version;
-    uint64_t block;
+    uint64_t location;
 };
 
 struct __attribute__((__packed__)) journal_entry_stable
diff --git a/blockstore_sync.cpp b/blockstore_sync.cpp
index 7b9c7151..6e83191a 100644
--- a/blockstore_sync.cpp
+++ b/blockstore_sync.cpp
@@ -79,7 +79,7 @@ int blockstore::continue_sync(blockstore_operation *op)
             prefill_single_journal_entry(journal, JE_BIG_WRITE, sizeof(journal_entry_big_write));
             je->oid = it->oid;
             je->version = it->version;
-            je->block = dirty_db[*it].location;
+            je->location = dirty_db[*it].location;
             je->crc32 = je_crc32((journal_entry*)je);
             journal.crc32_last = je->crc32;
             it++;
diff --git a/blockstore_write.cpp b/blockstore_write.cpp
index 46bd943b..de5220ce 100644
--- a/blockstore_write.cpp
+++ b/blockstore_write.cpp
@@ -81,8 +81,8 @@ int blockstore::dequeue_write(blockstore_operation *op)
         if (op->version == 1 && op->len != block_size)
         {
             // zero fill newly allocated object
-            // FIXME: it's not so good because it turns new small writes into big writes
-            // but it's the first and the simplest implementation
+            // This thing turns new small writes into big writes
+            // So FIXME: consider writing an empty big_write as version 1 instead of zero-filling here
             if (op->offset > 0)
                 op->iov_zerofill[vcnt++] = (struct iovec){ zero_object, op->offset };
             op->iov_zerofill[vcnt++] = (struct iovec){ op->buf, op->len };