Compare commits
30 Commits
SHA1
---
b9e7d31aa1
2d9f09dcb6
7cc59260c5
ca0a11ec85
51c0b5afee
e1e01d042e
534a4a657e
9b5d8b9ad4
e66ed47515
036c6d4c42
4cb79a3bf8
3bf53754c2
6023cac361
915d04c446
21e06ea40d
9ef7f865b0
9dd20a31aa
28be049909
78fbaacf1f
1526c5a213
c7cc414c90
f4ea313707
b88b76f316
4a17a61d1f
ccabbbfbcb
26dac57083
44a53d8352
9d80bd2d98
322a38a144
1018764c91
Makefile (6 changes)
@@ -30,13 +30,13 @@ dump_journal: dump_journal.cpp crc32c.o blockstore_journal.h
 libblockstore.so: $(BLOCKSTORE_OBJS)
 	g++ $(CXXFLAGS) -o $@ -shared $(BLOCKSTORE_OBJS) -ltcmalloc_minimal -luring
 libfio_blockstore.so: ./libblockstore.so fio_engine.o json11.o
-	g++ $(CXXFLAGS) -Wl,-rpath,'$(LIBDIR)/vitastor' -shared -o $@ fio_engine.o json11.o ./libblockstore.so -ltcmalloc_minimal -luring
+	g++ $(CXXFLAGS) -Wl,-rpath,'$(LIBDIR)/vitastor',-rpath,'$$ORIGIN' -shared -o $@ fio_engine.o json11.o libblockstore.so -ltcmalloc_minimal -luring
 
 OSD_OBJS := osd.o osd_secondary.o msgr_receive.o msgr_send.o osd_peering.o osd_flush.o osd_peering_pg.o \
 	osd_primary.o osd_primary_subops.o etcd_state_client.o messenger.o osd_cluster.o http_client.o osd_ops.o pg_states.o \
 	osd_rmw.o json11.o base64.o timerfd_manager.o epoll_manager.o
 osd: ./libblockstore.so osd_main.cpp osd.h osd_ops.h $(OSD_OBJS)
-	g++ $(CXXFLAGS) -Wl,-rpath,'$(LIBDIR)/vitastor' -o $@ osd_main.cpp $(OSD_OBJS) ./libblockstore.so -ltcmalloc_minimal -luring -lJerasure
+	g++ $(CXXFLAGS) -Wl,-rpath,'$(LIBDIR)/vitastor',-rpath,'$$ORIGIN' -o $@ osd_main.cpp $(OSD_OBJS) libblockstore.so -ltcmalloc_minimal -luring -lJerasure
 
 stub_osd: stub_osd.o rw_blocking.o
 	g++ $(CXXFLAGS) -o $@ stub_osd.o rw_blocking.o -ltcmalloc_minimal
@@ -76,7 +76,7 @@ qemu_driver.so: qemu_driver.o qemu_proxy.o $(FIO_CLUSTER_OBJS)
 	g++ $(CXXFLAGS) -ltcmalloc_minimal -shared -o $@ $(FIO_CLUSTER_OBJS) qemu_driver.o qemu_proxy.o -luring
 
 test_blockstore: ./libblockstore.so test_blockstore.cpp timerfd_interval.o
-	g++ $(CXXFLAGS) -Wl,-rpath,'$(LIBDIR)/vitastor' -o test_blockstore test_blockstore.cpp timerfd_interval.o ./libblockstore.so -ltcmalloc_minimal -luring
+	g++ $(CXXFLAGS) -Wl,-rpath,'$(LIBDIR)/vitastor',-rpath,'$$ORIGIN' -o test_blockstore test_blockstore.cpp timerfd_interval.o libblockstore.so -ltcmalloc_minimal -luring
 test_shit: test_shit.cpp osd_peering_pg.o
 	g++ $(CXXFLAGS) -o test_shit test_shit.cpp -luring -lm
 test_allocator: test_allocator.cpp allocator.o
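The added -Wl,-rpath,'$$ORIGIN' flag (make collapses $$ into a literal $ORIGIN token, which the dynamic loader resolves to the binary's own directory) lets osd, libfio_blockstore.so and test_blockstore find libblockstore.so next to themselves as well as in $(LIBDIR)/vitastor. A quick sanity check after a local build could look like the following sketch; the binary paths are illustrative and assume you run it from the build directory:

    readelf -d ./osd | grep -E 'RPATH|RUNPATH'
    readelf -d ./libfio_blockstore.so | grep -E 'RPATH|RUNPATH'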
README.md (12 changes)
@@ -16,7 +16,8 @@ breaking changes in the future. However, the following is implemented:
 
 - Basic part: highly-available block storage with symmetric clustering and no SPOF
 - Performance ;-D
-- Two redundancy schemes: Replication and XOR n+1 (simplest case of EC)
+- Multiple redundancy schemes: Replication, XOR n+1, Reed-Solomon erasure codes
+  based on jerasure library with any number of data and parity drives in a group
 - Configuration via simple JSON data structures in etcd
 - Automatic data distribution over OSDs, with support for:
   - Mathematical optimization for better uniformity and less data movement
@@ -39,8 +40,6 @@ breaking changes in the future. However, the following is implemented:
 - OSD creation tool (OSDs currently have to be created by hand)
 - Other administrative tools
 - Per-inode I/O and space usage statistics
-- jerasure EC support with any number of data and parity drives in a group
-- Parallel usage of multiple network interfaces
 - Proxmox and OpenNebula plugins
 - iSCSI proxy
 - Inode metadata storage in etcd
@@ -50,6 +49,7 @@ breaking changes in the future. However, the following is implemented:
 - Checksums
 - SSD+HDD optimizations, possibly including tiered storage and soft journal flushes
 - RDMA and NVDIMM support
+- Web GUI
 - Compression (possibly)
 - Read caching using system page cache (possibly)
 
@@ -353,6 +353,7 @@ and calculate disk offsets almost by hand. This will be fixed in near future.
 - Create global configuration in etcd: `etcdctl --endpoints=... put /vitastor/config/global '{"immediate_commit":"all"}'`
   (if all your drives have capacitors).
 - Create pool configuration in etcd: `etcdctl --endpoints=... put /vitastor/config/pools '{"1":{"name":"testpool","scheme":"replicated","pg_size":2,"pg_minsize":1,"pg_count":256,"failure_domain":"host"}}'`.
+  For jerasure pools the configuration should look like the following: `2:{"name":"ecpool","scheme":"jerasure","pg_size":4,"parity_chunks":2,"pg_minsize":2,"pg_count":256,"failure_domain":"host"}`.
 - Calculate offsets for your drives with `node /usr/lib/vitastor/mon/simple-offsets.js --device /dev/sdX`.
 - Make systemd units for your OSDs. Look at `/usr/lib/vitastor/mon/make-units.sh` for example.
   Notable configuration variables from the example:
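The new jerasure line above only shows the pool object itself; as an illustrative sketch (assuming, as the replicated example suggests, that all pools live in one JSON document under the same `/vitastor/config/pools` key), the full command for adding such a pool alongside pool 1 could be:

    etcdctl --endpoints=... put /vitastor/config/pools '{"1":{"name":"testpool","scheme":"replicated","pg_size":2,"pg_minsize":1,"pg_count":256,"failure_domain":"host"},"2":{"name":"ecpool","scheme":"jerasure","pg_size":4,"parity_chunks":2,"pg_minsize":2,"pg_count":256,"failure_domain":"host"}}'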
@@ -398,10 +399,7 @@ and calculate disk offsets almost by hand. This will be fixed in near future.
 
 - Object deletion requests may currently lead to 'incomplete' objects if your OSDs crash during
   deletion because proper handling of object cleanup in a cluster should be "three-phase"
-  and it's currently not implemented. Inode removal tool currently can't handle unclean
-  objects, so incomplete objects become undeletable. This will be fixed in near future
-  by allowing the inode removal tool to delete unclean objects. With this problem fixed
-  you'll be able just to repeat the removal again.
+  and it's currently not implemented. Just to repeat the removal again in this case.
 
 ## Implementation Principles
 
@@ -76,6 +76,9 @@ void journal_flusher_t::loop()
 
 void journal_flusher_t::enqueue_flush(obj_ver_id ov)
 {
+#ifdef BLOCKSTORE_DEBUG
+    printf("enqueue_flush %lx:%lx v%lu\n", ov.oid.inode, ov.oid.stripe, ov.version);
+#endif
     auto it = flush_versions.find(ov.oid);
     if (it != flush_versions.end())
     {
@@ -94,8 +97,11 @@ void journal_flusher_t::enqueue_flush(obj_ver_id ov)
     }
 }
 
-void journal_flusher_t::unshift_flush(obj_ver_id ov)
+void journal_flusher_t::unshift_flush(obj_ver_id ov, bool force)
 {
+#ifdef BLOCKSTORE_DEBUG
+    printf("unshift_flush %lx:%lx v%lu\n", ov.oid.inode, ov.oid.stripe, ov.version);
+#endif
     auto it = flush_versions.find(ov.oid);
     if (it != flush_versions.end())
     {
@@ -105,15 +111,38 @@ void journal_flusher_t::unshift_flush(obj_ver_id ov)
     else
     {
         flush_versions[ov.oid] = ov.version;
+        if (!force)
+            flush_queue.push_front(ov.oid);
     }
-    flush_queue.push_front(ov.oid);
-    if (!dequeuing && (flush_queue.size() >= flusher_start_threshold || trim_wanted > 0))
+    if (force)
+        flush_queue.push_front(ov.oid);
+    if (force || !dequeuing && (flush_queue.size() >= flusher_start_threshold || trim_wanted > 0))
     {
         dequeuing = true;
        bs->ringloop->wakeup();
     }
 }
 
+void journal_flusher_t::remove_flush(object_id oid)
+{
+#ifdef BLOCKSTORE_DEBUG
+    printf("undo_flush %lx:%lx\n", oid.inode, oid.stripe);
+#endif
+    auto v_it = flush_versions.find(oid);
+    if (v_it != flush_versions.end())
+    {
+        flush_versions.erase(v_it);
+        for (auto q_it = flush_queue.begin(); q_it != flush_queue.end(); q_it++)
+        {
+            if (*q_it == oid)
+            {
+                flush_queue.erase(q_it);
+                break;
+            }
+        }
+    }
+}
+
 void journal_flusher_t::request_trim()
 {
     dequeuing = true;
@@ -194,6 +223,7 @@ bool journal_flusher_co::loop()
 resume_0:
     if (!flusher->flush_queue.size() || !flusher->dequeuing)
     {
+stop_flusher:
         if (flusher->trim_wanted > 0 && flusher->journal_trim_counter > 0)
         {
             // Attempt forced trim
@@ -298,9 +328,7 @@ resume_0:
 #ifdef BLOCKSTORE_DEBUG
                 printf("No older flushes, stopping\n");
 #endif
-                flusher->dequeuing = false;
-                wait_state = 0;
-                return true;
+                goto stop_flusher;
             }
         }
     }
@@ -319,8 +347,8 @@ resume_1:
         return false;
     }
     // Writes and deletes shouldn't happen at the same time
-    assert(!(copy_count > 0 || has_writes) || !has_delete);
-    if (copy_count == 0 && !has_writes && !has_delete || has_delete && old_clean_loc == UINT64_MAX)
+    assert(!has_writes || !has_delete);
+    if (!has_writes && !has_delete || has_delete && old_clean_loc == UINT64_MAX)
     {
         // Nothing to flush
         bs->erase_dirty(dirty_start, std::next(dirty_end), clean_loc);
@@ -445,8 +473,8 @@ resume_1:
         clean_disk_entry *new_entry = (clean_disk_entry*)(meta_new.buf + meta_new.pos*bs->clean_entry_size);
         if (new_entry->oid.inode != 0 && new_entry->oid != cur.oid)
         {
-            printf("Fatal error (metadata corruption or bug): tried to overwrite non-zero metadata entry %lx (%lx:%lx) with %lx:%lx\n",
-                clean_loc, new_entry->oid.inode, new_entry->oid.stripe, cur.oid.inode, cur.oid.stripe);
+            printf("Fatal error (metadata corruption or bug): tried to overwrite non-zero metadata entry %lu (%lx:%lx) with %lx:%lx\n",
+                clean_loc >> bs->block_order, new_entry->oid.inode, new_entry->oid.stripe, cur.oid.inode, cur.oid.stripe);
             exit(1);
         }
         new_entry->oid = cur.oid;
@@ -513,7 +541,7 @@ resume_1:
     if (repeat_it != flusher->sync_to_repeat.end() && repeat_it->second > cur.version)
     {
         // Requeue version
-        flusher->unshift_flush({ .oid = cur.oid, .version = repeat_it->second });
+        flusher->unshift_flush({ .oid = cur.oid, .version = repeat_it->second }, false);
     }
     flusher->sync_to_repeat.erase(repeat_it);
 trim_journal:
@@ -602,7 +630,7 @@ bool journal_flusher_co::scan_dirty(int wait_base)
     {
         char err[1024];
         snprintf(
-            err, 1024, "BUG: Unexpected dirty_entry %lx:%lx v%lu state during flush: %d",
+            err, 1024, "BUG: Unexpected dirty_entry %lx:%lx v%lu unstable state during flush: %d",
            dirty_it->first.oid.inode, dirty_it->first.oid.stripe, dirty_it->first.version, dirty_it->second.state
         );
         throw std::runtime_error(err);
@@ -107,5 +107,6 @@ public:
     void request_trim();
     void release_trim();
     void enqueue_flush(obj_ver_id oid);
-    void unshift_flush(obj_ver_id oid);
+    void unshift_flush(obj_ver_id oid, bool force);
+    void remove_flush(object_id oid);
 };
@@ -287,7 +287,7 @@ void blockstore_impl_t::check_wait(blockstore_op_t *op)
     else if (PRIV(op)->wait_for == WAIT_JOURNAL_BUFFER)
     {
         int next = ((journal.cur_sector + 1) % journal.sector_count);
-        if (journal.sector_info[next].usage_count > 0 ||
+        if (journal.sector_info[next].flush_count > 0 ||
             journal.sector_info[next].dirty)
         {
             // do not submit
@@ -111,7 +111,7 @@ void blockstore_init_meta::handle_entries(void* entries, unsigned count, int blo
         {
             // free the previous block
 #ifdef BLOCKSTORE_DEBUG
-            printf("Free block %lu (new location is %lu)\n", clean_it->second.location >> block_order, done_cnt+i >> block_order);
+            printf("Free block %lu (new location is %lu)\n", clean_it->second.location >> block_order, done_cnt+i);
 #endif
             bs->data_alloc->set(clean_it->second.location >> block_order, false);
         }
@@ -557,9 +557,9 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t done_pos, u
             {
 #ifdef BLOCKSTORE_DEBUG
                 printf(
-                    "je_big_write%s oid=%lx:%lx ver=%lu loc=%08lx\n",
+                    "je_big_write%s oid=%lx:%lx ver=%lu loc=%lu\n",
                     je->type == JE_BIG_WRITE_INSTANT ? "_instant" : "",
-                    je->big_write.oid.inode, je->big_write.oid.stripe, je->big_write.version, je->big_write.location
+                    je->big_write.oid.inode, je->big_write.oid.stripe, je->big_write.version, je->big_write.location >> bs->block_order
                 );
 #endif
                 auto dirty_it = bs->dirty_db.upper_bound((obj_ver_id){
@@ -570,13 +570,18 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t done_pos, u
                 {
                     dirty_it--;
                     if (dirty_it->first.oid == je->big_write.oid &&
+                        dirty_it->first.version >= je->big_write.version &&
                         (dirty_it->second.state & BS_ST_TYPE_MASK) == BS_ST_DELETE)
                     {
                         // It is allowed to overwrite a deleted object with a
-                        // version number less than deletion version number,
+                        // version number smaller than deletion version number,
                         // because the presence of a BIG_WRITE entry means that
-                        // the data for it is already on disk.
-                        // Purge all dirty and clean entries for this object.
+                        // its data and metadata are already flushed.
+                        // We don't know if newer versions are flushed, but
+                        // the previous delete definitely is.
+                        // So we flush previous dirty entries, but retain the clean one.
+                        // This feature is required for writes happening shortly
+                        // after deletes.
                         auto dirty_end = dirty_it;
                         dirty_end++;
                         while (1)
@@ -592,13 +597,14 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t done_pos, u
                                 break;
                             }
                         }
-                        bs->erase_dirty(dirty_it, dirty_end, UINT64_MAX);
                         auto clean_it = bs->clean_db.find(je->big_write.oid);
-                        if (clean_it != bs->clean_db.end())
-                        {
-                            bs->data_alloc->set(clean_it->second.location >> bs->block_order, false);
-                            bs->clean_db.erase(clean_it);
-                        }
+                        bs->erase_dirty(
+                            dirty_it, dirty_end,
+                            clean_it != bs->clean_db.end() ? clean_it->second.location : UINT64_MAX
+                        );
+                        // Remove it from the flusher's queue, too
+                        // Otherwise it may end up referring to a small unstable write after reading the rest of the journal
+                        bs->flusher->remove_flush(je->big_write.oid);
                     }
                 }
                 auto clean_it = bs->clean_db.find(je->big_write.oid);
@@ -6,7 +6,7 @@
 blockstore_journal_check_t::blockstore_journal_check_t(blockstore_impl_t *bs)
 {
     this->bs = bs;
-    sectors_required = 0;
+    sectors_to_write = 0;
     next_pos = bs->journal.next_free;
     next_sector = bs->journal.cur_sector;
     first_sector = -1;
@@ -20,23 +20,26 @@ int blockstore_journal_check_t::check_available(blockstore_op_t *op, int entries
     int required = entries_required;
     while (1)
     {
-        int fits = bs->journal.no_same_sector_overwrites && bs->journal.sector_info[next_sector].written
+        int fits = bs->journal.no_same_sector_overwrites && next_pos == bs->journal.next_free && bs->journal.sector_info[next_sector].written
             ? 0
             : (bs->journal.block_size - next_in_pos) / size;
         if (fits > 0)
         {
+            if (fits > required)
+            {
+                fits = required;
+            }
             if (first_sector == -1)
            {
                 first_sector = next_sector;
            }
             required -= fits;
             next_in_pos += fits * size;
-            sectors_required++;
+            sectors_to_write++;
         }
         else if (bs->journal.sector_info[next_sector].dirty)
         {
-            // sectors_required is more like "sectors to write"
-            sectors_required++;
+            sectors_to_write++;
         }
         if (required <= 0)
         {
@@ -59,7 +62,7 @@ int blockstore_journal_check_t::check_available(blockstore_op_t *op, int entries
             " is too small for a batch of "+std::to_string(entries_required)+" entries of "+std::to_string(size)+" bytes"
         );
     }
-    if (bs->journal.sector_info[next_sector].usage_count > 0 ||
+    if (bs->journal.sector_info[next_sector].flush_count > 0 ||
         bs->journal.sector_info[next_sector].dirty)
     {
         // No memory buffer available. Wait for it.
@@ -71,17 +74,18 @@ int blockstore_journal_check_t::check_available(blockstore_op_t *op, int entries
             dirty++;
             used++;
         }
-        if (bs->journal.sector_info[i].usage_count > 0)
+        if (bs->journal.sector_info[i].flush_count > 0)
         {
             used++;
         }
     }
     // In fact, it's even more rare than "ran out of journal space", so print a warning
     printf(
-        "Ran out of journal sector buffers: %d/%lu buffers used (%d dirty), next buffer (%ld) is %s and flushed %lu times\n",
+        "Ran out of journal sector buffers: %d/%lu buffers used (%d dirty), next buffer (%ld)"
+        " is %s and flushed %lu times. Consider increasing \'journal_sector_buffer_count\'\n",
         used, bs->journal.sector_count, dirty, next_sector,
         bs->journal.sector_info[next_sector].dirty ? "dirty" : "not dirty",
-        bs->journal.sector_info[next_sector].usage_count
+        bs->journal.sector_info[next_sector].flush_count
     );
     PRIV(op)->wait_for = WAIT_JOURNAL_BUFFER;
     return 0;
@@ -100,11 +104,8 @@ int blockstore_journal_check_t::check_available(blockstore_op_t *op, int entries
     {
         // No space in the journal. Wait until used_start changes.
         printf(
-            "Ran out of journal space (free space: %lu bytes, sectors to write: %d)\n",
-            (bs->journal.next_free >= bs->journal.used_start
-                ? bs->journal.len-bs->journal.block_size - (bs->journal.next_free-bs->journal.used_start)
-                : bs->journal.used_start - bs->journal.next_free),
-            sectors_required
+            "Ran out of journal space (used_start=%08lx, next_free=%08lx, dirty_start=%08lx)\n",
+            bs->journal.used_start, bs->journal.next_free, bs->journal.dirty_start
         );
         PRIV(op)->wait_for = WAIT_JOURNAL;
         bs->flusher->request_trim();
@@ -116,22 +117,21 @@ int blockstore_journal_check_t::check_available(blockstore_op_t *op, int entries
 
 journal_entry* prefill_single_journal_entry(journal_t & journal, uint16_t type, uint32_t size)
 {
-    if (journal.block_size - journal.in_sector_pos < size ||
-        journal.no_same_sector_overwrites && journal.sector_info[journal.cur_sector].written)
+    if (!journal.entry_fits(size))
     {
         assert(!journal.sector_info[journal.cur_sector].dirty);
         // Move to the next journal sector
-        journal.sector_info[journal.cur_sector].written = false;
-        if (journal.sector_info[journal.cur_sector].usage_count > 0)
+        if (journal.sector_info[journal.cur_sector].flush_count > 0)
         {
             // Also select next sector buffer in memory
             journal.cur_sector = ((journal.cur_sector + 1) % journal.sector_count);
-            assert(!journal.sector_info[journal.cur_sector].usage_count);
+            assert(!journal.sector_info[journal.cur_sector].flush_count);
         }
         else
         {
             journal.dirty_start = journal.next_free;
         }
+        journal.sector_info[journal.cur_sector].written = false;
         journal.sector_info[journal.cur_sector].offset = journal.next_free;
         journal.in_sector_pos = 0;
         journal.next_free = (journal.next_free+journal.block_size) < journal.len ? journal.next_free + journal.block_size : journal.block_size;
@@ -157,7 +157,7 @@ void prepare_journal_sector_write(journal_t & journal, int cur_sector, io_uring_
 {
     journal.sector_info[cur_sector].dirty = false;
     journal.sector_info[cur_sector].written = true;
-    journal.sector_info[cur_sector].usage_count++;
+    journal.sector_info[cur_sector].flush_count++;
     ring_data_t *data = ((ring_data_t*)sqe->user_data);
     data->iov = (struct iovec){
         (journal.inmemory
@@ -133,7 +133,7 @@ inline uint32_t je_crc32(journal_entry *je)
 struct journal_sector_info_t
 {
     uint64_t offset;
-    uint64_t usage_count;
+    uint64_t flush_count;
     bool written;
     bool dirty;
 };
@@ -170,13 +170,18 @@ struct journal_t
     ~journal_t();
     bool trim();
     uint64_t get_trim_pos();
+    inline bool entry_fits(int size)
+    {
+        return !(block_size - in_sector_pos < size ||
+            no_same_sector_overwrites && sector_info[cur_sector].written);
+    }
 };
 
 struct blockstore_journal_check_t
 {
     blockstore_impl_t *bs;
     uint64_t next_pos, next_sector, next_in_pos;
-    int sectors_required, first_sector;
+    int sectors_to_write, first_sector;
     bool right_dir; // writing to the end or the beginning of the ring buffer
 
     blockstore_journal_check_t(blockstore_impl_t *bs);
@@ -75,44 +75,35 @@ skip_ov:
         return 0;
     }
     // There is sufficient space. Get SQEs
-    struct io_uring_sqe *sqe[space_check.sectors_required];
-    for (i = 0; i < space_check.sectors_required; i++)
+    struct io_uring_sqe *sqe[space_check.sectors_to_write];
+    for (i = 0; i < space_check.sectors_to_write; i++)
     {
         BS_SUBMIT_GET_SQE_DECL(sqe[i]);
     }
     // Prepare and submit journal entries
     auto cb = [this, op](ring_data_t *data) { handle_rollback_event(data, op); };
     int s = 0, cur_sector = -1;
-    if ((journal_block_size - journal.in_sector_pos) < sizeof(journal_entry_rollback) &&
-        journal.sector_info[journal.cur_sector].dirty)
-    {
-        if (cur_sector == -1)
-            PRIV(op)->min_flushed_journal_sector = 1 + journal.cur_sector;
-        cur_sector = journal.cur_sector;
-        prepare_journal_sector_write(journal, cur_sector, sqe[s++], cb);
-    }
     for (i = 0, v = (obj_ver_id*)op->buf; i < op->len; i++, v++)
     {
+        if (!journal.entry_fits(sizeof(journal_entry_rollback)) &&
+            journal.sector_info[journal.cur_sector].dirty)
+        {
+            if (cur_sector == -1)
+                PRIV(op)->min_flushed_journal_sector = 1 + journal.cur_sector;
+            prepare_journal_sector_write(journal, journal.cur_sector, sqe[s++], cb);
+            cur_sector = journal.cur_sector;
+        }
         journal_entry_rollback *je = (journal_entry_rollback*)
             prefill_single_journal_entry(journal, JE_ROLLBACK, sizeof(journal_entry_rollback));
-        journal.sector_info[journal.cur_sector].dirty = false;
         je->oid = v->oid;
         je->version = v->version;
         je->crc32 = je_crc32((journal_entry*)je);
         journal.crc32_last = je->crc32;
-        if (cur_sector != journal.cur_sector)
-        {
-            // Write previous sector. We should write the sector only after filling it,
-            // because otherwise we'll write a lot more sectors in the "no_same_sector_overwrite" mode
-            if (cur_sector != -1)
-                prepare_journal_sector_write(journal, cur_sector, sqe[s++], cb);
-            else
-                PRIV(op)->min_flushed_journal_sector = 1 + journal.cur_sector;
-            cur_sector = journal.cur_sector;
-        }
     }
-    if (cur_sector != -1)
-        prepare_journal_sector_write(journal, cur_sector, sqe[s++], cb);
+    prepare_journal_sector_write(journal, journal.cur_sector, sqe[s++], cb);
+    assert(s == space_check.sectors_to_write);
+    if (cur_sector == -1)
+        PRIV(op)->min_flushed_journal_sector = 1 + journal.cur_sector;
     PRIV(op)->max_flushed_journal_sector = 1 + journal.cur_sector;
     PRIV(op)->pending_ops = s;
     PRIV(op)->op_state = 1;
@@ -243,6 +234,9 @@ void blockstore_impl_t::erase_dirty(blockstore_dirty_db_t::iterator dirty_start,
         if (IS_DELETE(dirty_it->second.state))
         {
             object_id oid = dirty_it->first.oid;
+#ifdef BLOCKSTORE_DEBUG
+            printf("Unblock writes-after-delete %lx:%lx v%lx\n", oid.inode, oid.stripe, dirty_it->first.version);
+#endif
             dirty_it = dirty_end;
             // Unblock operations blocked by delete flushing
             uint32_t next_state = BS_ST_IN_FLIGHT;
@@ -98,45 +98,36 @@ int blockstore_impl_t::dequeue_stable(blockstore_op_t *op)
         return 0;
     }
     // There is sufficient space. Get SQEs
-    struct io_uring_sqe *sqe[space_check.sectors_required];
-    for (i = 0; i < space_check.sectors_required; i++)
+    struct io_uring_sqe *sqe[space_check.sectors_to_write];
+    for (i = 0; i < space_check.sectors_to_write; i++)
     {
         BS_SUBMIT_GET_SQE_DECL(sqe[i]);
     }
     // Prepare and submit journal entries
     auto cb = [this, op](ring_data_t *data) { handle_stable_event(data, op); };
     int s = 0, cur_sector = -1;
-    if ((journal_block_size - journal.in_sector_pos) < sizeof(journal_entry_stable) &&
-        journal.sector_info[journal.cur_sector].dirty)
-    {
-        if (cur_sector == -1)
-            PRIV(op)->min_flushed_journal_sector = 1 + journal.cur_sector;
-        cur_sector = journal.cur_sector;
-        prepare_journal_sector_write(journal, cur_sector, sqe[s++], cb);
-    }
     for (i = 0, v = (obj_ver_id*)op->buf; i < op->len; i++, v++)
     {
         // FIXME: Only stabilize versions that aren't stable yet
+        if (!journal.entry_fits(sizeof(journal_entry_stable)) &&
+            journal.sector_info[journal.cur_sector].dirty)
+        {
+            if (cur_sector == -1)
+                PRIV(op)->min_flushed_journal_sector = 1 + journal.cur_sector;
+            prepare_journal_sector_write(journal, journal.cur_sector, sqe[s++], cb);
+            cur_sector = journal.cur_sector;
+        }
         journal_entry_stable *je = (journal_entry_stable*)
             prefill_single_journal_entry(journal, JE_STABLE, sizeof(journal_entry_stable));
-        journal.sector_info[journal.cur_sector].dirty = false;
         je->oid = v->oid;
         je->version = v->version;
         je->crc32 = je_crc32((journal_entry*)je);
         journal.crc32_last = je->crc32;
-        if (cur_sector != journal.cur_sector)
-        {
-            // Write previous sector. We should write the sector only after filling it,
-            // because otherwise we'll write a lot more sectors in the "no_same_sector_overwrite" mode
-            if (cur_sector != -1)
-                prepare_journal_sector_write(journal, cur_sector, sqe[s++], cb);
-            else
-                PRIV(op)->min_flushed_journal_sector = 1 + journal.cur_sector;
-            cur_sector = journal.cur_sector;
-        }
     }
-    if (cur_sector != -1)
-        prepare_journal_sector_write(journal, cur_sector, sqe[s++], cb);
+    prepare_journal_sector_write(journal, journal.cur_sector, sqe[s++], cb);
+    assert(s == space_check.sectors_to_write);
+    if (cur_sector == -1)
+        PRIV(op)->min_flushed_journal_sector = 1 + journal.cur_sector;
     PRIV(op)->max_flushed_journal_sector = 1 + journal.cur_sector;
     PRIV(op)->pending_ops = s;
     PRIV(op)->op_state = 1;
@@ -213,9 +204,6 @@ void blockstore_impl_t::mark_stable(const obj_ver_id & v)
             break;
         }
     }
-#ifdef BLOCKSTORE_DEBUG
-        printf("enqueue_flush %lx:%lx v%lu\n", v.oid.inode, v.oid.stripe, v.version);
-#endif
         flusher->enqueue_flush(v);
     }
     auto unstab_it = unstable_writes.find(v.oid);
@@ -112,30 +112,29 @@ int blockstore_impl_t::continue_sync(blockstore_op_t *op)
         return 0;
     }
     // Get SQEs. Don't bother about merging, submit each journal sector as a separate request
-    struct io_uring_sqe *sqe[space_check.sectors_required];
-    for (int i = 0; i < space_check.sectors_required; i++)
+    struct io_uring_sqe *sqe[space_check.sectors_to_write];
+    for (int i = 0; i < space_check.sectors_to_write; i++)
     {
         BS_SUBMIT_GET_SQE_DECL(sqe[i]);
     }
     // Prepare and submit journal entries
     auto it = PRIV(op)->sync_big_writes.begin();
     int s = 0, cur_sector = -1;
-    if ((journal_block_size - journal.in_sector_pos) < sizeof(journal_entry_big_write) &&
-        journal.sector_info[journal.cur_sector].dirty)
-    {
-        if (cur_sector == -1)
-            PRIV(op)->min_flushed_journal_sector = 1 + journal.cur_sector;
-        cur_sector = journal.cur_sector;
-        prepare_journal_sector_write(journal, cur_sector, sqe[s++], cb);
-    }
     while (it != PRIV(op)->sync_big_writes.end())
     {
+        if (!journal.entry_fits(sizeof(journal_entry_big_write)) &&
+            journal.sector_info[journal.cur_sector].dirty)
+        {
+            if (cur_sector == -1)
+                PRIV(op)->min_flushed_journal_sector = 1 + journal.cur_sector;
+            prepare_journal_sector_write(journal, journal.cur_sector, sqe[s++], cb);
+            cur_sector = journal.cur_sector;
+        }
         journal_entry_big_write *je = (journal_entry_big_write*)prefill_single_journal_entry(
             journal, (dirty_db[*it].state & BS_ST_INSTANT) ? JE_BIG_WRITE_INSTANT : JE_BIG_WRITE,
             sizeof(journal_entry_big_write)
         );
         dirty_db[*it].journal_sector = journal.sector_info[journal.cur_sector].offset;
-        journal.sector_info[journal.cur_sector].dirty = false;
         journal.used_sectors[journal.sector_info[journal.cur_sector].offset]++;
 #ifdef BLOCKSTORE_DEBUG
         printf(
@@ -152,19 +151,11 @@ int blockstore_impl_t::continue_sync(blockstore_op_t *op)
         je->crc32 = je_crc32((journal_entry*)je);
         journal.crc32_last = je->crc32;
         it++;
-        if (cur_sector != journal.cur_sector)
-        {
-            // Write previous sector. We should write the sector only after filling it,
-            // because otherwise we'll write a lot more sectors in the "no_same_sector_overwrite" mode
-            if (cur_sector != -1)
-                prepare_journal_sector_write(journal, cur_sector, sqe[s++], cb);
-            else
-                PRIV(op)->min_flushed_journal_sector = 1 + journal.cur_sector;
-            cur_sector = journal.cur_sector;
-        }
     }
-    if (cur_sector != -1)
-        prepare_journal_sector_write(journal, cur_sector, sqe[s++], cb);
+    prepare_journal_sector_write(journal, journal.cur_sector, sqe[s++], cb);
+    assert(s == space_check.sectors_to_write);
+    if (cur_sector == -1)
+        PRIV(op)->min_flushed_journal_sector = 1 + journal.cur_sector;
     PRIV(op)->max_flushed_journal_sector = 1 + journal.cur_sector;
     PRIV(op)->pending_ops = s;
     PRIV(op)->op_state = SYNC_JOURNAL_WRITE_SENT;
@@ -57,13 +57,16 @@ bool blockstore_impl_t::enqueue_write(blockstore_op_t *op)
         {
             // It's allowed to write versions with low numbers over deletes
             // However, we have to flush those deletes first as we use version number for ordering
+#ifdef BLOCKSTORE_DEBUG
+            printf("Write %lx:%lx v%lu over delete (real v%lu) offset=%u len=%u\n", op->oid.inode, op->oid.stripe, version, op->version, op->offset, op->len);
+#endif
             wait_del = true;
             PRIV(op)->real_version = op->version;
             op->version = version;
             flusher->unshift_flush((obj_ver_id){
                 .oid = op->oid,
                 .version = version-1,
-            });
+            }, true);
         }
         else
         {
@@ -87,7 +90,7 @@ bool blockstore_impl_t::enqueue_write(blockstore_op_t *op)
 #ifdef BLOCKSTORE_DEBUG
         if (is_del)
             printf("Delete %lx:%lx v%lu\n", op->oid.inode, op->oid.stripe, op->version);
-        else
+        else if (!wait_del)
             printf("Write %lx:%lx v%lu offset=%u len=%u\n", op->oid.inode, op->oid.stripe, op->version, op->offset, op->len);
 #endif
         // FIXME No strict need to add it into dirty_db here, it's just left
@@ -141,6 +144,9 @@ int blockstore_impl_t::dequeue_write(blockstore_op_t *op)
     if (PRIV(op)->real_version != 0)
     {
         // Restore original low version number for unblocked operations
+#ifdef BLOCKSTORE_DEBUG
+        printf("Restoring %lx:%lx version: v%lu -> v%lu\n", op->oid.inode, op->oid.stripe, op->version, PRIV(op)->real_version);
+#endif
         auto prev_it = dirty_it;
         prev_it--;
         if (prev_it->first.oid == op->oid && prev_it->first.version >= PRIV(op)->real_version)
@@ -371,7 +377,6 @@ resume_2:
             sizeof(journal_entry_big_write)
         );
         dirty_it->second.journal_sector = journal.sector_info[journal.cur_sector].offset;
-        journal.sector_info[journal.cur_sector].dirty = false;
         journal.used_sectors[journal.sector_info[journal.cur_sector].offset]++;
 #ifdef BLOCKSTORE_DEBUG
         printf(
@@ -396,7 +401,7 @@ resume_2:
 resume_4:
     // Switch object state
 #ifdef BLOCKSTORE_DEBUG
-    printf("Ack write %lx:%lx v%lu = %d\n", op->oid.inode, op->oid.stripe, op->version, dirty_it->second.state);
+    printf("Ack write %lx:%lx v%lu = state %x\n", op->oid.inode, op->oid.stripe, op->version, dirty_it->second.state);
 #endif
     bool imm = (dirty_it->second.state & BS_ST_TYPE_MASK) == BS_ST_BIG_WRITE
         ? (immediate_commit == IMMEDIATE_ALL)
@@ -463,8 +468,8 @@ void blockstore_impl_t::release_journal_sectors(blockstore_op_t *op)
     uint64_t s = PRIV(op)->min_flushed_journal_sector;
     while (1)
     {
-        journal.sector_info[s-1].usage_count--;
-        if (s != (1+journal.cur_sector) && journal.sector_info[s-1].usage_count == 0)
+        journal.sector_info[s-1].flush_count--;
+        if (s != (1+journal.cur_sector) && journal.sector_info[s-1].flush_count == 0)
         {
             // We know for sure that we won't write into this sector anymore
             uint64_t new_ds = journal.sector_info[s-1].offset + journal.block_size;
@@ -473,7 +473,7 @@ void cluster_client_t::slice_rw(cluster_op_t *op)
     // Primary OSDs still operate individual stripes, but their size is multiplied by PG minsize in case of EC
     auto & pool_cfg = st_cli.pool_config[INODE_POOL(op->inode)];
     uint64_t pg_block_size = bs_block_size * (
-        pool_cfg.scheme == POOL_SCHEME_REPLICATED ? 1 : pool_cfg.pg_minsize
+        pool_cfg.scheme == POOL_SCHEME_REPLICATED ? 1 : pool_cfg.pg_size-pool_cfg.parity_chunks
     );
     uint64_t first_stripe = (op->offset / pg_block_size) * pg_block_size;
     uint64_t last_stripe = ((op->offset + op->len + pg_block_size - 1) / pg_block_size - 1) * pg_block_size;
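This change derives the EC stripe multiplier from the number of data chunks instead of pg_minsize. As a worked example (assuming the default 128 KiB blockstore block size, which is an assumption and not part of this diff): for a jerasure pool with pg_size=4 and parity_chunks=2, pg_block_size becomes 128 KiB * (4 - 2) = 256 KiB, so client requests are sliced on 256 KiB boundaries regardless of pg_minsize.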
debian/changelog (12 changes, vendored)
@@ -1,3 +1,15 @@
+vitastor (0.5.4-1) unstable; urgency=medium
+
+  * Bugfixes
+
+ -- Vitaliy Filippov <vitalif@yourcmc.ru>  Tue, 02 Feb 2021 23:01:24 +0300
+
+vitastor (0.5.1-1) unstable; urgency=medium
+
+  * Add jerasure support
+
+ -- Vitaliy Filippov <vitalif@yourcmc.ru>  Sat, 05 Dec 2020 17:02:26 +0300
+
 vitastor (0.5-1) unstable; urgency=medium
 
   * First packaging for Debian
debian/control (6 changes, vendored)
@@ -2,14 +2,14 @@ Source: vitastor
 Section: admin
 Priority: optional
 Maintainer: Vitaliy Filippov <vitalif@yourcmc.ru>
-Build-Depends: debhelper, liburing-dev (>= 0.6), g++ (>= 8), libstdc++6 (>= 8), linux-libc-dev, libgoogle-perftools-dev
+Build-Depends: debhelper, liburing-dev (>= 0.6), g++ (>= 8), libstdc++6 (>= 8), linux-libc-dev, libgoogle-perftools-dev, libjerasure-dev, libgf-complete-dev
 Standards-Version: 4.5.0
 Homepage: https://vitastor.io/
 Rules-Requires-Root: no
 
 Package: vitastor
-Architecture: any
-Depends: ${shlibs:Depends}, ${misc:Depends}, fio (= ${dep:fio}), qemu (= ${dep:qemu}), nodejs (>= 12), node-sprintf-js, node-ws (>= 7)
+Architecture: amd64
+Depends: ${shlibs:Depends}, ${misc:Depends}, fio (= ${dep:fio}), qemu (= ${dep:qemu}), nodejs (>= 10), node-sprintf-js, node-ws (>= 7), libjerasure2, lp-solve
 Description: Vitastor, a fast software-defined clustered block storage
  Vitastor is a small, simple and fast clustered block storage (storage for VM drives),
  architecturally similar to Ceph which means strong consistency, primary-replication,
debian/patched-qemu.Dockerfile (new file, 49 lines, vendored)
@@ -0,0 +1,49 @@
+# Build patched QEMU for Debian Buster or Bullseye/Sid inside a container
+# cd ..; podman build --build-arg REL=bullseye -v `pwd`/build:/root/build -f debian/patched-qemu.Dockerfile .
+
+ARG REL=bullseye
+
+FROM debian:$REL
+
+# again, it doesn't work otherwise
+ARG REL=bullseye
+
+WORKDIR /root
+
+RUN if [ "$REL" = "buster" ]; then \
+        echo 'deb http://deb.debian.org/debian buster-backports main' >> /etc/apt/sources.list; \
+        echo >> /etc/apt/preferences; \
+        echo 'Package: *' >> /etc/apt/preferences; \
+        echo 'Pin: release a=buster-backports' >> /etc/apt/preferences; \
+        echo 'Pin-Priority: 500' >> /etc/apt/preferences; \
+    fi; \
+    grep '^deb ' /etc/apt/sources.list | perl -pe 's/^deb/deb-src/' >> /etc/apt/sources.list; \
+    echo 'APT::Install-Recommends false;' >> /etc/apt/apt.conf; \
+    echo 'APT::Install-Suggests false;' >> /etc/apt/apt.conf
+
+RUN apt-get update
+RUN apt-get -y install qemu fio liburing1 liburing-dev libgoogle-perftools-dev devscripts
+RUN apt-get -y build-dep qemu
+RUN apt-get -y build-dep fio
+RUN apt-get --download-only source qemu
+RUN apt-get --download-only source fio
+
+ADD qemu-5.0-vitastor.patch qemu-5.1-vitastor.patch /root/vitastor/
+RUN set -e; \
+    mkdir -p /root/build/qemu-$REL; \
+    rm -rf /root/build/qemu-$REL/*; \
+    cd /root/build/qemu-$REL; \
+    dpkg-source -x /root/qemu*.dsc; \
+    if [ -d /root/build/qemu-$REL/qemu-5.0 ]; then \
+        cp /root/vitastor/qemu-5.0-vitastor.patch /root/build/qemu-$REL/qemu-5.0/debian/patches; \
+        echo qemu-5.0-vitastor.patch >> /root/build/qemu-$REL/qemu-5.0/debian/patches/series; \
+    else \
+        cp /root/vitastor/qemu-5.1-vitastor.patch /root/build/qemu-$REL/qemu-*/debian/patches; \
+        P=`ls -d /root/build/qemu-$REL/qemu-*/debian/patches`; \
+        echo qemu-5.1-vitastor.patch >> $P/series; \
+    fi; \
+    cd /root/build/qemu-$REL/qemu-*/; \
+    V=$(head -n1 debian/changelog | perl -pe 's/^.*\((.*?)(~bpo[\d\+]*)?\).*$/$1/')+vitastor1; \
+    DEBFULLNAME="Vitaliy Filippov <vitalif@yourcmc.ru>" dch -D $REL -v $V 'Plug Vitastor block driver'; \
+    DEB_BUILD_OPTIONS=nocheck dpkg-buildpackage --jobs=auto -sa; \
+    rm -rf /root/build/qemu-$REL/qemu-*/
debian/vitastor-buster.Dockerfile
vendored
80
debian/vitastor-buster.Dockerfile
vendored
@@ -1,80 +0,0 @@
|
|||||||
# Build packages for Debian 10 inside a container
|
|
||||||
# cd ..; podman build -t vitastor-buster -v `pwd`/build:/root/build -f debian/vitastor-buster.Dockerfile .
|
|
||||||
|
|
||||||
FROM debian:buster
|
|
||||||
|
|
||||||
WORKDIR /root
|
|
||||||
|
|
||||||
RUN echo 'deb http://deb.debian.org/debian buster-backports main' >> /etc/apt/sources.list; \
|
|
||||||
grep '^deb ' /etc/apt/sources.list | perl -pe 's/^deb/deb-src/' >> /etc/apt/sources.list; \
|
|
||||||
echo 'APT::Install-Recommends false;' > /etc/apt/apt.conf
|
|
||||||
|
|
||||||
RUN apt-get update
|
|
||||||
RUN apt-get -t buster-backports -y install qemu fio liburing1 liburing-dev libgoogle-perftools-dev devscripts
|
|
||||||
RUN apt-get -t buster-backports -y build-dep qemu
|
|
||||||
RUN apt-get -y build-dep fio
|
|
||||||
RUN apt-get -t buster-backports --download-only source qemu-kvm
|
|
||||||
RUN apt-get --download-only source fio
|
|
||||||
|
|
||||||
ADD qemu-5.0-vitastor.patch qemu-5.1-vitastor.patch /root/vitastor/
|
|
||||||
RUN set -e; \
|
|
||||||
mkdir -p /root/build/qemu-buster; \
|
|
||||||
rm -rf /root/build/qemu-buster/*; \
|
|
||||||
cd /root/build/qemu-buster; \
|
|
||||||
dpkg-source -x /root/qemu*.dsc; \
|
|
||||||
if [ -d /root/build/qemu-buster/qemu-5.0 ]; then \
|
|
||||||
cp /root/vitastor/qemu-5.0-vitastor.patch /root/build/qemu-buster/qemu-5.0/debian/patches; \
|
|
||||||
echo qemu-5.0-vitastor.patch >> /root/build/qemu-buster/qemu-5.0/debian/patches/series; \
|
|
||||||
else \
|
|
||||||
cp /root/vitastor/qemu-5.1-vitastor.patch /root/build/qemu-buster/qemu-*/debian/patches; \
|
|
||||||
echo qemu-5.1-vitastor.patch >> /root/build/qemu-buster/qemu-*/debian/patches/series; \
|
|
||||||
fi; \
|
|
||||||
cd /root/build/qemu-buster/qemu-*/; \
|
|
||||||
V=$(head -n1 debian/changelog | perl -pe 's/^.*\((.*?)(~bpo[\d\+]*)\).*$/$1/')+vitastor1; \
|
|
||||||
DEBFULLNAME="Vitaliy Filippov <vitalif@yourcmc.ru>" dch -D buster -v $V 'Plug Vitastor block driver'; \
|
|
||||||
DEB_BUILD_OPTIONS=nocheck dpkg-buildpackage --jobs=auto -sa; \
|
|
||||||
rm -rf /root/build/qemu-buster/qemu-*/
|
|
||||||
|
|
||||||
RUN cd /root/build/qemu-buster && apt-get -y -t buster-backports install ./qemu-system-data*.deb ./qemu-system-common_*.deb ./qemu-system-x86_*.deb ./qemu_*.deb
|
|
||||||
|
|
||||||
ADD . /root/vitastor
|
|
||||||
RUN set -e -x; \
|
|
||||||
mkdir -p /root/fio-build/; \
|
|
||||||
cd /root/fio-build/; \
|
|
||||||
rm -rf /root/fio-build/*; \
|
|
||||||
dpkg-source -x /root/fio*.dsc; \
|
|
||||||
cd /root/build/qemu-buster/; \
|
|
||||||
rm -rf qemu*/; \
|
|
||||||
dpkg-source -x qemu*.dsc; \
|
|
||||||
cd /root/build/qemu-buster/qemu*/; \
|
|
||||||
debian/rules b/configure-stamp; \
|
|
||||||
cd b/qemu; \
|
|
||||||
make -j8 qapi; \
|
|
||||||
mkdir -p /root/build/vitastor-buster; \
|
|
||||||
rm -rf /root/build/vitastor-buster/*; \
|
|
||||||
cd /root/build/vitastor-buster; \
|
|
||||||
cp -r /root/vitastor vitastor-0.5; \
|
|
||||||
ln -s /root/build/qemu-buster/qemu-*/ vitastor-0.5/qemu; \
|
|
||||||
ln -s /root/fio-build/fio-*/ vitastor-0.5/fio; \
|
|
||||||
cd vitastor-0.5; \
|
|
||||||
FIO=$(head -n1 fio/debian/changelog | perl -pe 's/^.*\((.*?)\).*$/$1/'); \
|
|
||||||
QEMU=$(head -n1 qemu/debian/changelog | perl -pe 's/^.*\((.*?)\).*$/$1/'); \
|
|
||||||
sh copy-qemu-includes.sh; \
|
|
||||||
sh copy-fio-includes.sh; \
|
|
||||||
rm qemu fio; \
|
|
||||||
mkdir -p a b debian/patches; \
|
|
||||||
mv qemu-copy b/qemu; \
|
|
||||||
mv fio-copy b/fio; \
|
|
||||||
diff -NaurpbB a b > debian/patches/qemu-fio-headers.patch || true; \
|
|
||||||
echo qemu-fio-headers.patch >> debian/patches/series; \
|
|
||||||
rm -rf a b; \
|
|
||||||
rm -rf /root/build/qemu-buster/qemu*/; \
|
|
||||||
echo "dep:fio=$FIO" > debian/substvars; \
|
|
||||||
echo "dep:qemu=$QEMU" >> debian/substvars; \
|
|
||||||
cd /root/build/vitastor-buster; \
|
|
||||||
tar --sort=name --mtime='2020-01-01' --owner=0 --group=0 --exclude=debian -cJf vitastor_0.5.orig.tar.xz vitastor-0.5; \
|
|
||||||
cd vitastor-0.5; \
|
|
||||||
V=$(head -n1 debian/changelog | perl -pe 's/^.*\((.*?)\).*$/$1/'); \
|
|
||||||
DEBFULLNAME="Vitaliy Filippov <vitalif@yourcmc.ru>" dch -D buster -v "$V""buster" "Rebuild for buster"; \
|
|
||||||
DEB_BUILD_OPTIONS=nocheck dpkg-buildpackage --jobs=auto -sa; \
|
|
||||||
rm -rf /root/build/vitastor-buster/vitastor-*/
|
|
@@ -1,5 +1,5 @@
-# Build packages for Debian Bullseye/Sid inside a container
+# Build Vitastor packages for Debian Buster or Bullseye/Sid inside a container
-# cd ..; podman build -t vitastor-bullseye -v `pwd`/build:/root/build -f debian/vitastor-bullseye.Dockerfile .
+# cd ..; podman build --build-arg REL=bullseye -v `pwd`/build:/root/build -f debian/vitastor.Dockerfile .

 ARG REL=bullseye

@@ -10,8 +10,16 @@ ARG REL=bullseye

 WORKDIR /root

-RUN grep '^deb ' /etc/apt/sources.list | perl -pe 's/^deb/deb-src/' >> /etc/apt/sources.list; \
-echo 'APT::Install-Recommends false;' > /etc/apt/apt.conf
+RUN if [ "$REL" = "buster" ]; then \
+echo 'deb http://deb.debian.org/debian buster-backports main' >> /etc/apt/sources.list; \
+echo >> /etc/apt/preferences; \
+echo 'Package: *' >> /etc/apt/preferences; \
+echo 'Pin: release a=buster-backports' >> /etc/apt/preferences; \
+echo 'Pin-Priority: 500' >> /etc/apt/preferences; \
+fi; \
+grep '^deb ' /etc/apt/sources.list | perl -pe 's/^deb/deb-src/' >> /etc/apt/sources.list; \
+echo 'APT::Install-Recommends false;' >> /etc/apt/apt.conf; \
+echo 'APT::Install-Suggests false;' >> /etc/apt/apt.conf

 RUN apt-get update
 RUN apt-get -y install qemu fio liburing1 liburing-dev libgoogle-perftools-dev devscripts
@@ -19,29 +27,7 @@ RUN apt-get -y build-dep qemu
 RUN apt-get -y build-dep fio
 RUN apt-get --download-only source qemu
 RUN apt-get --download-only source fio
+RUN apt-get -y install libjerasure-dev
-ADD qemu-5.0-vitastor.patch qemu-5.1-vitastor.patch /root/vitastor/
-RUN set -e; \
-mkdir -p /root/build/qemu-$REL; \
-rm -rf /root/build/qemu-$REL/*; \
-cd /root/build/qemu-$REL; \
-dpkg-source -x /root/qemu*.dsc; \
-if [ -d /root/build/qemu-$REL/qemu-5.0 ]; then \
-cp /root/vitastor/qemu-5.0-vitastor.patch /root/build/qemu-$REL/qemu-5.0/debian/patches; \
-echo qemu-5.0-vitastor.patch >> /root/build/qemu-$REL/qemu-5.0/debian/patches/series; \
-else \
-cp /root/vitastor/qemu-5.1-vitastor.patch /root/build/qemu-$REL/qemu-*/debian/patches; \
-P=`ls -d /root/build/qemu-$REL/qemu-*/debian/patches`; \
-echo qemu-5.1-vitastor.patch >> $P/series; \
-fi; \
-cd /root/build/qemu-$REL/qemu-*/; \
-V=$(head -n1 debian/changelog | perl -pe 's/^.*\((.*?)(~bpo[\d\+]*)?\).*$/$1/')+vitastor1; \
-echo ">>> VERSION: $V"; \
-DEBFULLNAME="Vitaliy Filippov <vitalif@yourcmc.ru>" dch -D $REL -v $V 'Plug Vitastor block driver'; \
-DEB_BUILD_OPTIONS=nocheck dpkg-buildpackage --jobs=auto -sa; \
-rm -rf /root/build/qemu-$REL/qemu-*/
-
-RUN cd /root/build/qemu-$REL && apt-get -y install ./qemu-system-data*.deb ./qemu-system-common_*.deb ./qemu-system-x86_*.deb ./qemu_*.deb

 ADD . /root/vitastor
 RUN set -e -x; \
@@ -59,10 +45,10 @@ RUN set -e -x; \
 mkdir -p /root/build/vitastor-$REL; \
 rm -rf /root/build/vitastor-$REL/*; \
 cd /root/build/vitastor-$REL; \
-cp -r /root/vitastor vitastor-0.5; \
+cp -r /root/vitastor vitastor-0.5.4; \
-ln -s /root/build/qemu-$REL/qemu-*/ vitastor-0.5/qemu; \
+ln -s /root/build/qemu-$REL/qemu-*/ vitastor-0.5.4/qemu; \
-ln -s /root/fio-build/fio-*/ vitastor-0.5/fio; \
+ln -s /root/fio-build/fio-*/ vitastor-0.5.4/fio; \
-cd vitastor-0.5; \
+cd vitastor-0.5.4; \
 FIO=$(head -n1 fio/debian/changelog | perl -pe 's/^.*\((.*?)\).*$/$1/'); \
 QEMU=$(head -n1 qemu/debian/changelog | perl -pe 's/^.*\((.*?)\).*$/$1/'); \
 sh copy-qemu-includes.sh; \
@@ -78,8 +64,8 @@ RUN set -e -x; \
 echo "dep:fio=$FIO" > debian/substvars; \
 echo "dep:qemu=$QEMU" >> debian/substvars; \
 cd /root/build/vitastor-$REL; \
-tar --sort=name --mtime='2020-01-01' --owner=0 --group=0 --exclude=debian -cJf vitastor_0.5.orig.tar.xz vitastor-0.5; \
+tar --sort=name --mtime='2020-01-01' --owner=0 --group=0 --exclude=debian -cJf vitastor_0.5.4.orig.tar.xz vitastor-0.5.4; \
-cd vitastor-0.5; \
+cd vitastor-0.5.4; \
 V=$(head -n1 debian/changelog | perl -pe 's/^.*\((.*?)\).*$/$1/'); \
 DEBFULLNAME="Vitaliy Filippov <vitalif@yourcmc.ru>" dch -D $REL -v "$V""$REL" "Rebuild for $REL"; \
 DEB_BUILD_OPTIONS=nocheck dpkg-buildpackage --jobs=auto -sa; \
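For reference, the header comment of the merged Dockerfile shows the bullseye invocation; a Buster build would presumably just swap the REL build argument (the exact value is an assumption, everything else follows the comment above):

# assumed invocation, mirroring the bullseye example in the header comment
cd ..
podman build --build-arg REL=buster -v `pwd`/build:/root/build -f debian/vitastor.Dockerfile .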
dump_journal.cpp
@@ -26,23 +26,32 @@ struct journal_dump_t
 uint64_t journal_offset;
 uint64_t journal_len;
 uint64_t journal_pos;
+bool all;
+bool started;
 int fd;
+uint32_t crc32_last;

-void dump_block(void *buf);
+int dump_block(void *buf);
 };

 int main(int argc, char *argv[])
 {
-if (argc < 5)
+journal_dump_t self = { 0 };
+int b = 1;
+if (argc >= 2 && !strcmp(argv[1], "--all"))
 {
-printf("USAGE: %s <journal_file> <journal_block_size> <offset> <size>\n", argv[0]);
+self.all = true;
+b = 2;
+}
+if (argc < b+4)
+{
+printf("USAGE: %s [--all] <journal_file> <journal_block_size> <offset> <size>\n", argv[0]);
 return 1;
 }
-journal_dump_t self;
-self.journal_device = argv[1];
-self.journal_block = strtoul(argv[2], NULL, 10);
-self.journal_offset = strtoull(argv[3], NULL, 10);
-self.journal_len = strtoull(argv[4], NULL, 10);
+self.journal_device = argv[b];
+self.journal_block = strtoul(argv[b+1], NULL, 10);
+self.journal_offset = strtoull(argv[b+2], NULL, 10);
+self.journal_len = strtoull(argv[b+3], NULL, 10);

 if (self.journal_block < MEM_ALIGNMENT || (self.journal_block % MEM_ALIGNMENT) ||
 self.journal_block > 128*1024)
 {
@@ -57,30 +66,64 @@ int main(int argc, char *argv[])
 }
 void *data = memalign(MEM_ALIGNMENT, self.journal_block);
 self.journal_pos = 0;
-while (self.journal_pos < self.journal_len)
+if (self.all)
+{
+while (self.journal_pos < self.journal_len)
+{
+int r = pread(self.fd, data, self.journal_block, self.journal_offset+self.journal_pos);
+assert(r == self.journal_block);
+uint64_t s;
+for (s = 0; s < self.journal_block; s += 8)
+{
+if (*((uint64_t*)(data+s)) != 0)
+break;
+}
+if (s == self.journal_block)
+{
+printf("offset %08lx: zeroes\n", self.journal_pos);
+self.journal_pos += self.journal_block;
+}
+else if (((journal_entry*)data)->magic == JOURNAL_MAGIC)
+{
+printf("offset %08lx:\n", self.journal_pos);
+self.dump_block(data);
+}
+else
+{
+printf("offset %08lx: no magic in the beginning, looks like random data (pattern=%lx)\n", self.journal_pos, *((uint64_t*)data));
+self.journal_pos += self.journal_block;
+}
+}
+}
+else
 {
 int r = pread(self.fd, data, self.journal_block, self.journal_offset+self.journal_pos);
 assert(r == self.journal_block);
-uint64_t s;
+journal_entry *je = (journal_entry*)(data);
-for (s = 0; s < self.journal_block; s += 8)
+if (je->magic != JOURNAL_MAGIC || je->type != JE_START || je_crc32(je) != je->crc32)
 {
-if (*((uint64_t*)(data+s)) != 0)
+printf("offset %08lx: journal superblock is invalid\n", self.journal_pos);
-break;
-}
-if (s == self.journal_block)
-{
-printf("offset %08lx: zeroes\n", self.journal_pos);
-self.journal_pos += self.journal_block;
-}
-else if (((journal_entry*)data)->magic == JOURNAL_MAGIC)
-{
-printf("offset %08lx:\n", self.journal_pos);
-self.dump_block(data);
 }
 else
 {
-printf("offset %08lx: no magic in the beginning, looks like random data (pattern=%lx)\n", self.journal_pos, *((uint64_t*)data));
-self.journal_pos += self.journal_block;
+printf("offset %08lx:\n", self.journal_pos);
+self.dump_block(data);
+self.started = false;
+self.journal_pos = je->start.journal_start;
+while (1)
+{
+if (self.journal_pos >= self.journal_len)
+self.journal_pos = self.journal_block;
+r = pread(self.fd, data, self.journal_block, self.journal_offset+self.journal_pos);
+assert(r == self.journal_block);
+printf("offset %08lx:\n", self.journal_pos);
+r = self.dump_block(data);
+if (r <= 0)
+{
+printf("end of the journal\n");
+break;
+}
+}
 }
 }
 free(data);
@@ -88,7 +131,7 @@ int main(int argc, char *argv[])
 return 0;
 }

-void journal_dump_t::dump_block(void *buf)
+int journal_dump_t::dump_block(void *buf)
 {
 uint32_t pos = 0;
 journal_pos += journal_block;
@@ -97,12 +140,19 @@ void journal_dump_t::dump_block(void *buf)
 while (pos < journal_block)
 {
 journal_entry *je = (journal_entry*)(buf + pos);
-if (je->magic != JOURNAL_MAGIC || je->type < JE_MIN || je->type > JE_MAX)
+if (je->magic != JOURNAL_MAGIC || je->type < JE_MIN || je->type > JE_MAX ||
+!all && started && je->crc32_prev != crc32_last)
 {
 break;
 }
-const char *crc32_valid = je_crc32(je) == je->crc32 ? "(valid)" : "(invalid)";
+bool crc32_valid = je_crc32(je) == je->crc32;
-printf("entry % 3d: crc32=%08x %s prev=%08x ", entry, je->crc32, crc32_valid, je->crc32_prev);
+if (!all && !crc32_valid)
+{
+break;
+}
+started = true;
+crc32_last = je->crc32;
+printf("entry % 3d: crc32=%08x %s prev=%08x ", entry, je->crc32, (crc32_valid ? "(valid)" : "(invalid)"), je->crc32_prev);
 if (je->type == JE_START)
 {
 printf("je_start start=%08lx\n", je->start.journal_start);
@@ -170,4 +220,5 @@ void journal_dump_t::dump_block(void *buf)
 {
 journal_pos = journal_len;
 }
+return entry;
 }
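Going by the new USAGE string, the journal dumper would be run roughly like this (the device path, block size and area size below are only illustrative values):

# follow the journal from its superblock and stop at the first invalid entry
./dump_journal ./journal.bin 4096 0 16777216
# dump every block in the area, including zero-filled and garbage blocks
./dump_journal --all ./journal.bin 4096 0 16777216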
@@ -84,8 +84,12 @@ void epoll_manager_t::handle_epoll_events()
 nfds = epoll_wait(epoll_fd, events, MAX_EPOLL_EVENTS, 0);
 for (int i = 0; i < nfds; i++)
 {
-auto & cb = epoll_handlers[events[i].data.fd];
-cb(events[i].data.fd, events[i].events);
+auto cb_it = epoll_handlers.find(events[i].data.fd);
+if (cb_it != epoll_handlers.end())
+{
+auto & cb = cb_it->second;
+cb(events[i].data.fd, events[i].events);
+}
 }
 } while (nfds == MAX_EPOLL_EVENTS);
 }
@@ -407,6 +407,7 @@ void etcd_state_client_t::parse_state(const std::string & key, const json11::Jso
 if (pc.pg_stripe_size < min_stripe_size)
 pc.pg_stripe_size = min_stripe_size;
 // Save
+pc.real_pg_count = this->pool_config[pool_id].real_pg_count;
 std::swap(pc.pg_config, this->pool_config[pool_id].pg_config);
 std::swap(this->pool_config[pool_id], pc);
 auto & parsed_cfg = this->pool_config[pool_id];
@@ -93,7 +93,7 @@ static struct fio_option options[] = {
 {
 .name = "cluster_log_level",
 .lname = "cluster log level",
-.type = FIO_OPT_BOOL,
+.type = FIO_OPT_INT,
 .off1 = offsetof(struct sec_options, cluster_log),
 .help = "Set log level for the Vitastor client",
 .def = "0",
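With the option type changed from FIO_OPT_BOOL to FIO_OPT_INT, cluster_log_level now takes a numeric level in a fio job file; a hypothetical snippet (the engine .so path is an assumption, only the option name comes from the diff):

[global]
ioengine=external:./libfio_cluster.so
cluster_log_level=3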
@@ -145,9 +145,7 @@ static void sec_cleanup(struct thread_data *td)
 delete bsd->cli;
 delete bsd->epmgr;
 delete bsd->ringloop;
-bsd->cli = NULL;
-bsd->epmgr = NULL;
-bsd->ringloop = NULL;
+delete bsd;
 }
 }
@@ -140,6 +140,7 @@ static void sec_cleanup(struct thread_data *td)
 if (bsd)
 {
 close(bsd->connect_fd);
+delete bsd;
 }
 }
|
|||||||
exit(1);
|
exit(1);
|
||||||
}
|
}
|
||||||
io_u* io = it->second;
|
io_u* io = it->second;
|
||||||
|
bsd->queue.erase(it);
|
||||||
if (io->ddir == DDIR_READ)
|
if (io->ddir == DDIR_READ)
|
||||||
{
|
{
|
||||||
if (reply.hdr.retval != io->xfer_buflen)
|
if (reply.hdr.retval != io->xfer_buflen)
|
||||||
|
@@ -30,7 +30,7 @@ osd_messenger_t::~osd_messenger_t()
 {
 while (clients.size() > 0)
 {
-stop_client(clients.begin()->first);
+stop_client(clients.begin()->first, true);
 }
 }
|
|||||||
timeout_id = tfd->set_timer(1000*peer_connect_timeout, false, [this, peer_fd](int timer_id)
|
timeout_id = tfd->set_timer(1000*peer_connect_timeout, false, [this, peer_fd](int timer_id)
|
||||||
{
|
{
|
||||||
osd_num_t peer_osd = clients.at(peer_fd)->osd_num;
|
osd_num_t peer_osd = clients.at(peer_fd)->osd_num;
|
||||||
stop_client(peer_fd);
|
stop_client(peer_fd, true);
|
||||||
on_connect_peer(peer_osd, -EIO);
|
on_connect_peer(peer_osd, -EIO);
|
||||||
return;
|
return;
|
||||||
});
|
});
|
||||||
@@ -149,7 +149,7 @@ void osd_messenger_t::handle_connect_epoll(int peer_fd)
 }
 if (result != 0)
 {
-stop_client(peer_fd);
+stop_client(peer_fd, true);
 on_connect_peer(peer_osd, -result);
 return;
 }
@@ -171,7 +171,7 @@ void osd_messenger_t::handle_peer_epoll(int peer_fd, int epoll_events)
 {
 // Stop client
 printf("[OSD %lu] client %d disconnected\n", this->osd_num, peer_fd);
-stop_client(peer_fd);
+stop_client(peer_fd, true);
 }
 else if (epoll_events & EPOLLIN)
 {
@@ -309,7 +309,7 @@ void osd_messenger_t::cancel_op(osd_op_t *op)
 }
 }

-void osd_messenger_t::stop_client(int peer_fd)
+void osd_messenger_t::stop_client(int peer_fd, bool force)
 {
 assert(peer_fd != 0);
 auto it = clients.find(peer_fd);
@@ -334,6 +334,10 @@ void osd_messenger_t::stop_client(int peer_fd)
 printf("[OSD %lu] Stopping client %d (regular client)\n", osd_num, peer_fd);
 }
 }
+else if (!force)
+{
+return;
+}
 cl->peer_state = PEER_STOPPED;
 clients.erase(it);
 tfd->set_fd_handler(peer_fd, false, NULL);
@@ -348,7 +352,14 @@ void osd_messenger_t::stop_client(int peer_fd)
 }
 if (cl->read_op)
 {
-delete cl->read_op;
+if (cl->read_op->callback)
+{
+cancel_op(cl->read_op);
+}
+else
+{
+delete cl->read_op;
+}
 cl->read_op = NULL;
 }
 for (auto rit = read_ready_clients.begin(); rit != read_ready_clients.end(); rit++)
@@ -275,7 +275,7 @@ struct osd_messenger_t

 public:
 void connect_peer(uint64_t osd_num, json11::Json peer_state);
-void stop_client(int peer_fd);
+void stop_client(int peer_fd, bool force = false);
 void outbox_push(osd_op_t *cur_op);
 std::function<void(osd_op_t*)> exec_op;
 std::function<void(osd_num_t)> repeer_pgs;
@@ -253,7 +253,10 @@ class Mon
 const res = await this.etcd_call('/kv/txn', { success: [
 { requestRange: { key: b64(this.etcd_prefix+'/config/global') } }
 ] }, this.etcd_start_timeout, -1);
-this.parse_kv(res.responses[0].response_range.kvs[0]);
+if (res.responses[0].response_range.kvs)
+{
+this.parse_kv(res.responses[0].response_range.kvs[0]);
+}
 this.check_config();
 }
@@ -4,6 +4,7 @@
 // Simple tool to calculate journal and metadata offsets for a single device
 // Will be replaced by smarter tools in the future

+const fs = require('fs').promises;
 const child_process = require('child_process');

 async function run()
@@ -15,6 +16,7 @@ async function run()
 device_block_size: 4096,
 journal_offset: 0,
 device_size: 0,
+format: 'text',
 };
 for (let i = 2; i < process.argv.length; i++)
 {
@@ -24,7 +26,22 @@ async function run()
 i++;
 }
 }
-const device_size = Number(options.device_size || await system("blockdev --getsize64 "+options.device));
+if (!options.device)
+{
+process.stderr.write('USAGE: nodejs '+process.argv[1]+' --device /dev/sdXXX\n');
+process.exit(1);
+}
+options.device_size = Number(options.device_size);
+let device_size = options.device_size;
+if (!device_size)
+{
+const st = await fs.stat(options.device);
+options.device_block_size = st.blksize;
+if (st.isBlockDevice())
+device_size = Number(await system("/sbin/blockdev --getsize64 "+options.device))
+else
+device_size = st.size;
+}
 if (!device_size)
 {
 process.stderr.write('Failed to get device size\n');
@@ -32,25 +49,45 @@ async function run()
 }
 options.journal_offset = Math.ceil(options.journal_offset/options.device_block_size)*options.device_block_size;
 const meta_offset = options.journal_offset + Math.ceil(options.journal_size/options.device_block_size)*options.device_block_size;
-const entries_per_block = Math.floor(options.device_block_size / (24 + options.object_size/options.bitmap_granularity/8));
+const entries_per_block = Math.floor(options.device_block_size / (24 + 2*options.object_size/options.bitmap_granularity/8));
 const object_count = Math.floor((device_size-meta_offset)/options.object_size);
 const meta_size = Math.ceil(object_count / entries_per_block) * options.device_block_size;
 const data_offset = meta_offset + meta_size;
 const meta_size_fmt = (meta_size > 1024*1024*1024 ? Math.round(meta_size/1024/1024/1024*100)/100+" GB"
 : Math.round(meta_size/1024/1024*100)/100+" MB");
-process.stdout.write(
-`Metadata size: ${meta_size_fmt}\n`+
-`Options for the OSD:\n`+
-` --journal_offset ${options.journal_offset}\n`+
-` --meta_offset ${meta_offset}\n`+
-` --data_offset ${data_offset}\n`+
-(options.device_size ? ` --data_size ${device_size-data_offset}\n` : '')
-);
+if (options.format == 'text' || options.format == 'options')
+{
+if (options.format == 'text')
+{
+process.stderr.write(
+`Metadata size: ${meta_size_fmt}\n`+
+`Options for the OSD:\n`
+);
+}
+process.stdout.write(
+` --data_device ${options.device}\n`+
+` --journal_offset ${options.journal_offset}\n`+
+` --meta_offset ${meta_offset}\n`+
+` --data_offset ${data_offset}\n`+
+(options.device_size ? ` --data_size ${device_size-data_offset}\n` : '')
+);
+}
+else if (options.format == 'env')
+{
+process.stdout.write(
+`journal_offset=${options.journal_offset}\n`+
+`meta_offset=${meta_offset}\n`+
+`data_offset=${data_offset}\n`+
+`data_size=${device_size-data_offset}\n`
+);
+}
+else
+process.stdout.write('Unknown format: '+options.format);
 }

 function system(cmd)
 {
-return new Promise((ok, no) => child_process.exec(cmd, { maxBuffer: 64*1024*1024 }, (err, stdout, stderr) => (err ? no(err) : ok(stdout))));
+return new Promise((ok, no) => child_process.exec(cmd, { maxBuffer: 64*1024*1024 }, (err, stdout, stderr) => (err ? no(err.message) : ok(stdout))));
 }

-run().catch(console.error);
+run().catch(err => { console.error(err); process.exit(1); });
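With the new format option the calculated offsets can be consumed straight from a shell; a sketch (the script path and device name are assumptions, the option names come from the diff):

# --format env prints key=value pairs that can be eval'ed by the caller
eval $(node mon/simple-offsets.js --device /dev/sdb --format env)
echo $journal_offset $meta_offset $data_offset $data_size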
@@ -9,6 +9,10 @@ void osd_messenger_t::read_requests()
 {
 int peer_fd = read_ready_clients[i];
 osd_client_t *cl = clients[peer_fd];
+if (cl->read_msg.msg_iovlen)
+{
+continue;
+}
 if (cl->read_remaining < receive_buffer_size)
 {
 cl->read_iov.iov_base = cl->in_buf;
@@ -29,6 +33,7 @@ void osd_messenger_t::read_requests()
 io_uring_sqe* sqe = ringloop->get_sqe();
 if (!sqe)
 {
+cl->read_msg.msg_iovlen = 0;
 read_ready_clients.erase(read_ready_clients.begin(), read_ready_clients.begin() + i);
 return;
 }
@@ -52,6 +57,7 @@ void osd_messenger_t::read_requests()
 bool osd_messenger_t::handle_read(int result, osd_client_t *cl)
 {
 bool ret = false;
+cl->read_msg.msg_iovlen = 0;
 cl->refs--;
 if (cl->peer_state == PEER_STOPPED)
 {
@@ -160,8 +166,14 @@ bool osd_messenger_t::handle_finished_read(osd_client_t *cl)
 {
 if (cl->read_op->req.hdr.magic == SECONDARY_OSD_REPLY_MAGIC)
 return handle_reply_hdr(cl);
-else
+else if (cl->read_op->req.hdr.magic == SECONDARY_OSD_OP_MAGIC)
 handle_op_hdr(cl);
+else
+{
+printf("Received garbage: magic=%lx id=%lu opcode=%lx from %d\n", cl->read_op->req.hdr.magic, cl->read_op->req.hdr.id, cl->read_op->req.hdr.opcode, cl->peer_fd);
+stop_client(cl->peer_fd);
+return false;
+}
 }
 else if (cl->read_state == CL_READ_DATA)
 {
@@ -46,7 +46,8 @@ void osd_messenger_t::outbox_push(osd_op_t *cur_op)
 to_send_list.push_back((iovec){ .iov_base = cur_op->req.buf, .iov_len = OSD_PACKET_SIZE });
 cl->sent_ops[cur_op->req.hdr.id] = cur_op;
 }
-// Pre-defined send_lists
+to_outbox.push_back(NULL);
+// Operation data
 if ((cur_op->op_type == OSD_OP_IN
 ? (cur_op->req.hdr.opcode == OSD_OP_READ ||
 cur_op->req.hdr.opcode == OSD_OP_SEC_READ ||
@@ -58,17 +59,17 @@ void osd_messenger_t::outbox_push(osd_op_t *cur_op)
 cur_op->req.hdr.opcode == OSD_OP_SEC_STABILIZE ||
 cur_op->req.hdr.opcode == OSD_OP_SEC_ROLLBACK)) && cur_op->iov.count > 0)
 {
-to_outbox.push_back(NULL);
 for (int i = 0; i < cur_op->iov.count; i++)
 {
 assert(cur_op->iov.buf[i].iov_base);
 to_send_list.push_back(cur_op->iov.buf[i]);
-to_outbox.push_back(i == cur_op->iov.count-1 ? cur_op : NULL);
+to_outbox.push_back(NULL);
 }
 }
-else
+if (cur_op->op_type == OSD_OP_IN)
 {
-to_outbox.push_back(cur_op);
+// To free it later
+to_outbox[to_outbox.size()-1] = cur_op;
 }
 if (!ringloop)
 {
@@ -92,6 +93,10 @@ void osd_messenger_t::outbox_push(osd_op_t *cur_op)
 void osd_messenger_t::measure_exec(osd_op_t *cur_op)
 {
 // Measure execution latency
+if (cur_op->req.hdr.opcode > OSD_OP_MAX)
+{
+return;
+}
 timespec tv_end;
 clock_gettime(CLOCK_REALTIME, &tv_end);
 stats.op_stat_count[cur_op->req.hdr.opcode]++;
@@ -198,11 +203,8 @@ void osd_messenger_t::handle_send(int result, osd_client_t *cl)
 {
 if (cl->outbox[done])
 {
-// Operation fully sent
+// Reply fully sent
-if (cl->outbox[done]->op_type == OSD_OP_IN)
+delete cl->outbox[done];
-{
-delete cl->outbox[done];
-}
 }
 result -= iov.iov_len;
 done++;
@@ -384,6 +384,7 @@ void osd_t::create_osd_state()
 {
 st_cli.load_pgs();
 }
+report_statistics();
 });
 }

@@ -494,7 +495,11 @@ void osd_t::apply_pg_count()
 }
 if (still_active > 0)
 {
-printf("[OSD %lu] PG count change detected, but %d PG(s) are still active. This is not allowed. Exiting\n", this->osd_num, still_active);
+printf(
+"[OSD %lu] PG count change detected for pool %u (new is %lu, old is %u),"
+" but %u PG(s) are still active. This is not allowed. Exiting\n",
+this->osd_num, pool_item.first, pool_item.second.real_pg_count, pg_counts[pool_item.first], still_active
+);
 force_stop(1);
 return;
 }
@@ -94,7 +94,7 @@ struct pg_t
 std::vector<osd_num_t> cur_set;
 // same thing in state_dict-like format
 pg_osd_set_t cur_loc_set;
-// moved object map. by default, each object is considered to reside on the cur_set.
+// moved object map. by default, each object is considered to reside on cur_set.
 // this map stores all objects that differ.
 // it may consume up to ~ (raw storage / object size) * 24 bytes in the worst case scenario
 // which is up to ~192 MB per 1 TB in the worst case scenario
@@ -489,7 +489,11 @@ resume_7:
 }
 // Remember PG as dirty to drop the connection when PG goes offline
 // (this is required because of the "lazy sync")
-c_cli.clients[cur_op->peer_fd]->dirty_pgs.insert({ .pool_id = pg.pool_id, .pg_num = pg.pg_num });
+auto cl_it = c_cli.clients.find(cur_op->peer_fd);
+if (cl_it != c_cli.clients.end())
+{
+cl_it->second->dirty_pgs.insert({ .pool_id = pg.pool_id, .pg_num = pg.pg_num });
+}
 dirty_pgs.insert({ .pool_id = pg.pool_id, .pg_num = pg.pg_num });
 }
 return true;
@@ -295,7 +295,7 @@ void osd_t::handle_primary_subop(osd_op_t *subop, osd_op_t *cur_op)
 uint64_t version = subop->reply.sec_rw.version;
 #ifdef OSD_DEBUG
 uint64_t peer_osd = c_cli.clients.find(subop->peer_fd) != c_cli.clients.end()
-? c_cli.clients[subop->peer_fd].osd_num : osd_num;
+? c_cli.clients[subop->peer_fd]->osd_num : osd_num;
 printf("subop %lu from osd %lu: version = %lu\n", opcode, peer_osd, version);
 #endif
 if (op_data->fact_ver != 0 && op_data->fact_ver != version)
osd_rmw.cpp
@@ -1,6 +1,7 @@
 // Copyright (c) Vitaliy Filippov, 2019+
 // License: VNPL-1.0 (see README.md for details)

+#include <stdexcept>
 #include <string.h>
 #include <assert.h>
 #include <jerasure/reed_sol.h>
@@ -10,6 +11,8 @@
 #include "osd_rmw.h"
 #include "malloc_or_die.h"

+#define OSD_JERASURE_W 32
+
 static inline void extend_read(uint32_t start, uint32_t end, osd_rmw_stripe_t & stripe)
 {
 if (stripe.read_end == 0)
@@ -157,7 +160,7 @@ void use_jerasure(int pg_size, int pg_minsize, bool use)
 {
 return;
 }
-int *matrix = reed_sol_vandermonde_coding_matrix(pg_minsize, pg_size-pg_minsize, 32);
+int *matrix = reed_sol_vandermonde_coding_matrix(pg_minsize, pg_size-pg_minsize, OSD_JERASURE_W);
 matrices[key] = (reed_sol_matrix_t){
 .refs = 0,
 .data = matrix,
@@ -213,8 +216,8 @@ int* get_jerasure_decoding_matrix(osd_rmw_stripe_t *stripes, int pg_size, int pg
 int *decoding_matrix = dm_ids + pg_minsize;
 if (!dm_ids)
 throw std::bad_alloc();
-// we always use row_k_ones=1 and w=32
+// we always use row_k_ones=1 and w=8 (OSD_JERASURE_W)
-if (jerasure_make_decoding_matrix(pg_minsize, pg_size-pg_minsize, 32, matrix->data, erased, decoding_matrix, dm_ids) < 0)
+if (jerasure_make_decoding_matrix(pg_minsize, pg_size-pg_minsize, OSD_JERASURE_W, matrix->data, erased, decoding_matrix, dm_ids) < 0)
 {
 free(dm_ids);
 throw std::runtime_error("jerasure_make_decoding_matrix() failed");
@@ -251,7 +254,7 @@ void reconstruct_stripes_jerasure(osd_rmw_stripe_t *stripes, int pg_size, int pg
 }
 data_ptrs[role] = (char*)stripes[role].read_buf;
 jerasure_matrix_dotprod(
-pg_minsize, 32, decoding_matrix+(role*pg_minsize), dm_ids, role,
+pg_minsize, OSD_JERASURE_W, decoding_matrix+(role*pg_minsize), dm_ids, role,
 data_ptrs, data_ptrs+pg_minsize, stripes[role].read_end - stripes[role].read_start
 );
 }
@@ -693,7 +696,7 @@ void calc_rmw_parity_jerasure(osd_rmw_stripe_t *stripes, int pg_size, int pg_min
 }
 }
 jerasure_matrix_encode(
-pg_minsize, pg_size-pg_minsize, 32, matrix->data,
+pg_minsize, pg_size-pg_minsize, OSD_JERASURE_W, matrix->data,
 (char**)data_ptrs, (char**)data_ptrs+pg_minsize, next_end-pos
 );
 pos = next_end;
@@ -77,7 +77,10 @@ void ring_loop_t::loop()
 dl.callback(&dl);
 }
 else
+{
+printf("Warning: empty callback in SQE\n");
 free_ring_data[free_ring_data_ptr++] = d - ring_datas;
+}
 io_uring_cqe_seen(&ring, cqe);
 }
 while (get_sqe_queue.size() > 0)
@@ -142,7 +142,10 @@ public:
 return NULL;
 struct io_uring_sqe* sqe = io_uring_get_sqe(&ring);
 if (sqe)
+{
+*sqe = { 0 };
 io_uring_sqe_set_data(sqe, ring_datas + free_ring_data[--free_ring_data_ptr]);
+}
 return sqe;
 }
 inline int wait_sqe(std::function<void()> cb)
rm_inode.cpp
@@ -6,26 +6,38 @@
 * May be included into a bigger "command-line management interface" in the future
 */

+#include <vector>
 #include <algorithm>

 #include "epoll_manager.h"
 #include "cluster_client.h"
 #include "pg_states.h"

-#define RM_NO_LIST 1
-#define RM_LIST_SENT 2
-#define RM_REMOVING 3
-#define RM_END 4
+#define RM_LISTING 1
+#define RM_REMOVING 2
+#define RM_END 3

 const char *exe_name = NULL;

+struct rm_pg_t;
+
 struct rm_pg_osd_t
 {
-pg_num_t pg_num;
+rm_pg_t *pg = NULL;
 osd_num_t osd_num;
+bool sent = false;
+};
+
+struct rm_pg_t
+{
+pg_num_t pg_num;
+osd_num_t rm_osd_num;
+std::vector<rm_pg_osd_t> list_osds;
 int state = 0;
-obj_ver_id *obj_list = NULL;
-uint64_t obj_count = 0, obj_pos = 0, obj_done = 0, obj_prev_done = 0;
+int to_list;
+std::set<object_id> objects;
+std::set<object_id>::iterator obj_pos;
+uint64_t obj_count = 0, obj_done = 0, obj_prev_done = 0;
 int in_flight = 0;
 };

@@ -41,11 +53,12 @@ protected:
 cluster_client_t *cli = NULL;
 ring_consumer_t consumer;

-std::vector<rm_pg_osd_t*> lists;
+std::vector<rm_pg_t*> lists;
 uint64_t total_count = 0, total_done = 0, total_prev_pct = 0;
 uint64_t pgs_to_list = 0;
 bool started = false;
 bool progress = true;
+bool list_first = false;
 int log_level = 0;

 public:
@@ -62,7 +75,7 @@ public:
 else if (args[i][0] == '-' && args[i][1] == '-')
 {
 const char *opt = args[i]+2;
-cfg[opt] = !strcmp(opt, "json") || i == narg-1 ? "1" : args[++i];
+cfg[opt] = !strcmp(opt, "json") || !strcmp(opt, "wait-list") || i == narg-1 ? "1" : args[++i];
 }
 }
 return cfg;
@@ -74,7 +87,7 @@ public:
 "Vitastor inode removal tool\n"
 "(c) Vitaliy Filippov, 2020 (VNPL-1.0)\n\n"
 "USAGE:\n"
-" %s --etcd_address <etcd_address> --pool <pool> --inode <inode>\n",
+" %s --etcd_address <etcd_address> --pool <pool> --inode <inode> [--wait-list]\n",
 exe_name
 );
 exit(0);
@@ -105,6 +118,7 @@ public:
 parallel_osds = 4;
 log_level = cfg["log_level"].int64_value();
 progress = cfg["progress"].uint64_value() ? true : false;
+list_first = cfg["wait-list"].uint64_value() ? true : false;
 // Create client
 ringloop = new ring_loop_t(512);
 epmgr = new epoll_manager_t(ringloop);
@@ -137,21 +151,57 @@ public:
 for (auto & pg_item: pool_cfg.pg_config)
 {
 auto & pg = pg_item.second;
-if (pg.pause || !pg.cur_primary || pg.cur_state != PG_ACTIVE)
+if (pg.pause || !pg.cur_primary || !(pg.cur_state & PG_ACTIVE))
 {
-// FIXME Support deletion in non-clean active PGs by introducing a "primary-list" command
-fprintf(stderr, "PG %u is not active+clean, skipping\n", pg_item.first);
+fprintf(stderr, "PG %u is inactive, skipping\n", pg_item.first);
 continue;
 }
-rm_pg_osd_t *r = new rm_pg_osd_t();
+rm_pg_t *r = new rm_pg_t();
 r->pg_num = pg_item.first;
-r->osd_num = pg.cur_primary;
-r->state = RM_NO_LIST;
+r->rm_osd_num = pg.cur_primary;
+r->state = RM_LISTING;
+if (pg.cur_state != PG_ACTIVE)
+{
+std::set<osd_num_t> all_peers;
+for (osd_num_t pg_osd: pg.target_set)
+{
+if (pg_osd != 0)
+{
+all_peers.insert(pg_osd);
+}
+}
+for (osd_num_t pg_osd: pg.all_peers)
+{
+if (pg_osd != 0)
+{
+all_peers.insert(pg_osd);
+}
+}
+for (auto & hist_item: pg.target_history)
+{
+for (auto pg_osd: hist_item)
+{
+if (pg_osd != 0)
+{
+all_peers.insert(pg_osd);
+}
+}
+}
+for (osd_num_t peer_osd: all_peers)
+{
+r->list_osds.push_back((rm_pg_osd_t){ .pg = r, .osd_num = peer_osd, .sent = false });
+}
+}
+else
+{
+r->list_osds.push_back((rm_pg_osd_t){ .pg = r, .osd_num = pg.cur_primary, .sent = false });
+}
+r->to_list = r->list_osds.size();
 lists.push_back(r);
 }
-std::sort(lists.begin(), lists.end(), [](rm_pg_osd_t *a, rm_pg_osd_t *b)
+std::sort(lists.begin(), lists.end(), [](rm_pg_t *a, rm_pg_t *b)
 {
-return a->osd_num < b->osd_num ? true : false;
+return a->rm_osd_num < b->rm_osd_num ? true : false;
 });
 pgs_to_list = lists.size();
 started = true;
@@ -160,6 +210,10 @@ public:

 void send_list(rm_pg_osd_t *cur_list)
 {
+if (cur_list->sent)
+{
+return;
+}
 if (cli->msgr.osd_peer_fds.find(cur_list->osd_num) ==
 cli->msgr.osd_peer_fds.end())
 {
@@ -177,7 +231,7 @@ public:
 .id = cli->msgr.next_subop_id++,
 .opcode = OSD_OP_SEC_LIST,
 },
-.list_pg = cur_list->pg_num,
+.list_pg = cur_list->pg->pg_num,
 .pg_count = (pg_num_t)cli->st_cli.pool_config[pool_id].real_pg_count,
 .pg_stripe_size = cli->st_cli.pool_config[pool_id].pg_stripe_size,
 .min_inode = inode,
@@ -186,53 +240,67 @@ public:
 };
 op->callback = [this, cur_list](osd_op_t *op)
 {
-pgs_to_list--;
+cur_list->pg->to_list--;
 if (op->reply.hdr.retval < 0)
 {
-fprintf(stderr, "Failed to get object list from OSD %lu (retval=%ld), skipping the PG\n",
-cur_list->osd_num, op->reply.hdr.retval);
-cli->msgr.stop_client(cur_list->osd_num);
-delete op;
-cur_list->state = RM_END;
-continue_delete();
-return;
+fprintf(stderr, "Failed to get PG %u/%u object list from OSD %lu (retval=%ld), skipping\n",
+pool_id, cur_list->pg->pg_num, cur_list->osd_num, op->reply.hdr.retval);
 }
-if (log_level > 0)
+else
 {
-printf(
-"[PG %u/%u] Got inode object list from OSD %lu: %ld object versions\n",
-pool_id, cur_list->pg_num, cur_list->osd_num, op->reply.hdr.retval
-);
+if (op->reply.sec_list.stable_count < op->reply.hdr.retval)
+{
+// Unstable objects, if present, mean that someone still writes into the inode. Warn the user about it.
+printf(
+"[PG %u/%u] Inode still has %lu unstable object versions - is it still open? Not a good idea to delete it.\n",
+pool_id, cur_list->pg->pg_num, op->reply.hdr.retval - op->reply.sec_list.stable_count
+);
+}
+if (log_level > 0)
+{
+printf(
+"[PG %u/%u] Got inode object list from OSD %lu: %ld object versions\n",
+pool_id, cur_list->pg->pg_num, cur_list->osd_num, op->reply.hdr.retval
+);
+}
+for (uint64_t i = 0; i < op->reply.hdr.retval; i++)
+{
+object_id oid = ((obj_ver_id*)op->buf)[i].oid;
+oid.stripe = oid.stripe & ~STRIPE_MASK;
+cur_list->pg->objects.insert(oid);
+}
 }
-cur_list->obj_list = (obj_ver_id*)op->buf;
-cur_list->obj_count = (uint64_t)op->reply.hdr.retval;
-cur_list->obj_done = cur_list->obj_prev_done = cur_list->obj_pos = 0;
-total_count += cur_list->obj_count;
-total_prev_pct = 0;
-// set op->buf to NULL so it doesn't get freed
-op->buf = NULL;
 delete op;
-cur_list->state = RM_REMOVING;
+if (cur_list->pg->to_list <= 0)
+{
+cur_list->pg->obj_done = cur_list->pg->obj_prev_done = 0;
+cur_list->pg->obj_pos = cur_list->pg->objects.begin();
+cur_list->pg->obj_count = cur_list->pg->objects.size();
+total_count += cur_list->pg->obj_count;
+total_prev_pct = 0;
+cur_list->pg->state = RM_REMOVING;
+pgs_to_list--;
+}
 continue_delete();
 };
-cur_list->state = RM_LIST_SENT;
 cli->msgr.outbox_push(op);
+cur_list->sent = true;
 }

-void send_ops(rm_pg_osd_t *cur_list)
+void send_ops(rm_pg_t *cur_list)
 {
-if (cli->msgr.osd_peer_fds.find(cur_list->osd_num) ==
+if (cli->msgr.osd_peer_fds.find(cur_list->rm_osd_num) ==
 cli->msgr.osd_peer_fds.end())
 {
 // Initiate connection
-cli->msgr.connect_peer(cur_list->osd_num, cli->st_cli.peer_states[cur_list->osd_num]);
+cli->msgr.connect_peer(cur_list->rm_osd_num, cli->st_cli.peer_states[cur_list->rm_osd_num]);
 return;
 }
-while (cur_list->in_flight < iodepth && cur_list->obj_pos < cur_list->obj_count)
+while (cur_list->in_flight < iodepth && cur_list->obj_pos != cur_list->objects.end())
 {
 osd_op_t *op = new osd_op_t();
 op->op_type = OSD_OP_OUT;
-op->peer_fd = cli->msgr.osd_peer_fds[cur_list->osd_num];
+op->peer_fd = cli->msgr.osd_peer_fds[cur_list->rm_osd_num];
 op->req = (osd_any_op_t){
 .rw = {
 .header = {
@@ -240,8 +308,8 @@ public:
 .id = cli->msgr.next_subop_id++,
 .opcode = OSD_OP_DELETE,
 },
-.inode = cur_list->obj_list[cur_list->obj_pos].oid.inode,
-.offset = (cur_list->obj_list[cur_list->obj_pos].oid.stripe & ~STRIPE_MASK),
+.inode = cur_list->obj_pos->inode,
+.offset = (cur_list->obj_pos->stripe & ~STRIPE_MASK),
 .len = 0,
 },
 };
@@ -251,7 +319,7 @@ public:
 if (op->reply.hdr.retval < 0)
 {
 fprintf(stderr, "Failed to remove object from PG %u (OSD %lu) (retval=%ld)\n",
-cur_list->pg_num, cur_list->osd_num, op->reply.hdr.retval);
+cur_list->pg_num, cur_list->rm_osd_num, op->reply.hdr.retval);
 }
 delete op;
 cur_list->obj_done++;
@@ -262,12 +330,10 @@ public:
 cur_list->obj_pos++;
 cur_list->in_flight++;
 }
-if (!cur_list->in_flight && cur_list->obj_pos >= cur_list->obj_count)
+if (!cur_list->in_flight && cur_list->obj_pos == cur_list->objects.end())
 {
-free(cur_list->obj_list);
-cur_list->obj_list = NULL;
 cur_list->obj_count = 0;
-cur_list->obj_done = cur_list->obj_prev_done = cur_list->obj_pos = 0;
+cur_list->obj_done = cur_list->obj_prev_done = 0;
 cur_list->state = RM_END;
 }
 }
@@ -276,6 +342,22 @@ public:
 {
 int par_osd = 0;
 osd_num_t max_seen_osd = 0;
+bool no_del = false;
+if (list_first)
+{
+int i, n = 0;
+for (i = 0; i < lists.size(); i++)
+{
+if (lists[i]->state == RM_LISTING)
+{
+n++;
+}
+}
+if (n > 0)
+{
+no_del = true;
+}
+}
 for (int i = 0; i < lists.size(); i++)
 {
 if (lists[i]->state == RM_END)
@@ -284,18 +366,25 @@ public:
 lists.erase(lists.begin()+i, lists.begin()+i+1);
 i--;
 }
-else if (lists[i]->osd_num > max_seen_osd)
+else if (lists[i]->rm_osd_num > max_seen_osd)
 {
-if (lists[i]->state == RM_NO_LIST)
+if (lists[i]->state == RM_LISTING)
 {
-send_list(lists[i]);
+for (int j = 0; j < lists[i]->list_osds.size(); j++)
+{
+send_list(&lists[i]->list_osds[j]);
+}
 }
 else if (lists[i]->state == RM_REMOVING)
 {
+if (no_del)
+{
+continue;
+}
 send_ops(lists[i]);
 }
 par_osd++;
-max_seen_osd = lists[i]->osd_num;
+max_seen_osd = lists[i]->rm_osd_num;
 if (par_osd >= parallel_osds)
 {
 break;
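Per the updated USAGE line, an inode removal that first waits for all PG listings would be invoked roughly like this (the binary name, etcd address, pool and inode numbers are placeholders):

./rm_inode --etcd_address 10.0.0.1:2379/v3 --pool 1 --inode 2 --wait-list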
@@ -48,4 +48,4 @@ FIO=`rpm -qi fio | perl -e 'while(<>) { /^Epoch[\s:]+(\S+)/ && print "$1:"; /^Ve
 QEMU=`rpm -qi qemu qemu-kvm | perl -e 'while(<>) { /^Epoch[\s:]+(\S+)/ && print "$1:"; /^Version[\s:]+(\S+)/ && print $1; /^Release[\s:]+(\S+)/ && print "-$1"; }'`
 perl -i -pe 's/(Requires:\s*fio)([^\n]+)?/$1 = '$FIO'/' $VITASTOR/rpm/vitastor-el$EL.spec
 perl -i -pe 's/(Requires:\s*qemu(?:-kvm)?)([^\n]+)?/$1 = '$QEMU'/' $VITASTOR/rpm/vitastor-el$EL.spec
-tar --transform 's#^#vitastor-0.5/#' --exclude 'rpm/*.rpm' -czf $VITASTOR/../vitastor-0.5$(rpm --eval '%dist').tar.gz *
+tar --transform 's#^#vitastor-0.5.4/#' --exclude 'rpm/*.rpm' -czf $VITASTOR/../vitastor-0.5.4$(rpm --eval '%dist').tar.gz *
31
rpm/qemu-el8.Dockerfile
Normal file
31
rpm/qemu-el8.Dockerfile
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
# Build packages for CentOS 8 inside a container
|
||||||
|
# cd ..; podman build -t qemu-el8 -v `pwd`/build:/root/build -f rpm/qemu-el8.Dockerfile .
|
||||||
|
|
||||||
|
FROM centos:8
|
||||||
|
|
||||||
|
WORKDIR /root
|
||||||
|
|
||||||
|
RUN rm -f /etc/yum.repos.d/CentOS-Media.repo
|
||||||
|
RUN dnf -y install centos-release-advanced-virtualization epel-release dnf-plugins-core rpm-build
|
||||||
|
RUN rm -rf /var/lib/dnf/*; dnf download --disablerepo='*' --enablerepo='centos-advanced-virtualization-source' --source qemu-kvm
|
||||||
|
RUN rpm --nomd5 -i qemu*.src.rpm
|
||||||
|
RUN cd ~/rpmbuild/SPECS && dnf builddep -y --enablerepo=PowerTools --spec qemu-kvm.spec
|
||||||
|
|
||||||
|
ADD qemu-*-vitastor.patch /root/vitastor/
|
||||||
|
|
||||||
|
RUN set -e; \
|
||||||
|
mkdir -p /root/build/qemu-el8; \
|
||||||
|
rm -rf /root/build/qemu-el8/*; \
|
||||||
|
rpm --nomd5 -i /root/qemu*.src.rpm; \
|
||||||
|
cd ~/rpmbuild/SPECS; \
|
||||||
|
PN=$(grep ^Patch qemu-kvm.spec | tail -n1 | perl -pe 's/Patch(\d+).*/$1/'); \
|
||||||
|
csplit qemu-kvm.spec "/^Patch$PN/"; \
|
||||||
|
cat xx00 > qemu-kvm.spec; \
|
||||||
|
head -n 1 xx01 >> qemu-kvm.spec; \
|
||||||
|
echo "Patch$((PN+1)): qemu-4.2-vitastor.patch" >> qemu-kvm.spec; \
|
||||||
|
tail -n +2 xx01 >> qemu-kvm.spec; \
|
||||||
|
perl -i -pe 's/(^Release:\s*\d+)/$1.vitastor/' qemu-kvm.spec; \
|
||||||
|
cp /root/vitastor/qemu-4.2-vitastor.patch ~/rpmbuild/SOURCES; \
|
||||||
|
rpmbuild --nocheck -ba qemu-kvm.spec; \
|
||||||
|
cp ~/rpmbuild/RPMS/*/*qemu* /root/build/qemu-el8/; \
|
||||||
|
cp ~/rpmbuild/SRPMS/*qemu* /root/build/qemu-el8/
|
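For reference, a minimal way to use this Dockerfile, following its own header comment (the mkdir and ls steps below are assumptions, not part of the changeset):

    # run from the repository root so rpm/qemu-el8.Dockerfile and qemu-*-vitastor.patch are in the build context
    mkdir -p build
    podman build -t qemu-el8 -v `pwd`/build:/root/build -f rpm/qemu-el8.Dockerfile .
    ls build/qemu-el8/    # the rebuilt qemu-kvm RPMs and SRPM are copied here by the final RUN step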
257 rpm/qemu-kvm-el7.spec.patch (Normal file)
@@ -0,0 +1,257 @@
--- qemu-kvm.spec.orig	2020-11-09 23:41:03.000000000 +0000
+++ qemu-kvm.spec	2020-12-06 10:44:24.207640963 +0000
@@ -2,7 +2,7 @@
 %global SLOF_gittagcommit 899d9883
 
 %global have_usbredir 1
-%global have_spice 1
+%global have_spice 0
 %global have_opengl 1
 %global have_fdt 0
 %global have_gluster 1
@@ -56,7 +56,7 @@ Requires: %{name}-block-curl = %{epoch}:
 Requires: %{name}-block-gluster = %{epoch}:%{version}-%{release} \
 %endif \
 Requires: %{name}-block-iscsi = %{epoch}:%{version}-%{release} \
-Requires: %{name}-block-rbd = %{epoch}:%{version}-%{release} \
+#Requires: %{name}-block-rbd = %{epoch}:%{version}-%{release} \
 Requires: %{name}-block-ssh = %{epoch}:%{version}-%{release}
 
 # Macro to properly setup RHEL/RHEV conflict handling
@@ -67,7 +67,7 @@ Obsoletes: %1-rhev
 Summary: QEMU is a machine emulator and virtualizer
 Name: qemu-kvm
 Version: 4.2.0
-Release: 29.vitastor%{?dist}.6
+Release: 30.vitastor%{?dist}.6
 # Epoch because we pushed a qemu-1.0 package. AIUI this can't ever be dropped
 Epoch: 15
 License: GPLv2 and GPLv2+ and CC-BY
@@ -99,8 +99,8 @@ Source30: kvm-s390x.conf
 Source31: kvm-x86.conf
 Source32: qemu-pr-helper.service
 Source33: qemu-pr-helper.socket
-Source34: 81-kvm-rhel.rules
-Source35: udev-kvm-check.c
+#Source34: 81-kvm-rhel.rules
+#Source35: udev-kvm-check.c
 Source36: README.tests
 
 
@@ -825,7 +825,9 @@ Patch331: kvm-Drop-bogus-IPv6-messages.p
 Patch333: kvm-virtiofsd-Whitelist-fchmod.patch
 # For bz#1883869 - virtiofsd core dump in KATA Container [rhel-8.2.1.z]
 Patch334: kvm-virtiofsd-avoid-proc-self-fd-tempdir.patch
-Patch335: qemu-4.2-vitastor.patch
+Patch335: qemu-use-sphinx-1.2.patch
+Patch336: qemu-config-tcmalloc-warning.patch
+Patch337: qemu-4.2-vitastor.patch
 
 BuildRequires: wget
 BuildRequires: rpm-build
@@ -842,7 +844,8 @@ BuildRequires: pciutils-devel
 BuildRequires: libiscsi-devel
 BuildRequires: ncurses-devel
 BuildRequires: libattr-devel
-BuildRequires: libusbx-devel >= 1.0.22
+BuildRequires: gperftools-devel
+BuildRequires: libusbx-devel >= 1.0.21
 %if %{have_usbredir}
 BuildRequires: usbredir-devel >= 0.7.1
 %endif
@@ -856,12 +859,12 @@ BuildRequires: virglrenderer-devel
 # For smartcard NSS support
 BuildRequires: nss-devel
 %endif
-BuildRequires: libseccomp-devel >= 2.4.0
+#Requires: libseccomp >= 2.4.0
 # For network block driver
 BuildRequires: libcurl-devel
 BuildRequires: libssh-devel
-BuildRequires: librados-devel
-BuildRequires: librbd-devel
+#BuildRequires: librados-devel
+#BuildRequires: librbd-devel
 %if %{have_gluster}
 # For gluster block driver
 BuildRequires: glusterfs-api-devel
@@ -955,25 +958,25 @@ hardware for a full system such as a PC
 
 %package -n qemu-kvm-core
 Summary: qemu-kvm core components
+Requires: gperftools-libs
 Requires: qemu-img = %{epoch}:%{version}-%{release}
 %ifarch %{ix86} x86_64
 Requires: seabios-bin >= 1.10.2-1
 Requires: sgabios-bin
-Requires: edk2-ovmf
 %endif
 %ifarch aarch64
 Requires: edk2-aarch64
 %endif
 
 %ifnarch aarch64 s390x
-Requires: seavgabios-bin >= 1.12.0-3
-Requires: ipxe-roms-qemu >= 20170123-1
+Requires: seavgabios-bin >= 1.11.0-1
+Requires: ipxe-roms-qemu >= 20181214-1
+Requires: /usr/share/ipxe.efi
 %endif
 %ifarch %{power64}
 Requires: SLOF >= %{SLOF_gittagdate}-1.git%{SLOF_gittagcommit}
 %endif
 Requires: %{name}-common = %{epoch}:%{version}-%{release}
-Requires: libseccomp >= 2.4.0
 # For compressed guest memory dumps
 Requires: lzo snappy
 %if %{have_kvm_setup}
@@ -1085,15 +1088,15 @@ This package provides the additional iSC
 Install this package if you want to access iSCSI volumes.
 
 
-%package block-rbd
-Summary: QEMU Ceph/RBD block driver
-Requires: %{name}-common%{?_isa} = %{epoch}:%{version}-%{release}
-
-%description block-rbd
-This package provides the additional Ceph/RBD block driver for QEMU.
-
-Install this package if you want to access remote Ceph volumes
-using the rbd protocol.
+#%package block-rbd
+#Summary: QEMU Ceph/RBD block driver
+#Requires: %{name}-common%{?_isa} = %{epoch}:%{version}-%{release}
+#
+#%description block-rbd
+#This package provides the additional Ceph/RBD block driver for QEMU.
+#
+#Install this package if you want to access remote Ceph volumes
+#using the rbd protocol.
 
 
 %package block-ssh
@@ -1117,12 +1120,14 @@ the Secure Shell (SSH) protocol.
 # --build-id option is used for giving info to the debug packages.
 buildldflags="VL_LDFLAGS=-Wl,--build-id"
 
-%global block_drivers_list qcow2,raw,file,host_device,nbd,iscsi,rbd,blkdebug,luks,null-co,nvme,copy-on-read,throttle
+#%global block_drivers_list qcow2,raw,file,host_device,nbd,iscsi,rbd,blkdebug,luks,null-co,nvme,copy-on-read,throttle
+%global block_drivers_list qcow2,raw,file,host_device,nbd,iscsi,blkdebug,luks,null-co,nvme,copy-on-read,throttle
 
 %if 0%{have_gluster}
 %global block_drivers_list %{block_drivers_list},gluster
 %endif
 
+[ -e /usr/bin/sphinx-build ] || ln -s sphinx-build-3 /usr/bin/sphinx-build
 ./configure \
     --prefix="%{_prefix}" \
     --libdir="%{_libdir}" \
@@ -1152,15 +1157,15 @@ buildldflags="VL_LDFLAGS=-Wl,--build-id"
 %else
     --disable-numa \
 %endif
-    --enable-rbd \
+    --disable-rbd \
 %if 0%{have_librdma}
     --enable-rdma \
 %else
    --disable-rdma \
 %endif
    --disable-pvrdma \
-    --enable-seccomp \
-%if 0%{have_spice}
+    --disable-seccomp \
+%if %{have_spice}
    --enable-spice \
    --enable-smartcard \
    --enable-virglrenderer \
@@ -1179,7 +1184,7 @@ buildldflags="VL_LDFLAGS=-Wl,--build-id"
 %else
    --disable-usb-redir \
 %endif
-    --disable-tcmalloc \
+    --enable-tcmalloc \
 %ifarch x86_64
    --enable-libpmem \
 %else
@@ -1193,9 +1198,7 @@ buildldflags="VL_LDFLAGS=-Wl,--build-id"
 %endif
    --python=%{__python3} \
    --target-list="%{buildarch}" \
-    --block-drv-rw-whitelist=%{block_drivers_list} \
    --audio-drv-list= \
-    --block-drv-ro-whitelist=vmdk,vhdx,vpc,https,ssh \
    --with-coroutine=ucontext \
    --tls-priority=NORMAL \
    --disable-bluez \
@@ -1262,7 +1265,7 @@ buildldflags="VL_LDFLAGS=-Wl,--build-id"
    --disable-sanitizers \
    --disable-hvf \
    --disable-whpx \
-    --enable-malloc-trim \
+    --disable-malloc-trim \
    --disable-membarrier \
    --disable-vhost-crypto \
    --disable-libxml2 \
@@ -1308,7 +1311,7 @@ make V=1 %{?_smp_mflags} $buildldflags
 cp -a %{kvm_target}-softmmu/qemu-system-%{kvm_target} qemu-kvm
 
 gcc %{SOURCE6} $RPM_OPT_FLAGS $RPM_LD_FLAGS -o ksmctl
-gcc %{SOURCE35} $RPM_OPT_FLAGS $RPM_LD_FLAGS -o udev-kvm-check
+#gcc %{SOURCE35} $RPM_OPT_FLAGS $RPM_LD_FLAGS -o udev-kvm-check
 
 %install
 %define _udevdir %(pkg-config --variable=udevdir udev)
@@ -1343,8 +1346,8 @@ mkdir -p $RPM_BUILD_ROOT%{testsdir}/test
 mkdir -p $RPM_BUILD_ROOT%{testsdir}/tests/qemu-iotests
 mkdir -p $RPM_BUILD_ROOT%{testsdir}/scripts/qmp
 
-install -p -m 0755 udev-kvm-check $RPM_BUILD_ROOT%{_udevdir}
-install -p -m 0644 %{SOURCE34} $RPM_BUILD_ROOT%{_udevrulesdir}
+#install -p -m 0755 udev-kvm-check $RPM_BUILD_ROOT%{_udevdir}
+#install -p -m 0644 %{SOURCE34} $RPM_BUILD_ROOT%{_udevrulesdir}
 
 install -m 0644 scripts/dump-guest-memory.py \
     $RPM_BUILD_ROOT%{_datadir}/%{name}
@@ -1562,6 +1565,8 @@ rm -rf $RPM_BUILD_ROOT%{qemudocdir}/inte
 # Remove spec
 rm -rf $RPM_BUILD_ROOT%{qemudocdir}/specs
 
+%global __os_install_post %(echo '%{__os_install_post}' | sed -e 's!/usr/lib[^[:space:]]*/brp-python-bytecompile[[:space:]].*$!!g')
+
 %check
 export DIFF=diff; make check V=1
 
@@ -1645,8 +1650,8 @@ useradd -r -u 107 -g qemu -G kvm -d / -s
 %config(noreplace) %{_sysconfdir}/sysconfig/ksm
 %{_unitdir}/ksmtuned.service
 %{_sbindir}/ksmtuned
-%{_udevdir}/udev-kvm-check
-%{_udevrulesdir}/81-kvm-rhel.rules
+#%{_udevdir}/udev-kvm-check
+#%{_udevrulesdir}/81-kvm-rhel.rules
 %ghost %{_sysconfdir}/kvm
 %config(noreplace) %{_sysconfdir}/ksmtuned.conf
 %dir %{_sysconfdir}/%{name}
@@ -1711,8 +1716,8 @@ useradd -r -u 107 -g qemu -G kvm -d / -s
 %{_libexecdir}/vhost-user-gpu
 %{_datadir}/%{name}/vhost-user/50-qemu-gpu.json
 %endif
-%{_libexecdir}/virtiofsd
-%{_datadir}/%{name}/vhost-user/50-qemu-virtiofsd.json
+#%{_libexecdir}/virtiofsd
+#%{_datadir}/%{name}/vhost-user/50-qemu-virtiofsd.json
 
 %files -n qemu-img
 %defattr(-,root,root)
@@ -1748,8 +1753,8 @@ useradd -r -u 107 -g qemu -G kvm -d / -s
 %files block-iscsi
 %{_libdir}/qemu-kvm/block-iscsi.so
 
-%files block-rbd
-%{_libdir}/qemu-kvm/block-rbd.so
+#%files block-rbd
+#%{_libdir}/qemu-kvm/block-rbd.so
 
 %files block-ssh
 %{_libdir}/qemu-kvm/block-ssh.so
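For context, this patch is written against the qemu-kvm spec unpacked from the distro SRPM ('qemu-kvm.spec.orig' vs 'qemu-kvm.spec'). One assumed way to apply it by hand, outside the Dockerfiles (these exact commands are not part of the changeset):

    rpm --nomd5 -i qemu-kvm-*.src.rpm
    cd ~/rpmbuild/SPECS
    patch -p0 < /path/to/vitastor/rpm/qemu-kvm-el7.spec.patch
    rpmbuild --nocheck -ba qemu-kvm.spec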
@@ -1,5 +1,5 @@
---- qemu-kvm.spec	2020-11-07 22:48:46.312124920 +0000
+--- qemu-kvm.spec	2020-12-05 13:13:54.388623517 +0000
-+++ qemu-kvm.spec	2020-11-07 23:04:06.246772766 +0000
++++ qemu-kvm.spec	2020-12-05 13:13:58.728696598 +0000
 @@ -67,7 +67,7 @@ Obsoletes: %1-rhev
  Summary: QEMU is a machine emulator and virtualizer
  Name: qemu-kvm
@@ -9,12 +9,21 @@
  # Epoch because we pushed a qemu-1.0 package. AIUI this can't ever be dropped
  Epoch: 15
  License: GPLv2 and GPLv2+ and CC-BY
-@@ -825,6 +825,8 @@ Patch331: kvm-Drop-bogus-IPv6-messages.p
+@@ -825,6 +825,7 @@ Patch331: kvm-Drop-bogus-IPv6-messages.p
  Patch333: kvm-virtiofsd-Whitelist-fchmod.patch
  # For bz#1883869 - virtiofsd core dump in KATA Container [rhel-8.2.1.z]
  Patch334: kvm-virtiofsd-avoid-proc-self-fd-tempdir.patch
-+# Vitastor
 +Patch335: qemu-4.2-vitastor.patch
 
  BuildRequires: wget
  BuildRequires: rpm-build
+@@ -1192,9 +1193,7 @@ buildldflags="VL_LDFLAGS=-Wl,--build-id"
+ %endif
+     --python=%{__python3} \
+     --target-list="%{buildarch}" \
+-    --block-drv-rw-whitelist=%{block_drivers_list} \
+     --audio-drv-list= \
+-    --block-drv-ro-whitelist=vmdk,vhdx,vpc,https,ssh \
+     --with-coroutine=ucontext \
+     --tls-priority=NORMAL \
+     --disable-bluez \
@@ -6,9 +6,10 @@ FROM centos:7
 
 WORKDIR /root
 
+RUN rm -f /etc/yum.repos.d/CentOS-Media.repo
 RUN yum -y --enablerepo=extras install centos-release-scl epel-release yum-utils rpm-build
 RUN yum -y install https://vitastor.io/rpms/centos/7/vitastor-release-1.0-1.el7.noarch.rpm
-RUN yum -y install devtoolset-9-gcc-c++ devtoolset-9-libatomic-devel gperftools-devel qemu-kvm fio rh-nodejs12
+RUN yum -y install devtoolset-9-gcc-c++ devtoolset-9-libatomic-devel gperftools-devel qemu-kvm fio rh-nodejs12 jerasure-devel gf-complete-devel
 RUN yumdownloader --disablerepo=centos-sclo-rh --source qemu-kvm
 RUN yumdownloader --disablerepo=centos-sclo-rh --source fio
 RUN rpm --nomd5 -i qemu*.src.rpm
@@ -36,7 +37,7 @@ ADD . /root/vitastor
 RUN set -e; \
     cd /root/vitastor/rpm; \
     sh build-tarball.sh; \
-    cp /root/vitastor-0.5.el7.tar.gz ~/rpmbuild/SOURCES; \
+    cp /root/vitastor-0.5.4.el7.tar.gz ~/rpmbuild/SOURCES; \
     cp vitastor-el7.spec ~/rpmbuild/SPECS/vitastor.spec; \
     cd ~/rpmbuild/SPECS/; \
     rpmbuild -ba vitastor.spec; \
@@ -1,22 +1,26 @@
 Name: vitastor
-Version: 0.5
+Version: 0.5.4
 Release: 2%{?dist}
 Summary: Vitastor, a fast software-defined clustered block storage
 
 License: Vitastor Network Public License 1.0
 URL: https://vitastor.io/
-Source0: vitastor-0.5.el7.tar.gz
+Source0: vitastor-0.5.4.el7.tar.gz
 
 BuildRequires: liburing-devel >= 0.6
 BuildRequires: gperftools-devel
 BuildRequires: devtoolset-9-gcc-c++
 BuildRequires: rh-nodejs12
 BuildRequires: rh-nodejs12-npm
+BuildRequires: jerasure-devel
+BuildRequires: gf-complete-devel
 Requires: fio = 3.7-1.el7
 Requires: qemu-kvm = 2.0.0-1.el7.6
 Requires: rh-nodejs12
 Requires: rh-nodejs12-npm
 Requires: liburing >= 0.6
+Requires: libJerasure2
+Requires: lpsolve
 
 %description
 Vitastor is a small, simple and fast clustered block storage (storage for VM drives),
@@ -7,13 +7,14 @@ WORKDIR /root
 
 RUN rm -f /etc/yum.repos.d/CentOS-Media.repo
 RUN dnf -y install centos-release-advanced-virtualization epel-release dnf-plugins-core
-RUN dnf --enablerepo='centos-advanced-virtualization' -y install gcc-toolset-9 gcc-toolset-9-gcc-c++ gperftools-devel qemu-kvm fio nodejs rpm-build
-RUN rm -rf /var/lib/dnf/*; dnf download --disablerepo='*' --enablerepo='centos-advanced-virtualization-source' --source qemu-kvm
+RUN yum -y install https://vitastor.io/rpms/centos/8/vitastor-release-1.0-1.el8.noarch.rpm
+RUN dnf --enablerepo='centos-advanced-virtualization' -y install gcc-toolset-9 gcc-toolset-9-gcc-c++ gperftools-devel qemu-kvm fio nodejs rpm-build jerasure-devel gf-complete-devel
+RUN rm -rf /var/lib/dnf/*; dnf download --disablerepo='*' --enablerepo='vitastor' --source qemu-kvm
 RUN dnf download --source fio
 RUN rpm --nomd5 -i qemu*.src.rpm
 RUN rpm --nomd5 -i fio*.src.rpm
-RUN cd ~/rpmbuild/SPECS && dnf builddep -y --enablerepo='*' --spec qemu-kvm.spec
-RUN cd ~/rpmbuild/SPECS && dnf builddep -y --enablerepo='*' --spec fio.spec
+RUN cd ~/rpmbuild/SPECS && dnf builddep -y --enablerepo=PowerTools --spec qemu-kvm.spec
+RUN cd ~/rpmbuild/SPECS && dnf builddep -y --enablerepo=PowerTools --spec fio.spec
 
 ADD https://vitastor.io/rpms/liburing-el7/liburing-0.7-2.el7.src.rpm /root
 
@@ -29,33 +30,12 @@ RUN set -e; \
 
 RUN rpm -i `ls /root/build/liburing-el7/liburing-*.x86_64.rpm | grep -v debug`
 
-ADD qemu-*-vitastor.patch /root/vitastor/
-
-RUN set -e; \
-    mkdir -p /root/build/qemu-el8; \
-    rm -rf /root/build/qemu-el8/*; \
-    rpm --nomd5 -i /root/qemu*.src.rpm; \
-    cd ~/rpmbuild/SPECS; \
-    PN=$(grep ^Patch qemu-kvm.spec | tail -n1 | perl -pe 's/Patch(\d+).*/$1/'); \
-    csplit qemu-kvm.spec "/^Patch$PN/"; \
-    cat xx00 > qemu-kvm.spec; \
-    head -n 1 xx01 >> qemu-kvm.spec; \
-    echo "Patch$((PN+1)): qemu-4.2-vitastor.patch" >> qemu-kvm.spec; \
-    tail -n +2 xx01 >> qemu-kvm.spec; \
-    perl -i -pe 's/(^Release:\s*\d+)/$1.vitastor/' qemu-kvm.spec; \
-    cp /root/vitastor/qemu-4.2-vitastor.patch ~/rpmbuild/SOURCES; \
-    rpmbuild --nocheck -ba qemu-kvm.spec; \
-    cp ~/rpmbuild/RPMS/*/*qemu* /root/build/qemu-el8/; \
-    cp ~/rpmbuild/SRPMS/*qemu* /root/build/qemu-el8/
-
-RUN cd /root/build/qemu-el8; dnf -y install `ls qemu*.rpm | grep -vP 'debug|guest|tests|src'`
-
 ADD . /root/vitastor
 
 RUN set -e; \
     cd /root/vitastor/rpm; \
     sh build-tarball.sh; \
-    cp /root/vitastor-0.5.el8.tar.gz ~/rpmbuild/SOURCES; \
+    cp /root/vitastor-0.5.4.el8.tar.gz ~/rpmbuild/SOURCES; \
     cp vitastor-el8.spec ~/rpmbuild/SPECS/vitastor.spec; \
     cd ~/rpmbuild/SPECS/; \
     rpmbuild -ba vitastor.spec; \
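By analogy with rpm/qemu-el8.Dockerfile above, the el8 vitastor packages are presumably built the same way (this exact command is an assumption, not shown in the diff):

    podman build -t vitastor-el8 -v `pwd`/build:/root/build -f rpm/vitastor-el8.Dockerfile .

Note that after this change the in-image qemu rebuild is gone: the patched qemu-kvm and its sources now come from the vitastor yum repository instead of being rebuilt inside this container.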
@@ -1,20 +1,24 @@
 Name: vitastor
-Version: 0.5
+Version: 0.5.4
 Release: 2%{?dist}
 Summary: Vitastor, a fast software-defined clustered block storage
 
 License: Vitastor Network Public License 1.0
 URL: https://vitastor.io/
-Source0: vitastor-0.5.el8.tar.gz
+Source0: vitastor-0.5.4.el8.tar.gz
 
 BuildRequires: liburing-devel >= 0.6
 BuildRequires: gperftools-devel
 BuildRequires: gcc-toolset-9-gcc-c++
 BuildRequires: nodejs >= 10
+BuildRequires: jerasure-devel
+BuildRequires: gf-complete-devel
 Requires: fio = 3.7-3.el8
 Requires: qemu-kvm = 4.2.0-29.el8.6
 Requires: nodejs >= 10
 Requires: liburing >= 0.6
+Requires: libJerasure2
+Requires: lpsolve
 
 %description
 Vitastor is a small, simple and fast clustered block storage (storage for VM drives),
77 run_tests.sh (Executable file)
@@ -0,0 +1,77 @@
#!/bin/bash

if [ ! "$BASH_VERSION" ] ; then
    echo "Use bash to run this script ($0)" 1>&2
    exit 1
fi

format_error()
{
    echo $(echo -n -e "\033[1;31m")$1$(echo -n -e "\033[m")
    $ETCDCTL get --prefix /vitastor > ./testdata/etcd-dump.txt
    exit 1
}
format_green()
{
    echo $(echo -n -e "\033[1;32m")$1$(echo -n -e "\033[m")
}

set -e -x

trap 'kill -9 $(jobs -p)' EXIT

ETCD=${ETCD:-etcd}
ETCD_PORT=${ETCD_PORT:-12379}

rm -rf ./testdata
mkdir -p ./testdata
dd if=/dev/zero of=./testdata/test_osd1.bin bs=1024 count=1 seek=$((1024*1024-1))
dd if=/dev/zero of=./testdata/test_osd2.bin bs=1024 count=1 seek=$((1024*1024-1))
dd if=/dev/zero of=./testdata/test_osd3.bin bs=1024 count=1 seek=$((1024*1024-1))

$ETCD -name etcd_test --data-dir ./testdata/etcd \
    --advertise-client-urls http://127.0.0.1:$ETCD_PORT --listen-client-urls http://127.0.0.1:$ETCD_PORT \
    --initial-advertise-peer-urls http://127.0.0.1:$((ETCD_PORT+1)) --listen-peer-urls http://127.0.0.1:$((ETCD_PORT+1)) \
    --max-txn-ops=100000 --auto-compaction-retention=10 --auto-compaction-mode=revision &>./testdata/etcd.log &
ETCD_PID=$!
ETCD_URL=127.0.0.1:$ETCD_PORT/v3
ETCDCTL="${ETCD}ctl --endpoints=http://$ETCD_URL"

./osd --osd_num 1 --bind_address 127.0.0.1 --etcd_address $ETCD_URL $(node mon/simple-offsets.js --format options --device ./testdata/test_osd1.bin 2>/dev/null) &>./testdata/osd1.log &
OSD1_PID=$!
./osd --osd_num 2 --bind_address 127.0.0.1 --etcd_address $ETCD_URL $(node mon/simple-offsets.js --format options --device ./testdata/test_osd2.bin 2>/dev/null) &>./testdata/osd2.log &
OSD2_PID=$!
./osd --osd_num 3 --bind_address 127.0.0.1 --etcd_address $ETCD_URL $(node mon/simple-offsets.js --format options --device ./testdata/test_osd3.bin 2>/dev/null) &>./testdata/osd3.log &
OSD3_PID=$!

cd mon
npm install
cd ..
node mon/mon-main.js --etcd_url http://$ETCD_URL --etcd_prefix "/vitastor" &>./testdata/mon.log &
MON_PID=$!

$ETCDCTL put /vitastor/config/pools '{"1":{"name":"testpool","scheme":"xor","pg_size":3,"pg_minsize":2,"parity_chunks":1,"pg_count":1,"failure_domain":"osd"}}'

sleep 2

if ! ($ETCDCTL get /vitastor/config/pgs --print-value-only | jq -s -e '(. | length) != 0 and (.[0].items["1"]["1"].osd_set | sort) == ["1","2","3"]'); then
    format_error "FAILED: 1 PG NOT CONFIGURED"
fi

if ! ($ETCDCTL get /vitastor/pg/state/1/1 --print-value-only | jq -s -e '(. | length) != 0 and .[0].state == ["active"]'); then
    format_error "FAILED: 1 PG NOT UP"
fi

echo leak:fio >> testdata/lsan-suppress.txt
echo leak:tcmalloc >> testdata/lsan-suppress.txt
echo leak:ceph >> testdata/lsan-suppress.txt
echo leak:librbd >> testdata/lsan-suppress.txt
echo leak:_M_mutate >> testdata/lsan-suppress.txt
echo leak:_M_assign >> testdata/lsan-suppress.txt
#LSAN_OPTIONS=suppressions=`pwd`/testdata/lsan-suppress.txt LD_PRELOAD=libasan.so.5 \
#    fio -thread -name=test -ioengine=./libfio_sec_osd.so -bs=4k -fsync=128 `$ETCDCTL get /vitastor/osd/state/1 --print-value-only | jq -r '"-host="+.addresses[0]+" -port="+(.port|tostring)'` -rw=write -size=32M

LSAN_OPTIONS=suppressions=`pwd`/testdata/lsan-suppress.txt LD_PRELOAD=libasan.so.5 \
    fio -thread -name=test -ioengine=./libfio_cluster.so -bs=4M -direct=1 -iodepth=1 -fsync=1 -rw=write -etcd=$ETCD_URL -pool=1 -inode=1 -size=1G -cluster_log_level=10

format_green OK
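A plausible way to run this test locally (prerequisites are inferred from the script itself, not stated in the changeset): build the tree first so ./osd and ./libfio_cluster.so exist, and have etcd, etcdctl, node/npm, jq and fio available in PATH.

    make
    ETCD_PORT=12379 ./run_tests.sh    # logs and the etcd dump land in ./testdata/

ETCD_PORT only needs to be set when the default 12379 is already in use.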