Use btree_map instead of sparsepp

branch: trace-sqes
Vitaliy Filippov 2020-03-04 17:12:27 +03:00
parent 8e63995306
commit b27ad550cf
5 changed files with 15 additions and 13 deletions

View File

@@ -46,7 +46,7 @@ class journal_flusher_co
std::function<void(ring_data_t*)> simple_callback_r, simple_callback_w;
bool skip_copy, has_delete, has_empty;
-spp::sparse_hash_map<object_id, clean_entry>::iterator clean_it;
+blockstore_clean_db_t::iterator clean_it;
std::vector<copy_buffer_t> v;
std::vector<copy_buffer_t>::iterator it;
int copy_count;
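
The reason this hunk can stay so small is that the flusher now only names the blockstore_clean_db_t typedef instead of a concrete container type. A minimal sketch of the pattern, with assumed simplified definitions of object_id and clean_entry and a hypothetical find_clean_location() helper; cpp-btree's btree_map mirrors the std::map interface, so find(), end() and iterator dereference work unchanged:

#include <cstdint>
#include "cpp-btree/btree_map.h"

struct object_id { uint64_t inode, stripe; };
struct clean_entry { uint64_t version, location; };

// btree_map is an ordered container, so the key needs operator<
// (the hash-based sparse_hash_map only needed a hash function)
inline bool operator<(const object_id & a, const object_id & b)
{
    return a.inode < b.inode || (a.inode == b.inode && a.stripe < b.stripe);
}

typedef btree::btree_map<object_id, clean_entry> blockstore_clean_db_t;

uint64_t find_clean_location(blockstore_clean_db_t & clean_db, object_id oid)
{
    blockstore_clean_db_t::iterator clean_it = clean_db.find(oid);
    // UINT64_MAX is just a "not found" marker for this sketch
    return clean_it != clean_db.end() ? clean_it->second.location : UINT64_MAX;
}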

View File

@@ -16,7 +16,7 @@
#include <deque>
#include <new>
#include "sparsepp/sparsepp/spp.h"
#include "cpp-btree/btree_map.h"
#include "allocator.h"
@@ -164,6 +164,10 @@ struct blockstore_op_private_t
int sync_state, prev_sync_count;
};
+// https://github.com/algorithm-ninja/cpp-btree
+// https://github.com/greg7mdp/sparsepp/ was used previously, but it was TERRIBLY slow after resizing
+// with sparsepp, random reads dropped to ~700 iops very fast with just as much as ~32k objects in the DB
+typedef btree::btree_map<object_id, clean_entry> blockstore_clean_db_t;
typedef std::map<obj_ver_id, dirty_entry> blockstore_dirty_db_t;
#include "blockstore_init.h"
@@ -198,8 +202,7 @@ class blockstore_impl_t
struct ring_consumer_t ring_consumer;
-// Another option is https://github.com/algorithm-ninja/cpp-btree
-spp::sparse_hash_map<object_id, clean_entry> clean_db;
+blockstore_clean_db_t clean_db;
uint8_t *clean_bitmap = NULL;
blockstore_dirty_db_t dirty_db;
std::list<blockstore_op_t*> submit_queue; // FIXME: funny thing is that vector is better here

osd.h
View File

@@ -19,8 +19,6 @@
#include "osd_ops.h"
#include "osd_peering_pg.h"
#include "sparsepp/sparsepp/spp.h"
#define OSD_OP_IN 0
#define OSD_OP_OUT 1

View File

@@ -93,7 +93,7 @@ void pg_t::remember_object(pg_obj_state_check_t &st, std::vector<obj_ver_role> &
}
if (state & (OBJ_NEEDS_ROLLBACK | OBJ_NEEDS_STABLE))
{
-spp::sparse_hash_map<obj_piece_id_t, obj_piece_ver_t> pieces;
+std::unordered_map<obj_piece_id_t, obj_piece_ver_t> pieces;
for (int i = st.obj_start; i < st.obj_end; i++)
{
auto & pcs = pieces[(obj_piece_id_t){ .oid = all[i].oid, .osd_num = all[i].osd_num }];
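
One practical detail of this switch: std::unordered_map<obj_piece_id_t, ...> requires obj_piece_id_t to be equality-comparable and to have a std::hash specialization (sparsepp has essentially the same requirement, so such a hasher most likely already existed). A minimal sketch, assuming a simplified field layout for obj_piece_id_t and object_id and a hypothetical count_piece() helper:

#include <cstddef>
#include <cstdint>
#include <functional>
#include <unordered_map>

struct object_id { uint64_t inode, stripe; };
struct obj_piece_id_t { object_id oid; uint64_t osd_num; };

inline bool operator==(const obj_piece_id_t & a, const obj_piece_id_t & b)
{
    return a.oid.inode == b.oid.inode && a.oid.stripe == b.oid.stripe && a.osd_num == b.osd_num;
}

namespace std
{
    template<> struct hash<obj_piece_id_t>
    {
        size_t operator()(const obj_piece_id_t & p) const
        {
            // Any reasonable mixing of the three fields works here
            size_t h = std::hash<uint64_t>()(p.oid.inode);
            h = h*31 + std::hash<uint64_t>()(p.oid.stripe);
            h = h*31 + std::hash<uint64_t>()(p.osd_num);
            return h;
        }
    };
}

// Usage mirroring the loop above: count how many times each (object, OSD) piece was seen
void count_piece(std::unordered_map<obj_piece_id_t, uint64_t> & pieces, object_id oid, uint64_t osd_num)
{
    pieces[(obj_piece_id_t){ .oid = oid, .osd_num = osd_num }]++;
}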

View File

@@ -1,12 +1,13 @@
#include <map>
+#include <unordered_map>
#include <vector>
#include <algorithm>
#include "cpp-btree/btree_map.h"
#include "object_id.h"
#include "osd_ops.h"
#include "sparsepp/sparsepp/spp.h"
// Placement group states
// Exactly one of these:
#define PG_OFFLINE (1<<0)
@@ -64,8 +65,8 @@ struct osd_op_t;
struct pg_peering_state_t
{
// osd_num -> list result
-spp::sparse_hash_map<osd_num_t, osd_op_t*> list_ops;
-spp::sparse_hash_map<osd_num_t, pg_list_result_t> list_results;
+std::unordered_map<osd_num_t, osd_op_t*> list_ops;
+std::unordered_map<osd_num_t, pg_list_result_t> list_results;
int list_done = 0;
};
@@ -122,9 +123,9 @@ struct pg_t
// it may consume up to ~ (raw storage / object size) * 24 bytes in the worst case scenario
// which is up to ~192 MB per 1 TB in the worst case scenario
std::map<pg_osd_set_t, pg_osd_set_state_t> state_dict;
-spp::sparse_hash_map<object_id, pg_osd_set_state_t*> obj_states;
+btree::btree_map<object_id, pg_osd_set_state_t*> obj_states;
std::map<obj_piece_id_t, obj_stab_action_t> obj_stab_actions;
-spp::sparse_hash_map<object_id, uint64_t> ver_override;
+btree::btree_map<object_id, uint64_t> ver_override;
pg_peering_state_t *peering_state = NULL;
std::multimap<object_id, osd_op_t*> write_queue;
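
obj_states and ver_override are consulted per object on the PG's I/O path, so the same container considerations as in the blockstore apply; the only new requirement btree_map brings is operator< on object_id. A small usage sketch for obj_states, simplified: pg_osd_set_state_t is a stub, the flag value is a placeholder, and is_object_degraded() is a hypothetical helper rather than the OSD's actual code:

#include <cstdint>
#include "cpp-btree/btree_map.h"

struct object_id { uint64_t inode, stripe; };
inline bool operator<(const object_id & a, const object_id & b)
{
    return a.inode < b.inode || (a.inode == b.inode && a.stripe < b.stripe);
}

struct pg_osd_set_state_t { uint64_t state; };
#define OBJ_DEGRADED (1<<1) // placeholder flag value for the sketch

bool is_object_degraded(btree::btree_map<object_id, pg_osd_set_state_t*> & obj_states, object_id oid)
{
    // Objects with no entry are treated as having no special state in this sketch
    auto st_it = obj_states.find(oid);
    return st_it != obj_states.end() && (st_it->second->state & OBJ_DEGRADED);
}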