Skip to content

Commit

Permalink
Merge branch 'stable/v3.x' into SDSTOR-encode
Browse files — browse the repository at this point in the history
  • Loading branch information
shosseinimotlagh committed Jul 12, 2023
2 parents 9d6e10b + 4923483 commit fcf284d
Show file tree
Hide file tree
Showing 4 changed files with 44 additions and 24 deletions.
26 changes: 12 additions & 14 deletions src/engine/meta/meta_blks_mgr.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -90,9 +90,8 @@ void MetaBlkMgr::start(blk_store_t* sb_blk_store, const sb_blkstore_blob* blob,

HS_REL_ASSERT_GT(get_page_size(), META_BLK_HDR_MAX_SZ);
HS_REL_ASSERT_GT(get_page_size(), MAX_BLK_OVF_HDR_MAX_SZ);
auto sm{HomeStoreBase::safe_instance()->sobject_mgr()};
sm->create_object("module", "MetaBlkMgr",
std::bind(&MetaBlkMgr::get_status, this, std::placeholders::_1));
HomeStoreBase::safe_instance()->sobject_mgr()->create_object(
"module", "MetaBlkMgr", std::bind(&MetaBlkMgr::get_status, this, std::placeholders::_1));

reset_self_recover();
alloc_compress_buf(get_init_compress_memory_size());
Expand Down Expand Up @@ -401,10 +400,10 @@ void MetaBlkMgr::register_handler(const meta_sub_type type, const meta_blk_found
}

void MetaBlkMgr::create_sobject(meta_sub_type type) {
if(m_sub_info[type].meta_bids.size()==1) {
auto sm{HomeStoreBase::safe_instance()->sobject_mgr()};
sm->create_object("MetaBlk", "MetaBlk_" + type,
std::bind(&MetaBlkMgr::get_status_metablk, this, std::placeholders::_1, "MetaBlk_" + type));
if (m_sub_info[type].meta_bids.size() == 1) {
HomeStoreBase::safe_instance()->sobject_mgr()->create_object(
"MetaBlk", "MetaBlk_" + type,
std::bind(&MetaBlkMgr::get_status_metablk, this, std::placeholders::_1, "MetaBlk_" + type));
}
}

Expand Down Expand Up @@ -1500,10 +1499,11 @@ sisl::status_response MetaBlkMgr::get_status_metablk(const sisl::status_request&
std::string jname = "content";
sisl::byte_array buf;
if (client == "VOLUME" || log_level == 3) {
if(it == m_meta_blks.end()){
j["error"] = fmt::format("Expecting meta_bid: {} to be found in meta blks cache. Corruption detected!",
bid.to_string());
return response;
if (it == m_meta_blks.end()) {
j["error"] = fmt::format(
"Expecting meta_bid: {} to be found in meta blks cache. Corruption detected!",
bid.to_string());
return response;
}

if (it == m_meta_blks.end()) {
Expand Down Expand Up @@ -1616,9 +1616,7 @@ nlohmann::json MetaBlkMgr::populate_json(const int log_level, meta_blk_map_t& me
const std::string file_path{fmt::format("{}/{}_{}", dump_dir, x.first, bid_cnt)};
std::ofstream f{file_path};
f.write(reinterpret_cast< const char* >(buf->bytes), buf->size);
j[x.first]["content"][std::to_string(bid_cnt)] =
hs_utils::encodeBase64(buf->bytes, buf->size);
;
j[x.first]["content"][std::to_string(bid_cnt)] = hs_utils::encodeBase64(buf->bytes, buf->size);

free_space -= buf->size;
}
Expand Down
34 changes: 27 additions & 7 deletions src/homelogstore/log_store.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -316,19 +316,23 @@ int HomeLogStore::search_max_le(const logstore_seq_num_t input_sn) {

nlohmann::json HomeLogStore::dump_log_store(const log_dump_req& dump_req) {
nlohmann::json json_dump{}; // create root object
json_dump["store_id"] = this->m_store_id;

const auto trunc_upto{this->truncated_upto()};
std::remove_const_t< decltype(trunc_upto) > idx{trunc_upto + 1};
int32_t batch_size;
if (dump_req.batch_size != 0) {
batch_size = dump_req.batch_size;
} else {
batch_size = dump_req.end_seq_num - dump_req.start_seq_num;
}
if (dump_req.start_seq_num != 0) idx = dump_req.start_seq_num;

// must use move operator= operation instead of move copy constructor
nlohmann::json json_records = nlohmann::json::array();
bool end_iterate{false};
bool proceed{false};
m_records.foreach_completed(
idx,
[&json_records, &dump_req, &end_iterate, this](decltype(idx) cur_idx, decltype(idx) max_idx,
const homestore::logstore_record& record) -> bool {
[&batch_size, &json_dump, &json_records, &dump_req, &proceed,
this](decltype(idx) cur_idx, decltype(idx) max_idx, const homestore::logstore_record& record) -> bool {
// do a sync read
// must use move operator= operation instead of move copy constructor
nlohmann::json json_val = nlohmann::json::object();
Expand All @@ -349,8 +353,10 @@ nlohmann::json HomeLogStore::dump_log_store(const log_dump_req& dump_req) {

json_records.emplace_back(std::move(json_val));
decltype(idx) end_idx{std::min(max_idx, dump_req.end_seq_num)};
end_iterate = (cur_idx < end_idx) ? true : false;
return end_iterate;
proceed = (cur_idx < end_idx && --batch_size > 0) ? true : false;
// User can provide either the end_seq_num or batch_size in the request.
if (cur_idx < end_idx && batch_size == 0) { json_dump["next_cursor"] = std::to_string(cur_idx + 1); }
return proceed;
});

json_dump["log_records"] = std::move(json_records);
Expand Down Expand Up @@ -379,6 +385,19 @@ logstore_seq_num_t HomeLogStore::get_contiguous_completed_seq_num(const logstore

sisl::status_response HomeLogStore::get_status(const sisl::status_request& request) {
sisl::status_response response;
if (request.json.contains("type") && request.json["type"] == "logstore_record") {
log_dump_req dump_req{};
if (!request.next_cursor.empty()) { dump_req.start_seq_num = std::stoul(request.next_cursor); }
dump_req.batch_size = request.batch_size;
dump_req.end_seq_num = UINT32_MAX;
homestore::log_dump_verbosity verbose_level = homestore::log_dump_verbosity::HEADER;
if (request.json.contains("log_content")) { verbose_level = homestore::log_dump_verbosity::CONTENT; }
dump_req.verbosity_level = verbose_level;
response.json.update(dump_log_store(dump_req));
return response;
}

response.json["store_id"] = this->m_store_id;
response.json["append_mode"] = m_append_mode;
response.json["highest_lsn"] = m_seq_num.load(std::memory_order_relaxed);
response.json["max_lsn_in_prev_flush_batch"] = m_flush_batch_max_lsn;
Expand All @@ -400,6 +419,7 @@ sisl::status_response HomeLogStore::get_status(const sisl::status_request& reque
dump_req.verbosity_level = verbose_level;
response.json.update(dump_log_store(dump_req));
}

return response;
}

Expand Down
1 change: 1 addition & 0 deletions src/homelogstore/log_store.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,7 @@ struct log_dump_req {
std::shared_ptr< HomeLogStore > log_store; // if null all log stores are dumped
logstore_seq_num_t start_seq_num; // empty_key if from start of log file
logstore_seq_num_t end_seq_num; // empty_key if till last log entry
int32_t batch_size = 0; // Size of the output batch.
};

struct logstore_record {
Expand Down
7 changes: 4 additions & 3 deletions src/homelogstore/log_store_family.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -34,16 +34,17 @@ LogStoreFamily::LogStoreFamily(const logstore_family_id_t f_id) :
m_family_id{f_id},
m_metablk_name{std::string("LogStoreFamily") + std::to_string(f_id)},
m_log_dev{f_id, m_metablk_name} {
auto hb = HomeStoreBase::safe_instance();
m_sobject = hb->sobject_mgr()->create_object("LogStoreFamily", m_metablk_name,
std::bind(&LogStoreFamily::get_status, this, std::placeholders::_1));
}

void LogStoreFamily::meta_blk_found_cb(meta_blk* const mblk, const sisl::byte_view buf, const size_t size) {
m_log_dev.meta_blk_found(mblk, buf, size);
}

void LogStoreFamily::start(const bool format, JournalVirtualDev* blk_store) {
auto hb = HomeStoreBase::safe_instance();
m_sobject = hb->sobject_mgr()->create_object("LogStoreFamily", m_metablk_name,
std::bind(&LogStoreFamily::get_status, this, std::placeholders::_1));

m_log_dev.register_store_found_cb(bind_this(LogStoreFamily::on_log_store_found, 2));
m_log_dev.register_append_cb(bind_this(LogStoreFamily::on_io_completion, 5));
m_log_dev.register_logfound_cb(bind_this(LogStoreFamily::on_logfound, 6));
Expand Down

0 comments on commit fcf284d

Please sign in to comment.