Bugfix: allocation close to the reserved VM capacity
Fixes an unexpected error that happens when the total allocation size is close to the reserved VM capacity.
Keita Iwabuchi committed Oct 5, 2024
1 parent c0649d6 commit 317fad2
Showing 2 changed files with 61 additions and 36 deletions.
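
To make the fix concrete before the diff itself, here is an illustrative, self-contained C++ sketch of the capacity accounting this commit introduces; it reflects one reading of the change, not Metall's actual code. The sizes are made up, round_up and round_down are simplified stand-ins for mdtl::round_up and mdtl::round_down, and the variable names abbreviate the m_* members touched below.

  #include <cassert>
  #include <cstddef>

  // Hypothetical stand-ins for mdtl::round_up / mdtl::round_down.
  constexpr std::size_t round_up(std::size_t n, std::size_t a) { return ((n + a - 1) / a) * a; }
  constexpr std::size_t round_down(std::size_t n, std::size_t a) { return (n / a) * a; }

  int main() {
    constexpr std::size_t page_size  = 4096;     // assumed system page size
    constexpr std::size_t block_size = 1 << 21;  // assumed k_block_size (2 MiB)
    constexpr std::size_t request    = 1 << 30;  // requested segment capacity (1 GiB)

    // Stand-in for sizeof(segment_header_type), rounded up to a page; the
    // header sits at the front of the reserved VM region.
    const std::size_t header_size    = round_up(1024, page_size);
    // New scheme: reserve header + request rounded up to the block size ...
    const std::size_t vm_region_size = header_size + round_up(request, block_size);
    // ... and track the usable segment capacity separately, rounded down to a block.
    const std::size_t segment_capacity = round_down(vm_region_size - header_size, block_size);

    // Previously, grow requests and mapping asserts were checked against
    // vm_region_size, which also counts the header, so a request close to the
    // reserved capacity could pass the check yet extend past the reservation.
    // The new m_segment_capacity member restores the invariants below.
    assert(segment_capacity >= request);
    assert(header_size + segment_capacity <= vm_region_size);
    return 0;
  }

With these hypothetical sizes the usable segment capacity equals the requested 1 GiB while the reservation is one page larger, so a request for the full capacity now passes the grow check without spilling past the reserved region.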
1 change: 1 addition & 0 deletions include/metall/kernel/segment_allocator.hpp
@@ -148,6 +148,7 @@ class segment_allocator {
/// \return The offset of an allocated memory.
/// On error, k_null_offset is returned.
difference_type allocate(const size_type nbytes) {
+ if (nbytes == 0) return k_null_offset;
const bin_no_type bin_no = bin_no_mngr::to_bin_no(nbytes);

const auto offset = (priv_small_object_bin(bin_no))
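
The allocator-side change above is independent of the capacity accounting: a zero-byte request now returns k_null_offset immediately instead of being looked up in a bin. A minimal sketch of the same guard follows; the types and the sentinel value are assumptions for illustration, since the real k_null_offset is defined elsewhere in Metall and does not appear in this diff.

  #include <cstddef>

  using difference_type = std::ptrdiff_t;
  constexpr difference_type k_null_offset = -1;  // assumed sentinel, for illustration only

  // Simplified stand-in for segment_allocator::allocate().
  difference_type allocate(const std::size_t nbytes) {
    if (nbytes == 0) return k_null_offset;  // guard added by this commit
    // ... otherwise map nbytes to a bin and allocate from it ...
    return 0;  // placeholder offset
  }
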
96 changes: 60 additions & 36 deletions include/metall/kernel/segment_storage.hpp
@@ -75,6 +75,7 @@ class segment_storage {
: m_system_page_size(other.m_system_page_size),
m_num_blocks(other.m_num_blocks),
m_vm_region_size(other.m_vm_region_size),
+ m_segment_capacity(other.m_segment_capacity),
m_current_segment_size(other.m_current_segment_size),
m_vm_region(other.m_vm_region),
m_segment(other.m_segment),
@@ -95,6 +96,7 @@
m_system_page_size = other.m_system_page_size;
m_num_blocks = other.m_num_blocks;
m_vm_region_size = other.m_vm_region_size;
+ m_segment_capacity = other.m_segment_capacity;
m_current_segment_size = other.m_current_segment_size;
m_vm_region = other.m_vm_region;
m_segment_header = other.m_segment_header;
@@ -256,14 +258,23 @@ class segment_storage {
return total_file_size;
}

- std::size_t priv_aligment() const {
- return std::max((size_t)m_system_page_size, (size_t)k_block_size);
+ std::size_t priv_round_up_to_block_size(const std::size_t nbytes) const {
+ const auto alignment =
+ std::max((size_t)m_system_page_size, (size_t)k_block_size);
+ return mdtl::round_up(nbytes, alignment);
}

+ std::size_t priv_round_down_to_block_size(const std::size_t nbytes) const {
+ const auto alignment =
+ std::max((size_t)m_system_page_size, (size_t)k_block_size);
+ return mdtl::round_down(nbytes, alignment);
+ }
+
void priv_clear_status() {
m_system_page_size = 0;
m_num_blocks = 0;
m_vm_region_size = 0;
+ m_segment_capacity = 0;
m_current_segment_size = 0;
m_vm_region = nullptr;
m_segment = nullptr;
@@ -277,9 +288,11 @@
}

bool priv_is_open() const {
+ // TODO: brush up this logic
return (check_sanity() && m_system_page_size > 0 && m_num_blocks > 0 &&
- m_vm_region_size > 0 && m_current_segment_size > 0 && m_vm_region &&
- m_segment && !m_top_path.empty() && !m_block_fd_list.empty());
+ m_vm_region_size > 0 && m_segment_capacity > 0 &&
+ m_current_segment_size > 0 && m_vm_region && m_segment &&
+ !m_top_path.empty() && !m_block_fd_list.empty());
}

static bool priv_copy(const path_type &source_path,
@@ -309,20 +322,44 @@
return false;
}

+ bool priv_prepare_header_and_segment(
+ const std::size_t segment_capacity_request) {
+ const auto header_size = mdtl::round_up(sizeof(segment_header_type),
+ int64_t(m_system_page_size));
+ const auto vm_region_size =
+ header_size + priv_round_up_to_block_size(segment_capacity_request);
+ if (!priv_reserve_vm(vm_region_size)) {
+ priv_set_broken_status();
+ return false;
+ }
+ m_segment = reinterpret_cast<char *>(m_vm_region) + header_size;
+ m_segment_capacity =
+ priv_round_down_to_block_size(m_vm_region_size - header_size);
+ assert(m_segment_capacity >= segment_capacity_request);
+ assert(m_segment_capacity + header_size <= m_vm_region_size);
+ priv_construct_segment_header(m_vm_region);
+
+ return true;
+ }
+
bool priv_reserve_vm(const std::size_t nbytes) {
m_vm_region_size =
- mdtl::round_up((int64_t)nbytes, (int64_t)priv_aligment());
+ mdtl::round_up((int64_t)nbytes, (int64_t)m_system_page_size);
m_vm_region =
- mdtl::reserve_aligned_vm_region(priv_aligment(), m_vm_region_size);
+ mdtl::reserve_aligned_vm_region(m_system_page_size, m_vm_region_size);

if (!m_vm_region) {
std::stringstream ss;
ss << "Cannot reserve a VM region " << nbytes << " bytes";
ss << "Cannot reserve a VM region " << m_vm_region_size << " bytes";
logger::out(logger::level::error, __FILE__, __LINE__, ss.str().c_str());
m_vm_region_size = 0;
return false;
+ } else {
+ std::stringstream ss;
+ ss << "Reserved a VM region: " << m_vm_region_size << " bytes at "
+ << (uint64_t)m_vm_region;
+ logger::out(logger::level::verbose, __FILE__, __LINE__, ss.str().c_str());
}
- assert(reinterpret_cast<uint64_t>(m_vm_region) % priv_aligment() == 0);

return true;
}
@@ -352,8 +389,8 @@ class segment_storage {
return false;
}

- const auto size =
- mdtl::round_up(sizeof(segment_header_type), int64_t(priv_aligment()));
+ const auto size = mdtl::round_up(sizeof(segment_header_type),
+ int64_t(m_system_page_size));
if (mdtl::map_anonymous_write_mode(addr, size, MAP_FIXED) != addr) {
logger::out(logger::level::error, __FILE__, __LINE__,
"Cannot allocate segment header");
@@ -368,8 +405,8 @@

bool priv_deallocate_segment_header() {
std::destroy_at(&m_segment_header);
- const auto size =
- mdtl::round_up(sizeof(segment_header_type), int64_t(priv_aligment()));
+ const auto size = mdtl::round_up(sizeof(segment_header_type),
+ int64_t(m_system_page_size));
const auto ret = mdtl::munmap(m_segment_header, size, false);
m_segment_header = nullptr;
if (!ret) {
@@ -379,7 +416,8 @@
return ret;
}

- bool priv_create(const path_type &top_path, const std::size_t capacity) {
+ bool priv_create(const path_type &top_path,
+ const std::size_t segment_capacity_request) {
if (!check_sanity()) return false;
if (is_open())
return false; // Cannot open multiple segments simultaneously.
@@ -398,15 +436,7 @@
}
}

- const auto header_size =
- mdtl::round_up(sizeof(segment_header_type), int64_t(priv_aligment()));
- const auto vm_region_size = header_size + capacity;
- if (!priv_reserve_vm(vm_region_size)) {
- priv_set_broken_status();
- return false;
- }
- m_segment = reinterpret_cast<char *>(m_vm_region) + header_size;
- priv_construct_segment_header(m_vm_region);
+ priv_prepare_header_and_segment(segment_capacity_request);

m_top_path = top_path;
m_read_only = false;
@@ -431,7 +461,8 @@
return true;
}

- bool priv_open(const path_type &top_path, const std::size_t capacity,
+ bool priv_open(const path_type &top_path,
+ const std::size_t segment_capacity_request,
const bool read_only) {
if (!check_sanity()) return false;
if (is_open())
@@ -442,16 +473,8 @@
logger::out(logger::level::verbose, __FILE__, __LINE__, s.c_str());
}

- const auto header_size =
- mdtl::round_up(sizeof(segment_header_type), int64_t(priv_aligment()));
- const auto vm_size =
- header_size + ((read_only) ? priv_get_size(top_path) : capacity);
- if (!priv_reserve_vm(vm_size)) {
- priv_set_broken_status();
- return false;
- }
- m_segment = reinterpret_cast<char *>(m_vm_region) + header_size;
- priv_construct_segment_header(m_vm_region);
+ priv_prepare_header_and_segment(read_only ? priv_get_size(top_path)
+ : segment_capacity_request);

m_top_path = top_path;
m_read_only = read_only;
@@ -516,7 +539,7 @@
return false;
}

- if (request_size > m_vm_region_size) {
+ if (request_size > m_segment_capacity) {
logger::out(logger::level::error, __FILE__, __LINE__,
"Requested segment size is bigger than the reserved VM size");
return false;
@@ -587,7 +610,7 @@
assert(!path.empty());
assert(file_size > 0);
assert(segment_offset >= 0);
- assert(segment_offset + file_size <= m_vm_region_size);
+ assert(segment_offset + file_size <= m_segment_capacity);

[[maybe_unused]] static constexpr int map_nosync =
#ifdef MAP_NOSYNC
@@ -628,7 +651,7 @@
assert(!path.empty());
assert(region_size > 0);
assert(segment_offset >= 0);
- assert(segment_offset + region_size <= m_vm_region_size);
+ assert(segment_offset + region_size <= m_segment_capacity);

const auto map_addr = static_cast<char *>(m_segment) + segment_offset;
{
@@ -924,6 +947,7 @@ class segment_storage {
ssize_t m_system_page_size{0};
std::size_t m_num_blocks{0};
std::size_t m_vm_region_size{0};
+ std::size_t m_segment_capacity{0};
std::size_t m_current_segment_size{0};
void *m_vm_region{nullptr};
void *m_segment{nullptr};
