From 317fad2e8b99b05fd34cb362edd2de4f8b8daff2 Mon Sep 17 00:00:00 2001
From: Keita Iwabuchi
Date: Fri, 4 Oct 2024 20:48:25 -0700
Subject: [PATCH] Bugfix: close to VM capacity allocation

Fixes an unexpected error that happens when the total allocation size is
close to the reserved VM capacity.
---
 include/metall/kernel/segment_allocator.hpp |  1 +
 include/metall/kernel/segment_storage.hpp   | 96 +++++++++++++--------
 2 files changed, 61 insertions(+), 36 deletions(-)

diff --git a/include/metall/kernel/segment_allocator.hpp b/include/metall/kernel/segment_allocator.hpp
index 0e53f1c8..3ef3c1b9 100644
--- a/include/metall/kernel/segment_allocator.hpp
+++ b/include/metall/kernel/segment_allocator.hpp
@@ -148,6 +148,7 @@ class segment_allocator {
   /// \return The offset of an allocated memory.
   /// On error, k_null_offset is returned.
   difference_type allocate(const size_type nbytes) {
+    if (nbytes == 0) return k_null_offset;
     const bin_no_type bin_no = bin_no_mngr::to_bin_no(nbytes);

     const auto offset = (priv_small_object_bin(bin_no))
diff --git a/include/metall/kernel/segment_storage.hpp b/include/metall/kernel/segment_storage.hpp
index 9e9b8fcd..bacad41c 100644
--- a/include/metall/kernel/segment_storage.hpp
+++ b/include/metall/kernel/segment_storage.hpp
@@ -75,6 +75,7 @@ class segment_storage {
       : m_system_page_size(other.m_system_page_size),
         m_num_blocks(other.m_num_blocks),
         m_vm_region_size(other.m_vm_region_size),
+        m_segment_capacity(other.m_segment_capacity),
         m_current_segment_size(other.m_current_segment_size),
         m_vm_region(other.m_vm_region),
         m_segment(other.m_segment),
@@ -95,6 +96,7 @@ class segment_storage {
     m_system_page_size = other.m_system_page_size;
     m_num_blocks = other.m_num_blocks;
     m_vm_region_size = other.m_vm_region_size;
+    m_segment_capacity = other.m_segment_capacity;
     m_current_segment_size = other.m_current_segment_size;
     m_vm_region = other.m_vm_region;
     m_segment_header = other.m_segment_header;
@@ -256,14 +258,23 @@ class segment_storage {
     return total_file_size;
   }

-  std::size_t priv_aligment() const {
-    return std::max((size_t)m_system_page_size, (size_t)k_block_size);
+  std::size_t priv_round_up_to_block_size(const std::size_t nbytes) const {
+    const auto alignment =
+        std::max((size_t)m_system_page_size, (size_t)k_block_size);
+    return mdtl::round_up(nbytes, alignment);
+  }
+
+  std::size_t priv_round_down_to_block_size(const std::size_t nbytes) const {
+    const auto alignment =
+        std::max((size_t)m_system_page_size, (size_t)k_block_size);
+    return mdtl::round_down(nbytes, alignment);
   }

   void priv_clear_status() {
     m_system_page_size = 0;
     m_num_blocks = 0;
     m_vm_region_size = 0;
+    m_segment_capacity = 0;
     m_current_segment_size = 0;
     m_vm_region = nullptr;
     m_segment = nullptr;
@@ -277,9 +288,11 @@ class segment_storage {
   }

   bool priv_is_open() const {
+    // TODO: brush up this logic
     return (check_sanity() && m_system_page_size > 0 && m_num_blocks > 0 &&
-            m_vm_region_size > 0 && m_current_segment_size > 0 && m_vm_region &&
-            m_segment && !m_top_path.empty() && !m_block_fd_list.empty());
+            m_vm_region_size > 0 && m_segment_capacity > 0 &&
+            m_current_segment_size > 0 && m_vm_region && m_segment &&
+            !m_top_path.empty() && !m_block_fd_list.empty());
   }

   static bool priv_copy(const path_type &source_path,
@@ -309,20 +322,44 @@ class segment_storage {
     return false;
   }

+  bool priv_prepare_header_and_segment(
+      const std::size_t segment_capacity_request) {
+    const auto header_size = mdtl::round_up(sizeof(segment_header_type),
+                                            int64_t(m_system_page_size));
+    const auto vm_region_size =
+        header_size + priv_round_up_to_block_size(segment_capacity_request);
+    if (!priv_reserve_vm(vm_region_size)) {
+      priv_set_broken_status();
+      return false;
+    }
+    m_segment = reinterpret_cast<char *>(m_vm_region) + header_size;
+    m_segment_capacity =
+        priv_round_down_to_block_size(m_vm_region_size - header_size);
+    assert(m_segment_capacity >= segment_capacity_request);
+    assert(m_segment_capacity + header_size <= m_vm_region_size);
+    priv_construct_segment_header(m_vm_region);
+
+    return true;
+  }
+
   bool priv_reserve_vm(const std::size_t nbytes) {
     m_vm_region_size =
-        mdtl::round_up((int64_t)nbytes, (int64_t)priv_aligment());
+        mdtl::round_up((int64_t)nbytes, (int64_t)m_system_page_size);
     m_vm_region =
-        mdtl::reserve_aligned_vm_region(priv_aligment(), m_vm_region_size);
+        mdtl::reserve_aligned_vm_region(m_system_page_size, m_vm_region_size);
     if (!m_vm_region) {
       std::stringstream ss;
-      ss << "Cannot reserve a VM region " << nbytes << " bytes";
+      ss << "Cannot reserve a VM region " << m_vm_region_size << " bytes";
       logger::out(logger::level::error, __FILE__, __LINE__, ss.str().c_str());
       m_vm_region_size = 0;
       return false;
+    } else {
+      std::stringstream ss;
+      ss << "Reserved a VM region: " << m_vm_region_size << " bytes at "
+         << (uint64_t)m_vm_region;
+      logger::out(logger::level::verbose, __FILE__, __LINE__, ss.str().c_str());
     }

-    assert(reinterpret_cast<uint64_t>(m_vm_region) % priv_aligment() == 0);

     return true;
   }
@@ -352,8 +389,8 @@ class segment_storage {
       return false;
     }

-    const auto size =
-        mdtl::round_up(sizeof(segment_header_type), int64_t(priv_aligment()));
+    const auto size = mdtl::round_up(sizeof(segment_header_type),
+                                     int64_t(m_system_page_size));
     if (mdtl::map_anonymous_write_mode(addr, size, MAP_FIXED) != addr) {
       logger::out(logger::level::error, __FILE__, __LINE__,
                   "Cannot allocate segment header");
@@ -368,8 +405,8 @@ class segment_storage {

   bool priv_deallocate_segment_header() {
     std::destroy_at(&m_segment_header);
-    const auto size =
-        mdtl::round_up(sizeof(segment_header_type), int64_t(priv_aligment()));
+    const auto size = mdtl::round_up(sizeof(segment_header_type),
+                                     int64_t(m_system_page_size));
     const auto ret = mdtl::munmap(m_segment_header, size, false);
     m_segment_header = nullptr;
     if (!ret) {
@@ -379,7 +416,8 @@ class segment_storage {
     return ret;
   }

-  bool priv_create(const path_type &top_path, const std::size_t capacity) {
+  bool priv_create(const path_type &top_path,
+                   const std::size_t segment_capacity_request) {
     if (!check_sanity()) return false;
     if (is_open())
       return false;  // Cannot open multiple segments simultaneously.
@@ -398,15 +436,7 @@ class segment_storage {
       }
     }

-    const auto header_size =
-        mdtl::round_up(sizeof(segment_header_type), int64_t(priv_aligment()));
-    const auto vm_region_size = header_size + capacity;
-    if (!priv_reserve_vm(vm_region_size)) {
-      priv_set_broken_status();
-      return false;
-    }
-    m_segment = reinterpret_cast<char *>(m_vm_region) + header_size;
-    priv_construct_segment_header(m_vm_region);
+    priv_prepare_header_and_segment(segment_capacity_request);

     m_top_path = top_path;
     m_read_only = false;
@@ -431,7 +461,8 @@ class segment_storage {
     return true;
   }

-  bool priv_open(const path_type &top_path, const std::size_t capacity,
+  bool priv_open(const path_type &top_path,
+                 const std::size_t segment_capacity_request,
                  const bool read_only) {
     if (!check_sanity()) return false;
     if (is_open())
@@ -442,16 +473,8 @@ class segment_storage {
       logger::out(logger::level::verbose, __FILE__, __LINE__, s.c_str());
     }

-    const auto header_size =
-        mdtl::round_up(sizeof(segment_header_type), int64_t(priv_aligment()));
-    const auto vm_size =
-        header_size + ((read_only) ? priv_get_size(top_path) : capacity);
-    if (!priv_reserve_vm(vm_size)) {
-      priv_set_broken_status();
-      return false;
-    }
-    m_segment = reinterpret_cast<char *>(m_vm_region) + header_size;
-    priv_construct_segment_header(m_vm_region);
+    priv_prepare_header_and_segment(read_only ? priv_get_size(top_path)
+                                              : segment_capacity_request);

     m_top_path = top_path;
     m_read_only = read_only;
@@ -516,7 +539,7 @@ class segment_storage {
       return false;
     }
-    if (request_size > m_vm_region_size) {
+    if (request_size > m_segment_capacity) {
       logger::out(logger::level::error, __FILE__, __LINE__,
                   "Requested segment size is bigger than the reserved VM size");
       return false;
     }
@@ -587,7 +610,7 @@ class segment_storage {
     assert(!path.empty());
     assert(file_size > 0);
     assert(segment_offset >= 0);
-    assert(segment_offset + file_size <= m_vm_region_size);
+    assert(segment_offset + file_size <= m_segment_capacity);

     [[maybe_unused]] static constexpr int map_nosync =
 #ifdef MAP_NOSYNC
@@ -628,7 +651,7 @@ class segment_storage {
     assert(!path.empty());
     assert(region_size > 0);
     assert(segment_offset >= 0);
-    assert(segment_offset + region_size <= m_vm_region_size);
+    assert(segment_offset + region_size <= m_segment_capacity);

     const auto map_addr = static_cast<char *>(m_segment) + segment_offset;
     {
@@ -924,6 +947,7 @@ class segment_storage {
   ssize_t m_system_page_size{0};
   std::size_t m_num_blocks{0};
   std::size_t m_vm_region_size{0};
+  std::size_t m_segment_capacity{0};
   std::size_t m_current_segment_size{0};
   void *m_vm_region{nullptr};
   void *m_segment{nullptr};
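
Note on the capacity bookkeeping introduced above: priv_prepare_header_and_segment
rounds the requested capacity up to the block-size alignment before reserving the
VM region, and then derives m_segment_capacity by rounding the region minus the
header back down to the same alignment, so block-granular mappings stay inside the
reservation even when the total allocation approaches the reserved capacity
(previously the reservation used the raw request and the size check compared
against m_vm_region_size, which also counts the header). The program below is a
minimal, self-contained sketch of that arithmetic, not code from the patch; the
page size, block size, header payload, and request value are made-up example
inputs, and the local round_up/round_down helpers stand in for mdtl::round_up and
mdtl::round_down.

    // Sketch only: illustrates the round-up / round-down capacity math used by
    // priv_prepare_header_and_segment in this patch. All constants below are
    // hypothetical example inputs, not values taken from Metall.
    #include <algorithm>
    #include <cassert>
    #include <cstddef>
    #include <iostream>

    namespace {
    std::size_t round_up(const std::size_t n, const std::size_t alignment) {
      return ((n + alignment - 1) / alignment) * alignment;
    }
    std::size_t round_down(const std::size_t n, const std::size_t alignment) {
      return (n / alignment) * alignment;
    }
    }  // namespace

    int main() {
      const std::size_t page_size = 4096;                   // example system page size
      const std::size_t block_size = std::size_t{1} << 27;  // example block size (128 MiB)
      const std::size_t alignment = std::max(page_size, block_size);

      // The segment header occupies whole pages in front of the segment.
      const std::size_t header_size = round_up(1024, page_size);

      // A request that sits just below a block boundary ("close to" capacity).
      const std::size_t capacity_request = (std::size_t{1} << 30) - 1;

      // Reserve the header plus the request rounded UP to a whole block, so the
      // last, partially used block still fits inside the reservation.
      const std::size_t vm_region_size =
          header_size + round_up(capacity_request, alignment);

      // Usable capacity: whatever is left after the header, rounded DOWN to a
      // whole block, so block-granular mappings never run past the reservation.
      const std::size_t segment_capacity =
          round_down(vm_region_size - header_size, alignment);

      // Same invariants as the asserts added in priv_prepare_header_and_segment.
      assert(segment_capacity >= capacity_request);
      assert(header_size + segment_capacity <= vm_region_size);

      std::cout << "vm_region_size   = " << vm_region_size << '\n'
                << "segment_capacity = " << segment_capacity << '\n';
      return 0;
    }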