Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Bugfix: close to VM capacity allocation #334

Merged
merged 2 commits into from
Oct 5, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 4 additions & 3 deletions include/metall/basic_manager.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -902,9 +902,10 @@ class basic_manager {
/// \copydoc doc_thread_safe_alloc
///
/// \param nbytes Number of bytes to allocate. Must
/// be a multiple alignment. \param alignment Alignment size. Alignment must
/// be a power of two and satisfy [min allocation size, chunk size]. \return
/// Returns a pointer to the allocated memory.
/// be a multiple of alignment.
/// \param alignment Alignment size. Alignment must be a power of two and
/// satisfy [min allocation size, system page size].
/// \return Returns a pointer to the allocated memory.
void *allocate_aligned(size_type nbytes, size_type alignment) noexcept {
if (!check_sanity()) {
return nullptr;
Expand Down
2 changes: 1 addition & 1 deletion include/metall/detail/mmap.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -372,7 +372,7 @@ inline void *reserve_aligned_vm_region(const size_t alignment,
if (length % alignment != 0) {
std::stringstream ss;
ss << "length (" << length << ") is not a multiple of alignment ("
<< ::sysconf(_SC_PAGE_SIZE) << ")";
<< alignment << ")";
logger::out(logger::level::error, __FILE__, __LINE__, ss.str().c_str());
return nullptr;
}
Expand Down
1 change: 1 addition & 0 deletions include/metall/kernel/manager_kernel_impl.ipp
Original file line number Diff line number Diff line change
Expand Up @@ -112,6 +112,7 @@ void *manager_kernel<st, sst, cn, cs>::allocate_aligned(
}
assert(offset >= 0);

assert((std::ptrdiff_t)m_segment_storage.get_segment() % alignment == 0);
auto *addr = priv_to_address(offset);
assert((uint64_t)addr % alignment == 0);
return addr;
Expand Down
13 changes: 9 additions & 4 deletions include/metall/kernel/segment_allocator.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -148,6 +148,7 @@ class segment_allocator {
/// \return The offset of an allocated memory.
/// On error, k_null_offset is returned.
difference_type allocate(const size_type nbytes) {
if (nbytes == 0) return k_null_offset;
const bin_no_type bin_no = bin_no_mngr::to_bin_no(nbytes);

const auto offset = (priv_small_object_bin(bin_no))
Expand All @@ -163,10 +164,10 @@ class segment_allocator {
/// within this segment, i.e., this function does not know the address this
/// segment is mapped to. \param nbytes A size to allocate. Must be a multiple
/// of alignment. \param alignment An alignment requirement. Alignment must be
/// a power of two and satisfy [min allocation size, chunk size]. \return On
/// success, returns the pointer to the beginning of newly allocated memory.
/// Returns k_null_offset, if the given arguments do not satisfy the
/// requirements above.
/// a power of two and satisfy [min allocation size, system page size].
/// \return On success, returns the pointer to the beginning of newly
/// allocated memory. Returns k_null_offset, if the given arguments do not
/// satisfy the requirements above.
difference_type allocate_aligned(const size_type nbytes,
const size_type alignment) {
// This aligned allocation algorithm assumes that all power of 2 numbers
Expand All @@ -193,6 +194,10 @@ class segment_allocator {
return k_null_offset;
}

// Internal allocation size must be a multiple of alignment
assert(bin_no_mngr::to_object_size(bin_no_mngr::to_bin_no(nbytes)) %
alignment == 0);

// As long as the above requirements are satisfied, just calling the normal
// allocate function is enough
const auto offset = allocate(nbytes);
Expand Down
90 changes: 60 additions & 30 deletions include/metall/kernel/segment_storage.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -75,6 +75,7 @@ class segment_storage {
: m_system_page_size(other.m_system_page_size),
m_num_blocks(other.m_num_blocks),
m_vm_region_size(other.m_vm_region_size),
m_segment_capacity(other.m_segment_capacity),
m_current_segment_size(other.m_current_segment_size),
m_vm_region(other.m_vm_region),
m_segment(other.m_segment),
Expand All @@ -95,6 +96,7 @@ class segment_storage {
m_system_page_size = other.m_system_page_size;
m_num_blocks = other.m_num_blocks;
m_vm_region_size = other.m_vm_region_size;
m_segment_capacity = other.m_segment_capacity;
m_current_segment_size = other.m_current_segment_size;
m_vm_region = other.m_vm_region;
m_segment_header = other.m_segment_header;
Expand Down Expand Up @@ -256,14 +258,23 @@ class segment_storage {
return total_file_size;
}

std::size_t priv_aligment() const {
return std::max((size_t)m_system_page_size, (size_t)k_block_size);
std::size_t priv_round_up_to_block_size(const std::size_t nbytes) const {
const auto alignment =
std::max((size_t)m_system_page_size, (size_t)k_block_size);
return mdtl::round_up(nbytes, alignment);
}

std::size_t priv_round_down_to_block_size(const std::size_t nbytes) const {
const auto alignment =
std::max((size_t)m_system_page_size, (size_t)k_block_size);
return mdtl::round_down(nbytes, alignment);
}

void priv_clear_status() {
m_system_page_size = 0;
m_num_blocks = 0;
m_vm_region_size = 0;
m_segment_capacity = 0;
m_current_segment_size = 0;
m_vm_region = nullptr;
m_segment = nullptr;
Expand All @@ -277,9 +288,11 @@ class segment_storage {
}

bool priv_is_open() const {
// TODO: brush up this logic
return (check_sanity() && m_system_page_size > 0 && m_num_blocks > 0 &&
m_vm_region_size > 0 && m_current_segment_size > 0 && m_vm_region &&
m_segment && !m_top_path.empty() && !m_block_fd_list.empty());
m_vm_region_size > 0 && m_segment_capacity > 0 &&
m_current_segment_size > 0 && m_vm_region && m_segment &&
!m_top_path.empty() && !m_block_fd_list.empty());
}

static bool priv_copy(const path_type &source_path,
Expand Down Expand Up @@ -309,20 +322,44 @@ class segment_storage {
return false;
}

bool priv_prepare_header_and_segment(
const std::size_t segment_capacity_request) {
const auto header_size = mdtl::round_up(sizeof(segment_header_type),
int64_t(m_system_page_size));
const auto vm_region_size =
header_size + priv_round_up_to_block_size(segment_capacity_request);
if (!priv_reserve_vm(vm_region_size)) {
priv_set_broken_status();
return false;
}
m_segment = reinterpret_cast<char *>(m_vm_region) + header_size;
m_segment_capacity =
priv_round_down_to_block_size(m_vm_region_size - header_size);
assert(m_segment_capacity >= segment_capacity_request);
assert(m_segment_capacity + header_size <= m_vm_region_size);
priv_construct_segment_header(m_vm_region);

return true;
}

bool priv_reserve_vm(const std::size_t nbytes) {
m_vm_region_size =
mdtl::round_up((int64_t)nbytes, (int64_t)priv_aligment());
mdtl::round_up((int64_t)nbytes, (int64_t)m_system_page_size);
m_vm_region =
mdtl::reserve_aligned_vm_region(priv_aligment(), m_vm_region_size);
mdtl::reserve_aligned_vm_region(m_system_page_size, m_vm_region_size);

if (!m_vm_region) {
std::stringstream ss;
ss << "Cannot reserve a VM region " << nbytes << " bytes";
ss << "Cannot reserve a VM region " << m_vm_region_size << " bytes";
logger::out(logger::level::error, __FILE__, __LINE__, ss.str().c_str());
m_vm_region_size = 0;
return false;
} else {
std::stringstream ss;
ss << "Reserved a VM region: " << m_vm_region_size << " bytes at "
<< (uint64_t)m_vm_region;
logger::out(logger::level::verbose, __FILE__, __LINE__, ss.str().c_str());
}
assert(reinterpret_cast<uint64_t>(m_vm_region) % priv_aligment() == 0);

return true;
}
Expand Down Expand Up @@ -352,8 +389,8 @@ class segment_storage {
return false;
}

const auto size =
mdtl::round_up(sizeof(segment_header_type), int64_t(priv_aligment()));
const auto size = mdtl::round_up(sizeof(segment_header_type),
int64_t(m_system_page_size));
if (mdtl::map_anonymous_write_mode(addr, size, MAP_FIXED) != addr) {
logger::out(logger::level::error, __FILE__, __LINE__,
"Cannot allocate segment header");
Expand All @@ -368,8 +405,8 @@ class segment_storage {

bool priv_deallocate_segment_header() {
std::destroy_at(&m_segment_header);
const auto size =
mdtl::round_up(sizeof(segment_header_type), int64_t(priv_aligment()));
const auto size = mdtl::round_up(sizeof(segment_header_type),
int64_t(m_system_page_size));
const auto ret = mdtl::munmap(m_segment_header, size, false);
m_segment_header = nullptr;
if (!ret) {
Expand All @@ -379,7 +416,8 @@ class segment_storage {
return ret;
}

bool priv_create(const path_type &top_path, const std::size_t capacity) {
bool priv_create(const path_type &top_path,
const std::size_t segment_capacity_request) {
if (!check_sanity()) return false;
if (is_open())
return false; // Cannot open multiple segments simultaneously.
Expand All @@ -398,15 +436,10 @@ class segment_storage {
}
}

const auto header_size =
mdtl::round_up(sizeof(segment_header_type), int64_t(priv_aligment()));
const auto vm_region_size = header_size + capacity;
if (!priv_reserve_vm(vm_region_size)) {
if (!priv_prepare_header_and_segment(segment_capacity_request)) {
priv_set_broken_status();
return false;
}
m_segment = reinterpret_cast<char *>(m_vm_region) + header_size;
priv_construct_segment_header(m_vm_region);

m_top_path = top_path;
m_read_only = false;
Expand All @@ -431,7 +464,8 @@ class segment_storage {
return true;
}

bool priv_open(const path_type &top_path, const std::size_t capacity,
bool priv_open(const path_type &top_path,
const std::size_t segment_capacity_request,
const bool read_only) {
if (!check_sanity()) return false;
if (is_open())
Expand All @@ -442,16 +476,11 @@ class segment_storage {
logger::out(logger::level::verbose, __FILE__, __LINE__, s.c_str());
}

const auto header_size =
mdtl::round_up(sizeof(segment_header_type), int64_t(priv_aligment()));
const auto vm_size =
header_size + ((read_only) ? priv_get_size(top_path) : capacity);
if (!priv_reserve_vm(vm_size)) {
if (!priv_prepare_header_and_segment(
read_only ? priv_get_size(top_path) : segment_capacity_request)) {
priv_set_broken_status();
return false;
}
m_segment = reinterpret_cast<char *>(m_vm_region) + header_size;
priv_construct_segment_header(m_vm_region);

m_top_path = top_path;
m_read_only = read_only;
Expand Down Expand Up @@ -516,7 +545,7 @@ class segment_storage {
return false;
}

if (request_size > m_vm_region_size) {
if (request_size > m_segment_capacity) {
logger::out(logger::level::error, __FILE__, __LINE__,
"Requested segment size is bigger than the reserved VM size");
return false;
Expand Down Expand Up @@ -587,7 +616,7 @@ class segment_storage {
assert(!path.empty());
assert(file_size > 0);
assert(segment_offset >= 0);
assert(segment_offset + file_size <= m_vm_region_size);
assert(segment_offset + file_size <= m_segment_capacity);

[[maybe_unused]] static constexpr int map_nosync =
#ifdef MAP_NOSYNC
Expand Down Expand Up @@ -628,7 +657,7 @@ class segment_storage {
assert(!path.empty());
assert(region_size > 0);
assert(segment_offset >= 0);
assert(segment_offset + region_size <= m_vm_region_size);
assert(segment_offset + region_size <= m_segment_capacity);

const auto map_addr = static_cast<char *>(m_segment) + segment_offset;
{
Expand Down Expand Up @@ -924,6 +953,7 @@ class segment_storage {
ssize_t m_system_page_size{0};
std::size_t m_num_blocks{0};
std::size_t m_vm_region_size{0};
std::size_t m_segment_capacity{0};
std::size_t m_current_segment_size{0};
void *m_vm_region{nullptr};
void *m_segment{nullptr};
Expand Down
5 changes: 3 additions & 2 deletions test/kernel/manager_test.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1154,10 +1154,11 @@ TEST(ManagerTest, AlignedAllocation) {
{
manager_type::remove(dir_path());
manager_type manager(metall::create_only, dir_path());
const std::size_t page_size = metall::mtlldetail::get_page_size();

for (std::size_t alignment = k_min_object_size; alignment <= k_chunk_size;
for (std::size_t alignment = k_min_object_size; alignment <= page_size;
alignment *= 2) {
for (std::size_t sz = alignment; sz <= k_chunk_size * 2;
for (std::size_t sz = alignment; sz <= page_size * 2;
sz += alignment) {
auto addr1 =
static_cast<char *>(manager.allocate_aligned(sz, alignment));
Expand Down
Loading