From abf6d86e7257893682103af61068fd861ca3ce93 Mon Sep 17 00:00:00 2001
From: iwabuchi
Date: Thu, 7 Dec 2023 20:34:16 -0800
Subject: [PATCH] [wip] Delegate VM operations to segment storage

---
 include/metall/basic_manager.hpp              |  10 +-
 include/metall/defs.hpp                       |   2 +-
 include/metall/kernel/manager_kernel.hpp      |  25 +-
 include/metall/kernel/manager_kernel_impl.ipp | 171 ++---------
 include/metall/kernel/segment_allocator.hpp   |  49 ++--
 include/metall/kernel/segment_storage.hpp     | 267 ++++++++++++------
 include/metall/stl_allocator.hpp              |   2 +-
 test/container/stl_allocator_test.cpp         |   4 +-
 test/kernel/CMakeLists.txt                    |   2 +-
 ..._file_test.cpp => copy_datastore_test.cpp} |  13 +
 test/kernel/segment_storage_test.cpp          |  66 +----
 11 files changed, 275 insertions(+), 336 deletions(-)
 rename test/kernel/{copy_file_test.cpp => copy_datastore_test.cpp} (85%)

diff --git a/include/metall/basic_manager.hpp b/include/metall/basic_manager.hpp
index c1206f2f..fd45270f 100644
--- a/include/metall/basic_manager.hpp
+++ b/include/metall/basic_manager.hpp
@@ -168,8 +168,12 @@ class basic_manager {
   }
 
   /// \brief Creates a new data store (an existing data store will be
-  /// overwritten). \param base_path Path to create a data store. \param
-  /// capacity Maximum total allocation size.
+  /// overwritten).
+  /// \param base_path Path to create a data store.
+  /// \param capacity Total allocation size. Metall uses this value as a hint;
+  /// the actual limit may be smaller or larger than this value, depending on
+  /// the internal implementation. However, a capacity close to the requested
+  /// value should be available.
   basic_manager(create_only_t, const path_type &base_path,
                 const size_type capacity) noexcept {
     try {
@@ -1291,7 +1295,7 @@ class basic_manager {
     }
     try {
       return allocator_type(reinterpret_cast<manager_kernel_type *const *>(
-          &(m_kernel->get_segment_header()->manager_kernel_address)));
+          &(m_kernel->get_segment_header().manager_kernel_address)));
     } catch (...) {
       logger::out(logger::level::error, __FILE__, __LINE__,
                   "An exception has been thrown");
diff --git a/include/metall/defs.hpp b/include/metall/defs.hpp
index 0d5604ed..f8385e72 100644
--- a/include/metall/defs.hpp
+++ b/include/metall/defs.hpp
@@ -49,7 +49,7 @@
 #endif
 
 // --------------------
-// Macros for the default segment storage manager
+// Macros for the default segment storage
 // --------------------
 
 /// \def METALL_SEGMENT_BLOCK_SIZE
diff --git a/include/metall/kernel/manager_kernel.hpp b/include/metall/kernel/manager_kernel.hpp
index 024da0db..9ec8d0d3 100644
--- a/include/metall/kernel/manager_kernel.hpp
+++ b/include/metall/kernel/manager_kernel.hpp
@@ -93,15 +93,6 @@ class manager_kernel {
   static_assert(k_default_vm_reserve_size <= k_max_segment_size,
                 "k_default_vm_reserve_size must be <= k_max_segment_size");
 
-#ifndef METALL_SEGMENT_BLOCK_SIZE
-#error "METALL_SEGMENT_BLOCK_SIZE is not defined."
-#endif
-  static constexpr size_type k_initial_segment_size = METALL_SEGMENT_BLOCK_SIZE;
-  static_assert(k_initial_segment_size <= k_default_vm_reserve_size,
-                "k_initial_segment_size must be <= k_default_vm_reserve_size");
-  static_assert(k_chunk_size <= k_initial_segment_size,
-                "Chunk size must be <= k_initial_segment_size");
-
   using segment_header_type = segment_header;
   static constexpr size_type k_segment_header_size =
       mdtl::round_up(sizeof(segment_header_type), k_chunk_size);
@@ -344,9 +335,9 @@ class manager_kernel {
   template <typename T>
   T *generic_construct(char_ptr_holder_type name, size_type num, bool try2find,
                        bool do_throw, mdtl::in_place_interface &table);
 
-  /// \brief Get the address of the segment header.
- /// \return Returns the address of the segment header. - const segment_header_type *get_segment_header() const; + /// \brief Return a reference to the segment header. + /// \return A reference to the segment header. + const segment_header_type &get_segment_header() const; /// \brief Get the address of the application segment segment. /// \return Returns the address of the application segment segment. @@ -481,7 +472,7 @@ class manager_kernel { // Private methods // -------------------- // - void priv_sanity_check() const; + void priv_check_sanity() const; bool priv_validate_runtime_configuration() const; difference_type priv_to_offset(const void *ptr) const; void *priv_to_address(difference_type offset) const; @@ -521,11 +512,6 @@ class manager_kernel { void priv_destruct_and_free_memory(difference_type offset, size_type length); // ---------- For segment ---------- // - bool priv_reserve_vm_region(size_type nbytes); - bool priv_release_vm_region(); - bool priv_allocate_segment_header(void *addr); - bool priv_deallocate_segment_header(); - bool priv_open(const path_type &base_path, bool read_only, size_type vm_reserve_size_request = 0); bool priv_create(const path_type &base_path, size_type vm_reserve_size); @@ -571,9 +557,6 @@ class manager_kernel { // -------------------- // bool m_good{false}; path_type m_base_path{}; - size_type m_vm_region_size{0}; - void *m_vm_region{nullptr}; - segment_header_type *m_segment_header{nullptr}; attributed_object_directory_type m_named_object_directory{}; attributed_object_directory_type m_unique_object_directory{}; attributed_object_directory_type m_anonymous_object_directory{}; diff --git a/include/metall/kernel/manager_kernel_impl.ipp b/include/metall/kernel/manager_kernel_impl.ipp index 698db23c..0ed49374 100644 --- a/include/metall/kernel/manager_kernel_impl.ipp +++ b/include/metall/kernel/manager_kernel_impl.ipp @@ -8,8 +8,7 @@ #include -namespace metall { -namespace kernel { +namespace metall::kernel { // -------------------- // // Constructor @@ -58,17 +57,15 @@ bool manager_kernel::open( template void manager_kernel::close() { - if (m_vm_region) { - priv_sanity_check(); + if (m_segment_storage.is_open()) { + priv_check_sanity(); if (!m_segment_storage.read_only()) { priv_serialize_management_data(); m_segment_storage.sync(true); } m_good = false; - m_segment_storage.destroy(); - priv_deallocate_segment_header(); - priv_release_vm_region(); + m_segment_storage.release(); if (!m_segment_storage.read_only()) { // This function must be called at the end @@ -79,14 +76,14 @@ void manager_kernel::close() { template void manager_kernel::flush(const bool synchronous) { - priv_sanity_check(); + priv_check_sanity(); m_segment_storage.sync(synchronous); } template void *manager_kernel::allocate( const manager_kernel::size_type nbytes) { - priv_sanity_check(); + priv_check_sanity(); if (m_segment_storage.read_only()) return nullptr; const auto offset = m_segment_memory_allocator.allocate(nbytes); @@ -102,7 +99,7 @@ template void *manager_kernel::allocate_aligned( const manager_kernel::size_type nbytes, const manager_kernel::size_type alignment) { - priv_sanity_check(); + priv_check_sanity(); if (m_segment_storage.read_only()) return nullptr; // This requirement could be removed, but it would need some work to do @@ -122,7 +119,7 @@ void *manager_kernel::allocate_aligned( template void manager_kernel::deallocate(void *const addr) { - priv_sanity_check(); + priv_check_sanity(); if (m_segment_storage.read_only()) return; if (!addr) return; 
m_segment_memory_allocator.deallocate(priv_to_offset(addr)); @@ -130,7 +127,7 @@ void manager_kernel::deallocate(void *const addr) { template bool manager_kernel::all_memory_deallocated() const { - priv_sanity_check(); + priv_check_sanity(); return m_segment_memory_allocator.all_memory_deallocated(); } @@ -138,7 +135,7 @@ template template std::pair::size_type> manager_kernel::find(char_ptr_holder_type name) const { - priv_sanity_check(); + priv_check_sanity(); if (name.is_anonymous()) { return std::make_pair(nullptr, 0); @@ -166,7 +163,7 @@ manager_kernel::find(char_ptr_holder_type name) const { template template bool manager_kernel::destroy(char_ptr_holder_type name) { - priv_sanity_check(); + priv_check_sanity(); if (m_segment_storage.read_only()) return false; if (name.is_anonymous()) { @@ -199,7 +196,7 @@ bool manager_kernel::destroy(char_ptr_holder_type name) { template template bool manager_kernel::destroy_ptr(const T *ptr) { - priv_sanity_check(); + priv_check_sanity(); if (m_segment_storage.read_only()) return false; size_type length = 0; @@ -425,14 +422,14 @@ template T *manager_kernel::generic_construct( char_ptr_holder_type name, const size_type num, const bool try2find, [[maybe_unused]] const bool do_throw, mdtl::in_place_interface &table) { - priv_sanity_check(); + priv_check_sanity(); return priv_generic_construct(name, num, try2find, table); } template -const typename manager_kernel::segment_header_type * +const typename manager_kernel::segment_header_type & manager_kernel::get_segment_header() const { - return reinterpret_cast(m_segment_header); + return m_segment_storage.get_segment_header(); } template @@ -613,14 +610,11 @@ bool manager_kernel::priv_create_datastore_directory( } template -void manager_kernel::priv_sanity_check() const { +void manager_kernel::priv_check_sanity() const { assert(m_good); assert(!m_base_path.empty()); - assert(m_vm_region_size > 0); - assert(m_vm_region); - assert(m_segment_header); + assert(m_segment_storage.check_sanity()); // TODO: add sanity check functions in other classes - assert(m_segment_storage.get_segment()); assert(m_manager_metadata); } @@ -693,76 +687,6 @@ bool manager_kernel::priv_unmark_properly_closed( storage::get_path(base_path, k_properly_closed_mark_file_name)); } -template -bool manager_kernel::priv_reserve_vm_region( - const size_type nbytes) { - // Align the VM region to the page size to decrease the implementation cost of - // some features, such as supporting Umap and aligned allocation - const auto alignment = k_chunk_size; - - assert(alignment > 0); - m_vm_region_size = mdtl::round_up((int64_t)nbytes, alignment); - m_vm_region = mdtl::reserve_aligned_vm_region(alignment, m_vm_region_size); - if (!m_vm_region) { - std::stringstream ss; - ss << "Cannot reserve a VM region " << nbytes << " bytes"; - logger::out(logger::level::error, __FILE__, __LINE__, ss.str().c_str()); - m_vm_region_size = 0; - return false; - } - assert(reinterpret_cast(m_vm_region) % alignment == 0); - - return true; -} - -template -bool manager_kernel::priv_release_vm_region() { - if (!mdtl::munmap(m_vm_region, m_vm_region_size, false)) { - std::stringstream ss; - ss << "Cannot release a VM region " << (uint64_t)m_vm_region << ", " - << m_vm_region_size << " bytes."; - logger::out(logger::level::error, __FILE__, __LINE__, ss.str().c_str()); - return false; - } - m_vm_region = nullptr; - m_vm_region_size = 0; - - return true; -} - -template -bool manager_kernel::priv_allocate_segment_header( - void *const addr) { - if (!addr) { - return false; - 
} - - if (mdtl::map_anonymous_write_mode(addr, k_segment_header_size, MAP_FIXED) != - addr) { - logger::out(logger::level::error, __FILE__, __LINE__, - "Cannot allocate segment header"); - return false; - } - m_segment_header = reinterpret_cast(addr); - - new (m_segment_header) segment_header_type(); - m_segment_header->manager_kernel_address = this; - - return true; -} - -template -bool manager_kernel::priv_deallocate_segment_header() { - std::destroy_at(&m_segment_header); - const auto ret = mdtl::munmap(m_segment_header, k_segment_header_size, false); - m_segment_header = nullptr; - if (!ret) { - logger::out(logger::level::error, __FILE__, __LINE__, - "Failed to deallocate segment header"); - } - return ret; -} - template template T *manager_kernel::priv_generic_construct( @@ -929,42 +853,23 @@ bool manager_kernel::priv_open( m_base_path = base_path; - const size_type segment_size = segment_storage::get_size(m_base_path); - const size_type vm_reserve_size = - (read_only) ? segment_size + k_segment_header_size - : std::max(segment_size + k_segment_header_size, - vm_reserve_size_request); - if (!priv_reserve_vm_region(vm_reserve_size)) { - return false; - } - - if (!priv_allocate_segment_header(m_vm_region)) { - priv_release_vm_region(); - return false; - } - // Clear the consistent mark before opening with the write mode if (!read_only && !priv_unmark_properly_closed(m_base_path)) { logger::out(logger::level::error, __FILE__, __LINE__, "Failed to erase the properly close mark before opening"); - priv_deallocate_segment_header(); - priv_release_vm_region(); return false; } - if (!m_segment_storage.open( - m_base_path, m_vm_region_size - k_segment_header_size, - static_cast(m_vm_region) + k_segment_header_size, - read_only)) { - priv_deallocate_segment_header(); - priv_release_vm_region(); + if (!m_segment_storage.open(m_base_path, vm_reserve_size_request, + read_only)) { + logger::out(logger::level::error, __FILE__, __LINE__, + "Failed to open the application data segment"); return false; } + m_segment_storage.get_segment_header().manager_kernel_address = this; if (!priv_deserialize_management_data()) { - m_segment_storage.destroy(); - priv_deallocate_segment_header(); - priv_release_vm_region(); + m_segment_storage.release(); return false; } @@ -1000,34 +905,19 @@ bool manager_kernel::priv_create( return false; } - if (!priv_reserve_vm_region(vm_reserve_size)) { - return false; - } - - if (!priv_allocate_segment_header(m_vm_region)) { - priv_release_vm_region(); - return false; - } - m_base_path = base_path; - if (!m_segment_storage.create( - m_base_path, m_vm_region_size - k_segment_header_size, - static_cast(m_vm_region) + k_segment_header_size, - k_initial_segment_size)) { + if (!m_segment_storage.create(m_base_path, vm_reserve_size)) { logger::out(logger::level::error, __FILE__, __LINE__, - "Cannot create application data segment"); - priv_deallocate_segment_header(); - priv_release_vm_region(); + "Cannot create an application data segment"); return false; } + m_segment_storage.get_segment_header().manager_kernel_address = this; if (!priv_set_uuid(m_manager_metadata.get()) || !priv_set_version(m_manager_metadata.get()) || !priv_write_management_metadata(m_base_path, *m_manager_metadata)) { - m_segment_storage.destroy(); - priv_deallocate_segment_header(); - priv_release_vm_region(); + m_segment_storage.release(); return false; } @@ -1037,7 +927,7 @@ bool manager_kernel::priv_create( // ---------- For serializing/deserializing ---------- // template bool 
manager_kernel::priv_serialize_management_data() { - priv_sanity_check(); + priv_check_sanity(); if (m_segment_storage.read_only()) return true; @@ -1120,7 +1010,7 @@ template bool manager_kernel::priv_snapshot( const path_type &destination_base_path, const bool clone, const int num_max_copy_threads) { - priv_sanity_check(); + priv_check_sanity(); priv_serialize_management_data(); if (!priv_create_datastore_directory(destination_base_path)) { @@ -1380,7 +1270,6 @@ bool manager_kernel::priv_write_description( return true; } -} // namespace kernel -} // namespace metall +} // namespace metall::kernel #endif // METALL_DETAIL_KERNEL_MANAGER_KERNEL_IMPL_IPP diff --git a/include/metall/kernel/segment_allocator.hpp b/include/metall/kernel/segment_allocator.hpp index a507f7c5..7e601cea 100644 --- a/include/metall/kernel/segment_allocator.hpp +++ b/include/metall/kernel/segment_allocator.hpp @@ -45,7 +45,7 @@ namespace mdtl = metall::mtlldetail; template + std::size_t _max_size, typename _segment_storage_type> class segment_allocator { public: // -------------------- // @@ -58,7 +58,7 @@ class segment_allocator { static constexpr size_type k_max_size = _max_size; static constexpr difference_type k_null_offset = std::numeric_limits::max(); - using data_store_type = _data_store_type; + using segment_storage_type = _segment_storage_type; private: // -------------------- // @@ -67,8 +67,9 @@ class segment_allocator { static_assert(k_max_size < std::numeric_limits::max(), "Max allocation size is too big"); - using myself = segment_allocator<_chunk_no_type, size_type, difference_type, - _chunk_size, _max_size, _data_store_type>; + using myself = + segment_allocator<_chunk_no_type, size_type, difference_type, _chunk_size, + _max_size, _segment_storage_type>; // For bin using bin_no_mngr = bin_number_manager; @@ -107,10 +108,10 @@ class segment_allocator { // -------------------- // // Constructor & assign operator // -------------------- // - explicit segment_allocator(data_store_type *data_store) + explicit segment_allocator(segment_storage_type *segment_storage) : m_non_full_chunk_bin(), m_chunk_directory(k_max_size / k_chunk_size), - m_data_store(data_store) + m_segment_storage(segment_storage) #ifndef METALL_DISABLE_OBJECT_CACHE , m_object_cache() @@ -504,11 +505,11 @@ class segment_allocator { const size_type num_chunks) { const size_type required_segment_size = (head_chunk_no + num_chunks) * k_chunk_size; - if (required_segment_size <= m_data_store->size()) { + if (required_segment_size <= m_segment_storage->size()) { return true; // Has an enough segment size already } - if (!m_data_store->extend(required_segment_size)) { + if (!m_segment_storage->extend(required_segment_size)) { std::stringstream ss; ss << "Failed to extend the segment to " << required_segment_size << " bytes"; @@ -584,8 +585,8 @@ class segment_allocator { const size_type min_free_size_hint) { // To simplify the implementation, free slots only when object_size is at // least double of the page size - const size_type min_free_size = - std::max((size_type)m_data_store->page_size() * 2, min_free_size_hint); + const size_type min_free_size = std::max( + (size_type)m_segment_storage->page_size() * 2, min_free_size_hint); if (object_size < min_free_size) return; // This function assumes that small objects are equal to or smaller than the @@ -596,45 +597,47 @@ class segment_allocator { chunk_no * k_chunk_size + slot_no * object_size; // Adjust the beginning of the range to free if it is not page aligned - if (range_begin % 
m_data_store->page_size() != 0) { + if (range_begin % m_segment_storage->page_size() != 0) { assert(slot_no > 0); // Assume that chunk is page aligned if (m_chunk_directory.marked_slot(chunk_no, slot_no - 1)) { // Round up to the next multiple of page size // The left region will be freed when the previous slot is freed - range_begin = mdtl::round_up(range_begin, m_data_store->page_size()); + range_begin = + mdtl::round_up(range_begin, m_segment_storage->page_size()); } else { // The previous slot is not used, so round down the range_begin to align // it with the page size - range_begin = mdtl::round_down(range_begin, m_data_store->page_size()); + range_begin = + mdtl::round_down(range_begin, m_segment_storage->page_size()); } } - assert(range_begin % m_data_store->page_size() == 0); + assert(range_begin % m_segment_storage->page_size() == 0); assert(range_begin / k_chunk_size == chunk_no); difference_type range_end = chunk_no * k_chunk_size + slot_no * object_size + object_size; // Adjust the end of the range to free if it is not page aligned // Use the same logic as range_begin - if (range_end % m_data_store->page_size() != 0) { + if (range_end % m_segment_storage->page_size() != 0) { // If this is the last slot of the chunk, the end position must be page // aligned assert(object_size * (slot_no + 1) < k_chunk_size); if (m_chunk_directory.marked_slot(chunk_no, slot_no + 1)) { - range_end = mdtl::round_down(range_end, m_data_store->page_size()); + range_end = mdtl::round_down(range_end, m_segment_storage->page_size()); } else { - range_end = mdtl::round_up(range_end, m_data_store->page_size()); + range_end = mdtl::round_up(range_end, m_segment_storage->page_size()); } } - assert(range_end % m_data_store->page_size() == 0); + assert(range_end % m_segment_storage->page_size() == 0); assert((range_end - 1) / k_chunk_size == chunk_no); assert(range_begin < range_end); const size_type free_size = range_end - range_begin; - assert(free_size % m_data_store->page_size() == 0); + assert(free_size % m_segment_storage->page_size() == 0); - m_data_store->free_region(range_begin, free_size); + m_segment_storage->free_region(range_begin, free_size); } void priv_deallocate_large_object(const chunk_no_type chunk_no, @@ -652,8 +655,8 @@ class segment_allocator { const size_type num_chunks) { const off_t offset = head_chunk_no * k_chunk_size; const size_type length = num_chunks * k_chunk_size; - assert(offset + length <= m_data_store->size()); - m_data_store->free_region(offset, length); + assert(offset + length <= m_segment_storage->size()); + m_segment_storage->free_region(offset, length); } // ---------- For object cache ---------- // @@ -702,7 +705,7 @@ class segment_allocator { // -------------------- // non_full_chunk_bin_type m_non_full_chunk_bin; chunk_directory_type m_chunk_directory; - data_store_type *m_data_store{nullptr}; + segment_storage_type *m_segment_storage{nullptr}; #ifndef METALL_DISABLE_OBJECT_CACHE small_object_cache_type m_object_cache; diff --git a/include/metall/kernel/segment_storage.hpp b/include/metall/kernel/segment_storage.hpp index c422aba5..51981e1b 100644 --- a/include/metall/kernel/segment_storage.hpp +++ b/include/metall/kernel/segment_storage.hpp @@ -3,8 +3,8 @@ // // SPDX-License-Identifier: (Apache-2.0 OR MIT) -#ifndef METALL_KERNEL_MMAP_DATA_STORAGE_HPP -#define METALL_KERNEL_MMAP_DATA_STORAGE_HPP +#ifndef METALL_KERNEL_SEGMENT_STORAGE_HPP +#define METALL_KERNEL_SEGMENT_STORAGE_HPP #include #include @@ -13,15 +13,16 @@ #include #include +#include "metall/defs.hpp" 
#include "metall/detail/file.hpp" #include "metall/detail/file_clone.hpp" #include "metall/detail/mmap.hpp" #include "metall/detail/utilities.hpp" #include "metall/logger.hpp" #include "metall/kernel/storage.hpp" +#include "metall/kernel/segment_header.hpp" -namespace metall { -namespace kernel { +namespace metall::kernel { namespace { namespace mdtl = metall::mtlldetail; @@ -31,8 +32,15 @@ class segment_storage { private: static constexpr const char *k_dir_name = "segment"; +#ifndef METALL_SEGMENT_BLOCK_SIZE +#error "METALL_SEGMENT_BLOCK_SIZE is not defined." +#endif + // TODO: check block size is a multiple of page size + static constexpr std::size_t k_block_size = METALL_SEGMENT_BLOCK_SIZE; + public: using path_type = storage::path_type; + using segment_header_type = segment_header; segment_storage() { #ifdef METALL_USE_ANONYMOUS_NEW_MAP @@ -50,7 +58,7 @@ class segment_storage { if (is_open()) { ret &= sync(true); if (!ret) { - ret &= destroy(); + ret &= release(); } } @@ -68,12 +76,13 @@ class segment_storage { m_num_blocks(other.m_num_blocks), m_vm_region_size(other.m_vm_region_size), m_current_segment_size(other.m_current_segment_size), + m_vm_region(other.m_vm_region), m_segment(other.m_segment), + m_segment_header(other.m_segment_header), m_top_path(other.m_top_path), m_read_only(other.m_read_only), m_free_file_space(other.m_free_file_space), - m_block_fd_list(std::move(other.m_block_fd_list)), - m_block_size(other.m_block_size) + m_block_fd_list(std::move(other.m_block_fd_list)) #ifdef METALL_USE_ANONYMOUS_NEW_MAP , m_anonymous_map_flag_list(other.m_anonymous_map_flag_list) @@ -87,12 +96,13 @@ class segment_storage { m_num_blocks = other.m_num_blocks; m_vm_region_size = other.m_vm_region_size; m_current_segment_size = other.m_current_segment_size; + m_vm_region = other.m_vm_region; + m_segment_header = other.m_segment_header; m_segment = other.m_segment; m_top_path = std::move(other.m_top_path); m_read_only = other.m_read_only; m_free_file_space = other.m_free_file_space; m_block_fd_list = std::move(other.m_block_fd_list); - m_block_size = other.m_block_size; #ifdef METALL_USE_ANONYMOUS_NEW_MAP m_anonymous_map_flag_list = std::move(other.m_anonymous_map_flag_list); #endif @@ -100,20 +110,6 @@ class segment_storage { return (*this); } - /// \brief Gets the size of an existing segment. - /// This is a static version of size() method. - /// \param base_path A path to a segment. - static std::size_t get_size(const path_type &base_path) { - return priv_get_size(priv_top_dir_path(base_path)); - } - - /// \brief Checks if a segment is openable. - /// \param base_path A path to a segment. - /// \return Return true if success; otherwise, false. - static bool openable(const path_type &base_path) { - return priv_openable(priv_top_dir_path(base_path)); - } - /// \brief Copies segment to another location. /// \param source_path A path to a source segment. /// \param destination_path A destination path. @@ -131,26 +127,25 @@ class segment_storage { /// \brief Creates a new segment. /// Calling this function fails if this class already manages an opened - /// segment. \base_path A base directory path to create a segment. \param - /// vm_region_size The size of a reserved VM region. \param vm_region The - /// address of a reserved VM region. \block_size The block size. \return + /// segment. + /// \base_path A base directory path to create a segment. + /// \param capacity A segment capacity to reserve. /// Return true if success; otherwise, false. 
 
   /// \brief Opens an existing segment.
   /// Calling this function fails if this class already manages an opened
-  /// segment. \base_path A base directory path to create a segment. \param
-  /// vm_region_size The size of a VM region. \param vm_region The address of a
-  /// VM region. \param read_only If true, this segment is read only. \return
-  /// Return true if success; otherwise, false.
-  bool open(const path_type &base_path, const std::size_t vm_region_size,
-            void *const vm_region, const bool read_only) {
-    return priv_open(priv_top_dir_path(base_path), vm_region_size, vm_region,
-                     read_only);
+  /// segment.
+  /// \param base_path A base directory path to open a segment.
+  /// \param capacity A segment capacity to reserve.
+  /// This value is ignored if read_only is true.
+  /// \param read_only If true, this segment is read only.
+  /// \return Returns true on success; otherwise, false.
+  bool open(const path_type &base_path, const std::size_t capacity,
+            const bool read_only) {
+    return priv_open(priv_top_dir_path(base_path), capacity, read_only);
   }
 
   /// \brief Extends the currently opened segment if necessary.
@@ -161,9 +156,9 @@ class segment_storage {
     return priv_extend(request_size);
   }
 
-  /// \brief Destroys the segment --- the data will be lost.
+  /// \brief Releases the segment --- the data will be lost.
   /// To save data to files, sync() must be called beforehand.
-  bool destroy() { return priv_destroy_segment(); }
+  bool release() { return priv_release_segment(); }
 
   /// \brief Syncs the segment with backing files.
   /// \param sync If false is specified, this function returns before finishing
@@ -196,6 +191,18 @@ class segment_storage {
   /// \return The address of the segment.
   void *get_segment() const { return m_segment; }
 
+  /// \brief Returns a reference to the segment header.
+  /// \return A reference to the segment header.
+  segment_header_type &get_segment_header() {
+    return *reinterpret_cast<segment_header_type *>(m_vm_region);
+  }
+
+  /// \brief Returns a reference to the segment header.
+  /// \return A reference to the segment header.
+  const segment_header_type &get_segment_header() const {
+    return *reinterpret_cast<const segment_header_type *>(m_vm_region);
+  }
+
   /// \brief Returns the current size.
   /// \return The current segment size.
   std::size_t size() const { return m_current_segment_size; }
@@ -249,12 +256,18 @@ class segment_storage {
     return total_file_size;
   }
 
+  std::size_t priv_alignment() const {
+    return std::max((size_t)m_system_page_size, (size_t)k_block_size);
+  }
+
   void priv_clear_status() {
     m_system_page_size = 0;
     m_num_blocks = 0;
     m_vm_region_size = 0;
     m_current_segment_size = 0;
+    m_vm_region = nullptr;
     m_segment = nullptr;
+    m_segment_header = nullptr;
     // m_read_only must not be modified here.
   }
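The accessors above depend on the new single-reservation layout: the segment header sits at the front of the VM region, and the application segment starts at an aligned offset behind it, where the alignment is the larger of the system page size and `k_block_size`. A worked sketch of that arithmetic (illustrative; the constants here are hypothetical examples, and `priv_reserve_vm()` additionally rounds the total up to the alignment):

```cpp
// Layout of one VM reservation:
//   m_vm_region -> [ segment_header | application segment ... ]
//                  ^ header          ^ m_segment = m_vm_region + header_size
#include <algorithm>
#include <cstddef>

constexpr std::size_t round_up(std::size_t n, std::size_t a) {
  return ((n + a - 1) / a) * a;  // same idea as mdtl::round_up
}

constexpr std::size_t page_size = 1ULL << 12;   // 4 KiB system pages
constexpr std::size_t block_size = 1ULL << 26;  // hypothetical k_block_size
constexpr std::size_t alignment = std::max(page_size, block_size);

constexpr std::size_t header_size =
    round_up(/*sizeof(segment_header)=*/64, alignment);
constexpr std::size_t capacity = 1ULL << 30;  // requested by the caller
constexpr std::size_t vm_region_size = header_size + capacity;

static_assert(header_size % alignment == 0);
static_assert(vm_region_size == capacity + alignment);  // with these numbers
```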
@@ -265,9 +278,8 @@ class segment_storage {
 
   bool priv_is_open() const {
     return (check_sanity() && m_system_page_size > 0 && m_num_blocks > 0 &&
-            m_vm_region_size > 0 && m_current_segment_size > 0 && m_segment &&
-            !m_top_path.empty() && !m_block_fd_list.empty() &&
-            m_block_size > 0);
+            m_vm_region_size > 0 && m_current_segment_size > 0 && m_vm_region &&
+            m_segment && !m_top_path.empty() && !m_block_fd_list.empty());
   }
 
   static bool priv_copy(const path_type &source_path,
@@ -297,8 +309,71 @@ class segment_storage {
     return false;
   }
 
-  bool priv_create(const path_type &top_path, const std::size_t vm_region_size,
-                   void *const vm_region, const std::size_t block_size) {
+  bool priv_reserve_vm(const std::size_t nbytes) {
+    m_vm_region_size =
+        mdtl::round_up((int64_t)nbytes, (int64_t)priv_alignment());
+    m_vm_region =
+        mdtl::reserve_aligned_vm_region(priv_alignment(), m_vm_region_size);
+
+    if (!m_vm_region) {
+      std::stringstream ss;
+      ss << "Cannot reserve a VM region " << nbytes << " bytes";
+      logger::out(logger::level::error, __FILE__, __LINE__, ss.str().c_str());
+      m_vm_region_size = 0;
+      return false;
+    }
+    assert(reinterpret_cast<uint64_t>(m_vm_region) % priv_alignment() == 0);
+
+    return true;
+  }
+
+  bool priv_release_vm_region() {
+    if (!mdtl::munmap(m_vm_region, m_vm_region_size, false)) {
+      std::stringstream ss;
+      ss << "Cannot release a VM region " << (uint64_t)m_vm_region << ", "
+         << m_vm_region_size << " bytes.";
+      logger::out(logger::level::error, __FILE__, __LINE__, ss.str().c_str());
+      return false;
+    }
+    m_vm_region = nullptr;
+    m_vm_region_size = 0;
+
+    return true;
+  }
+
+  bool priv_construct_segment_header(void *const addr) {
+    if (!addr) {
+      return false;
+    }
+
+    const auto size =
+        mdtl::round_up(sizeof(segment_header_type), int64_t(priv_alignment()));
+    if (mdtl::map_anonymous_write_mode(addr, size, MAP_FIXED) != addr) {
+      logger::out(logger::level::error, __FILE__, __LINE__,
+                  "Cannot allocate segment header");
+      return false;
+    }
+    m_segment_header = reinterpret_cast<segment_header_type *>(addr);
+
+    new (m_segment_header) segment_header_type();
+
+    return true;
+  }
+
+  bool priv_deallocate_segment_header() {
+    std::destroy_at(m_segment_header);
+    const auto size =
+        mdtl::round_up(sizeof(segment_header_type), int64_t(priv_alignment()));
+    const auto ret = mdtl::munmap(m_segment_header, size, false);
+    m_segment_header = nullptr;
+    if (!ret) {
+      logger::out(logger::level::error, __FILE__, __LINE__,
+                  "Failed to deallocate segment header");
+    }
+    return ret;
+  }
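`priv_construct_segment_header()` maps anonymous pages over the front of the reserved region and placement-constructs the header there; `priv_deallocate_segment_header()` reverses this by destroying the object and unmapping the pages. A self-contained sketch of that lifetime pattern (illustrative only; the patch itself goes through the `mdtl::` wrappers and maps with `MAP_FIXED` over the already-reserved region):

```cpp
#include <sys/mman.h>

#include <cstddef>
#include <memory>
#include <new>

struct header {  // stand-in for segment_header_type
  void *manager_kernel_address{nullptr};
};

int main() {
  const std::size_t size = 1ULL << 12;  // assume one page covers the header

  // Anonymous read/write mapping (kernel-chosen address in this sketch).
  void *addr = ::mmap(nullptr, size, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (addr == MAP_FAILED) return 1;

  auto *h = new (addr) header();   // placement-construct in the mapping
  h->manager_kernel_address = h;   // the manager kernel stores itself here

  std::destroy_at(h);              // destroy the object, not the pointer
  ::munmap(addr, size);            // then drop the mapping
  return 0;
}
```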
+
+  bool priv_create(const path_type &top_path, const std::size_t capacity) {
     if (!check_sanity()) return false;
     if (is_open())
       return false;  // Cannot open multiple segments simultaneously.
@@ -317,27 +392,32 @@ class segment_storage {
       }
     }
 
-    m_block_size =
-        mdtl::round_up(std::min(vm_region_size, block_size), page_size());
+    const auto header_size =
+        mdtl::round_up(sizeof(segment_header_type), int64_t(priv_alignment()));
+    const auto vm_region_size = header_size + capacity;
+    if (!priv_reserve_vm(vm_region_size)) {
+      priv_set_broken_status();
+      return false;
+    }
+    m_segment = reinterpret_cast<char *>(m_vm_region) + header_size;
+    priv_construct_segment_header(m_vm_region);
+
     m_top_path = top_path;
-    m_vm_region_size = mdtl::round_down(vm_region_size, page_size());
-    m_segment = reinterpret_cast<void *>(
-        mdtl::round_up(reinterpret_cast<uint64_t>(vm_region), page_size()));
     m_read_only = false;
 
     // Create the first block so that we can assume that there is a block
     // always in a segment.
-    if (!priv_create_new_map(m_top_path, 0, m_block_size, 0)) {
+    if (!priv_create_new_map(m_top_path, 0, k_block_size, 0)) {
       priv_set_broken_status();
       return false;
     }
-    m_current_segment_size = m_block_size;
+    m_current_segment_size = k_block_size;
     m_num_blocks = 1;
 
     if (!priv_test_file_space_free(top_path)) {
       std::string s("Failed to test file space free: " + top_path.string());
       logger::out(logger::level::error, __FILE__, __LINE__, s.c_str());
-      priv_destroy_segment();
+      priv_release_segment();
       priv_set_broken_status();
       return false;
     }
@@ -345,8 +425,8 @@ class segment_storage {
     return true;
   }
 
-  bool priv_open(const path_type &top_path, const std::size_t vm_region_size,
-                 void *const vm_region, const bool read_only) {
+  bool priv_open(const path_type &top_path, const std::size_t capacity,
+                 const bool read_only) {
     if (!check_sanity()) return false;
     if (is_open())
       return false;  // Cannot open multiple segments simultaneously.
@@ -356,10 +436,18 @@ class segment_storage {
       logger::out(logger::level::info, __FILE__, __LINE__, s.c_str());
     }
 
+    const auto header_size =
+        mdtl::round_up(sizeof(segment_header_type), int64_t(priv_alignment()));
+    const auto vm_size =
+        header_size + ((read_only) ? priv_get_size(top_path) : capacity);
+    if (!priv_reserve_vm(vm_size)) {
+      priv_set_broken_status();
+      return false;
+    }
+    m_segment = reinterpret_cast<char *>(m_vm_region) + header_size;
+    priv_construct_segment_header(m_vm_region);
+
     m_top_path = top_path;
-    m_vm_region_size = mdtl::round_down(vm_region_size, page_size());
-    m_segment = reinterpret_cast<void *>(
-        mdtl::round_up(reinterpret_cast<uint64_t>(vm_region), page_size()));
     m_read_only = read_only;
 
     // Maps block files one by one
@@ -372,22 +460,22 @@ class segment_storage {
       const auto file_size = mdtl::get_file_size(file_name);
       assert(file_size % page_size() == 0);
-      if (m_block_size > 0 && m_block_size != (std::size_t)file_size) {
+      if (k_block_size != (std::size_t)file_size) {
         logger::out(logger::level::error, __FILE__, __LINE__,
                     "File sizes are not the same");
-        priv_destroy_segment();
+        priv_release_segment();
        priv_set_broken_status();
         return false;
       }
-      m_block_size = file_size;
 
-      const auto fd = priv_map_file(file_name, m_block_size,
-                                    m_current_segment_size, read_only);
+      const auto fd =
+          priv_map_file(file_name, k_block_size,
+                        std::ptrdiff_t(m_current_segment_size), read_only);
       if (fd == -1) {
         std::stringstream ss;
         ss << "Failed to map a file " << file_name;
         logger::out(logger::level::error, __FILE__, __LINE__, ss.str().c_str());
-        priv_destroy_segment();
+        priv_release_segment();
         priv_set_broken_status();
         return false;
       }
       m_block_fd_list.emplace_back(fd);
@@ -395,14 +483,14 @@ class segment_storage {
 #ifdef METALL_USE_ANONYMOUS_NEW_MAP
       m_anonymous_map_flag_list.push_back(false);
 #endif
-      m_current_segment_size += m_block_size;
+      m_current_segment_size += k_block_size;
       ++m_num_blocks;
     }
 
     if (!read_only && !priv_test_file_space_free(m_top_path)) {
       std::string s("Failed to test file space free: " + m_top_path.string());
       logger::out(logger::level::error, __FILE__, __LINE__, s.c_str());
-      priv_destroy_segment();
+      priv_release_segment();
       priv_set_broken_status();
       return false;
     }
@@ -433,16 +521,16 @@ class segment_storage {
     }
 
     while (m_current_segment_size < request_size) {
-      if (!priv_create_new_map(m_top_path, m_num_blocks, m_block_size,
-                               m_current_segment_size)) {
+      if (!priv_create_new_map(m_top_path, m_num_blocks, k_block_size,
+                               std::ptrdiff_t(m_current_segment_size))) {
         logger::out(logger::level::error, __FILE__, __LINE__,
                     "Failed to extend the segment");
-        priv_destroy_segment();
+
priv_release_segment(); priv_set_broken_status(); return false; } ++m_num_blocks; - m_current_segment_size += m_block_size; + m_current_segment_size += k_block_size; } return true; @@ -570,7 +658,7 @@ class segment_storage { return fd; } - bool priv_destroy_segment() { + bool priv_release_segment() { if (!is_open()) return false; int succeeded = true; @@ -578,13 +666,21 @@ class segment_storage { succeeded &= mdtl::os_close(fd); } + succeeded &= priv_deallocate_segment_header(); + // Destroy the mapping region by calling mmap with PROT_NONE over the // region. As the unmap system call syncs the data first, this approach is // significantly fast. succeeded &= mdtl::map_with_prot_none(m_segment, m_current_segment_size); - // NOTE: the VM region will be unmapped by another class + succeeded &= priv_release_vm_region(); - priv_clear_status(); + if (!succeeded) { + logger::out(logger::level::error, __FILE__, __LINE__, + "Failed to release the segment"); + priv_set_broken_status(); + } else { + priv_clear_status(); + } return succeeded; } @@ -642,8 +738,8 @@ class segment_storage { } #endif const auto map = - static_cast(m_segment) + block_no * m_block_size; - num_successes.fetch_add(mdtl::os_msync(map, m_block_size, sync) ? 1 + static_cast(m_segment) + block_no * k_block_size; + num_successes.fetch_add(mdtl::os_msync(map, k_block_size, sync) ? 1 : 0); } else { break; @@ -678,7 +774,7 @@ class segment_storage { if (offset + nbytes > m_current_segment_size) return false; #ifdef METALL_USE_ANONYMOUS_NEW_MAP - const auto block_no = offset / m_block_size; + const auto block_no = offset / k_block_size; assert(m_anonymous_map_flag_list.size() > block_no); if (m_anonymous_map_flag_list[block_no]) { return priv_uncommit_private_anonymous_pages(offset, nbytes); @@ -717,12 +813,12 @@ class segment_storage { logger::out(logger::level::info, __FILE__, __LINE__, s.c_str()); } - auto *const addr = static_cast(m_segment) + block_no * m_block_size; - if (::write(m_block_fd_list[block_no], addr, m_block_size) != - (ssize_t)m_block_size) { + auto *const addr = static_cast(m_segment) + block_no * k_block_size; + if (::write(m_block_fd_list[block_no], addr, k_block_size) != + (ssize_t)k_block_size) { std::string s("Failed to write back a block"); logger::perror(logger::level::error, __FILE__, __LINE__, s.c_str()); - priv_destroy_segment(); + priv_release_segment(); priv_set_broken_status(); return false; } @@ -740,12 +836,12 @@ class segment_storage { 0; #endif const auto mapped_addr = - mdtl::map_file_write_mode(m_block_fd_list[block_no], addr, m_block_size, + mdtl::map_file_write_mode(m_block_fd_list[block_no], addr, k_block_size, 0, MAP_FIXED | map_nosync); if (!mapped_addr || mapped_addr != addr) { std::string s("Failed to map a block"); logger::out(logger::level::error, __FILE__, __LINE__, s.c_str()); - priv_destroy_segment(); + priv_release_segment(); priv_set_broken_status(); return false; } @@ -827,18 +923,19 @@ class segment_storage { std::size_t m_num_blocks{0}; std::size_t m_vm_region_size{0}; std::size_t m_current_segment_size{0}; + void *m_vm_region{nullptr}; void *m_segment{nullptr}; + segment_header_type *m_segment_header{nullptr}; path_type m_top_path; bool m_read_only{false}; bool m_free_file_space{true}; std::vector m_block_fd_list; - std::size_t m_block_size{0}; bool m_broken{false}; #ifdef METALL_USE_ANONYMOUS_NEW_MAP std::vector m_anonymous_map_flag_list; #endif }; -} // namespace kernel -} // namespace metall -#endif // METALL_KERNEL_MMAP_DATA_STORAGE_HPP +} // namespace metall::kernel + 
+#endif // METALL_KERNEL_SEGMENT_STORAGE_HPP diff --git a/include/metall/stl_allocator.hpp b/include/metall/stl_allocator.hpp index eb1297d1..d7aaff7f 100644 --- a/include/metall/stl_allocator.hpp +++ b/include/metall/stl_allocator.hpp @@ -165,7 +165,7 @@ class stl_allocator { "nullptr: cannot access to manager kernel"); throw std::bad_alloc(); } - auto manager_kernel = *get_pointer_to_manager_kernel(); + auto* manager_kernel = *get_pointer_to_manager_kernel(); if (!manager_kernel) { logger::out(logger::level::error, __FILE__, __LINE__, "nullptr: cannot access to manager kernel"); diff --git a/test/container/stl_allocator_test.cpp b/test/container/stl_allocator_test.cpp index 7935186a..1a3759fd 100644 --- a/test/container/stl_allocator_test.cpp +++ b/test/container/stl_allocator_test.cpp @@ -134,13 +134,11 @@ TEST(StlAllocatorTest, Exception) { alloc_type allocator = manager.get_allocator(); - ASSERT_NO_THROW({ allocator.deallocate(allocator.allocate(1), 1); }); - // Turn off log temporary because the following exception test cases could // show error messages metall::logger::set_log_level(metall::logger::level::critical); - ASSERT_THROW({ allocator.allocate(1UL << 24UL); }, std::bad_alloc); + ASSERT_NO_THROW({ allocator.deallocate(allocator.allocate(1), 1); }); ASSERT_THROW({ allocator.allocate(allocator.max_size() + 1); }, std::bad_array_new_length); diff --git a/test/kernel/CMakeLists.txt b/test/kernel/CMakeLists.txt index 349bcc8b..c28d067a 100644 --- a/test/kernel/CMakeLists.txt +++ b/test/kernel/CMakeLists.txt @@ -17,7 +17,7 @@ target_compile_definitions(manager_test_single_thread PRIVATE "METALL_DISABLE_CO add_metall_test_executable(snapshot_test snapshot_test.cpp) -add_metall_test_executable(copy_file_test copy_file_test.cpp) +add_metall_test_executable(copy_datastore_test copy_datastore_test.cpp) include(setup_omp) if (OpenMP_CXX_FOUND) diff --git a/test/kernel/copy_file_test.cpp b/test/kernel/copy_datastore_test.cpp similarity index 85% rename from test/kernel/copy_file_test.cpp rename to test/kernel/copy_datastore_test.cpp index 6a561f16..70a39f15 100644 --- a/test/kernel/copy_file_test.cpp +++ b/test/kernel/copy_datastore_test.cpp @@ -19,6 +19,15 @@ void create(const std::string &dir_path) { manager.construct("b")(2); } +void modify(const std::string &dir_path) { + metall::manager manager(metall::open_only, dir_path.c_str()); + + auto a = manager.find("a").first; + *a = 10; + auto b = manager.find("b").first; + *b = 20; +} + void open(const std::string &dir_path) { metall::manager manager(metall::open_read_only, dir_path.c_str()); @@ -48,6 +57,8 @@ TEST(CopyFileTest, SyncCopy) { ASSERT_TRUE(metall::manager::copy(original_dir_path().c_str(), copy_dir_path().c_str())); + modify(original_dir_path()); + open(copy_dir_path()); } @@ -61,6 +72,8 @@ TEST(CopyFileTest, AsyncCopy) { copy_dir_path().c_str()); ASSERT_TRUE(handler.get()); + modify(original_dir_path()); + open(copy_dir_path()); } } // namespace \ No newline at end of file diff --git a/test/kernel/segment_storage_test.cpp b/test/kernel/segment_storage_test.cpp index c3781ce8..d5692c25 100644 --- a/test/kernel/segment_storage_test.cpp +++ b/test/kernel/segment_storage_test.cpp @@ -33,14 +33,10 @@ TEST(MultifileSegmentStorageTest, PageSize) { TEST(MultifileSegmentStorageTest, Create) { constexpr std::size_t vm_size = 1ULL << 22ULL; - auto addr = metall::mtlldetail::reserve_vm_region(vm_size); - ASSERT_NE(addr, nullptr); - { prepare_test_dir(); segment_storage_type data_storage; - ASSERT_TRUE( - 
data_storage.create(test_file_prefix(), vm_size, addr, vm_size / 2)); + ASSERT_TRUE(data_storage.create(test_file_prefix(), vm_size)); ASSERT_TRUE(data_storage.is_open()); ASSERT_TRUE(data_storage.check_sanity()); ASSERT_NE(data_storage.get_segment(), nullptr); @@ -54,8 +50,7 @@ TEST(MultifileSegmentStorageTest, Create) { { prepare_test_dir(); segment_storage_type data_storage; - ASSERT_TRUE( - data_storage.create(test_file_prefix(), vm_size, addr, vm_size * 2)); + ASSERT_TRUE(data_storage.create(test_file_prefix(), vm_size)); ASSERT_TRUE(data_storage.is_open()); ASSERT_TRUE(data_storage.check_sanity()); ASSERT_NE(data_storage.get_segment(), nullptr); @@ -65,94 +60,57 @@ TEST(MultifileSegmentStorageTest, Create) { ASSERT_EQ(buf[i], '1'); } } - - ASSERT_TRUE(metall::mtlldetail::munmap(addr, vm_size, true)); } TEST(MultifileSegmentStorageTest, GetSize) { constexpr std::size_t vm_size = 1ULL << 22ULL; - auto addr = metall::mtlldetail::reserve_vm_region(vm_size); - ASSERT_NE(addr, nullptr); // vm_size < single_file_size { prepare_test_dir(); segment_storage_type data_storage; - ASSERT_TRUE( - data_storage.create(test_file_prefix(), vm_size, addr, vm_size / 2)); + ASSERT_TRUE(data_storage.create(test_file_prefix(), vm_size)); ASSERT_GE(data_storage.size(), vm_size / 2); - ASSERT_GE(segment_storage_type::get_size(test_file_prefix()), vm_size / 2); } // vm_size > single_file_size { prepare_test_dir(); segment_storage_type data_storage; - ASSERT_TRUE( - data_storage.create(test_file_prefix(), vm_size, addr, vm_size * 2)); + ASSERT_TRUE(data_storage.create(test_file_prefix(), vm_size)); ASSERT_GE(data_storage.size(), vm_size); - ASSERT_GE(segment_storage_type::get_size(test_file_prefix()), vm_size); } - - ASSERT_TRUE(metall::mtlldetail::munmap(addr, vm_size, true)); } TEST(MultifileSegmentStorageTest, Extend) { constexpr std::size_t vm_size = 1ULL << 22ULL; - auto addr = metall::mtlldetail::reserve_vm_region(vm_size); - ASSERT_NE(addr, nullptr); - { prepare_test_dir(); segment_storage_type data_storage; - ASSERT_TRUE( - data_storage.create(test_file_prefix(), vm_size, addr, vm_size / 2)); + ASSERT_TRUE(data_storage.create(test_file_prefix(), vm_size)); // Has enough space already ASSERT_TRUE(data_storage.extend(vm_size / 2)); ASSERT_GE(data_storage.size(), vm_size / 2); - ASSERT_GE(segment_storage_type::get_size(test_file_prefix()), vm_size / 2); // Extend the space ASSERT_TRUE(data_storage.extend(vm_size)); ASSERT_GE(data_storage.size(), vm_size); - ASSERT_GE(segment_storage_type::get_size(test_file_prefix()), vm_size); auto buf = static_cast(data_storage.get_segment()); for (std::size_t i = 0; i < vm_size; ++i) { buf[i] = '1'; ASSERT_EQ(buf[i], '1'); } } - - ASSERT_TRUE(metall::mtlldetail::munmap(addr, vm_size, true)); -} - -TEST(MultifileSegmentStorageTest, Openable) { - constexpr std::size_t vm_size = 1ULL << 22ULL; - auto addr = metall::mtlldetail::reserve_vm_region(vm_size); - ASSERT_NE(addr, nullptr); - { - prepare_test_dir(); - segment_storage_type data_storage; - ASSERT_TRUE( - data_storage.create(test_file_prefix(), vm_size, addr, vm_size)); - } - ASSERT_TRUE(metall::mtlldetail::munmap(addr, vm_size, true)); - - ASSERT_TRUE(segment_storage_type::openable(test_file_prefix())); - ASSERT_FALSE(segment_storage_type::openable(test_file_prefix() + "_dummy")); } TEST(MultifileSegmentStorageTest, Open) { constexpr std::size_t vm_size = 1ULL << 22ULL; - auto addr = metall::mtlldetail::reserve_vm_region(vm_size); - ASSERT_NE(addr, nullptr); { prepare_test_dir(); segment_storage_type data_storage; - 
ASSERT_TRUE( - data_storage.create(test_file_prefix(), vm_size, addr, vm_size)); + ASSERT_TRUE(data_storage.create(test_file_prefix(), vm_size)); auto buf = static_cast(data_storage.get_segment()); for (std::size_t i = 0; i < vm_size; ++i) { buf[i] = '1'; @@ -163,7 +121,7 @@ TEST(MultifileSegmentStorageTest, Open) { // Open and Update { segment_storage_type data_storage; - ASSERT_TRUE(data_storage.open(test_file_prefix(), vm_size, addr, false)); + ASSERT_TRUE(data_storage.open(test_file_prefix(), vm_size, false)); ASSERT_TRUE(data_storage.is_open()); ASSERT_TRUE(data_storage.check_sanity()); ASSERT_FALSE(data_storage.read_only()); @@ -177,7 +135,7 @@ TEST(MultifileSegmentStorageTest, Open) { // Read only { segment_storage_type data_storage; - ASSERT_TRUE(data_storage.open(test_file_prefix(), vm_size, addr, true)); + ASSERT_TRUE(data_storage.open(test_file_prefix(), vm_size, true)); ASSERT_TRUE(data_storage.is_open()); ASSERT_TRUE(data_storage.check_sanity()); ASSERT_TRUE(data_storage.read_only()); @@ -186,20 +144,16 @@ TEST(MultifileSegmentStorageTest, Open) { ASSERT_EQ(buf[i], '2'); } } - ASSERT_TRUE(metall::mtlldetail::munmap(addr, vm_size, true)); } TEST(MultifileSegmentStorageTest, Sync) { constexpr std::size_t vm_size = 1ULL << 22ULL; - auto addr = metall::mtlldetail::reserve_vm_region(vm_size); - ASSERT_NE(addr, nullptr); { prepare_test_dir(); segment_storage_type data_storage; - ASSERT_TRUE( - data_storage.create(test_file_prefix(), vm_size, addr, vm_size / 2)); + ASSERT_TRUE(data_storage.create(test_file_prefix(), vm_size)); auto buf = static_cast(data_storage.get_segment()); for (std::size_t i = 0; i < vm_size / 2; ++i) { buf[i] = '1'; @@ -221,7 +175,5 @@ TEST(MultifileSegmentStorageTest, Sync) { ASSERT_EQ(buf[i], '2'); } } - - ASSERT_TRUE(metall::mtlldetail::munmap(addr, vm_size, true)); } } // namespace \ No newline at end of file
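For context, a minimal end-to-end sketch of the behavior this patch targets (illustrative only, not part of the patch; the data store path is arbitrary): the capacity given at creation now acts as a hint that `basic_manager` forwards to the segment storage, which reserves the VM region and maps block files on demand.

```cpp
#include <metall/metall.hpp>

int main() {
  {
    // 1 GiB capacity hint; the actual limit may differ slightly.
    metall::manager manager(metall::create_only, "/tmp/metall_demo",
                            1ULL << 30ULL);
    manager.construct<int>("n")(42);  // allocate and name an object
  }
  {
    metall::manager manager(metall::open_read_only, "/tmp/metall_demo");
    auto *n = manager.find<int>("n").first;
    return (n && *n == 42) ? 0 : 1;
  }
}
```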