LCOV - code coverage report
Current view: top level - src/support - lockedpool.cpp (source / functions)
Test:         total_coverage.info
Date:         2025-02-23 09:33:43

                  Hit   Total   Coverage
Lines:            135     141     95.7 %
Functions:         19      22     86.4 %

          Line data    Source code
       1             : // Copyright (c) 2016 The Bitcoin Core developers
       2             : // Distributed under the MIT software license, see the accompanying
       3             : // file COPYING or http://www.opensource.org/licenses/mit-license.php.
       4             : 
       5             : #include "support/lockedpool.h"
       6             : #include "support/cleanse.h"
       7             : 
       8             : #if defined(HAVE_CONFIG_H)
       9             : #include "config/pivx-config.h"
      10             : #endif
      11             : 
      12             : #ifdef WIN32
      13             : #ifdef _WIN32_WINNT
      14             : #undef _WIN32_WINNT
      15             : #endif
      16             : #define _WIN32_WINNT 0x0501
      17             : #define WIN32_LEAN_AND_MEAN 1
      18             : #ifndef NOMINMAX
      19             : #define NOMINMAX
      20             : #endif
      21             : #include <windows.h>
      22             : #else
      23             : #include <sys/mman.h> // for mmap
      24             : #include <sys/resource.h> // for getrlimit
      25             : #include <limits.h> // for PAGESIZE
      26             : #include <unistd.h> // for sysconf
      27             : #endif
      28             : 
      29             : #include <algorithm>
      30             : #ifdef ARENA_DEBUG
      31             : #include <iomanip>
      32             : #include <iostream>
      33             : #endif
      34             : 
      35             : LockedPoolManager* LockedPoolManager::_instance = nullptr;
      36             : std::once_flag LockedPoolManager::init_flag;
      37             : 
      38             : /*******************************************************************************/
      39             : // Utilities
      40             : //
      41             : /** Align x upwards to a multiple of align, which must be a power of two */
      42     4066746 : static inline size_t align_up(size_t x, size_t align)
      43             : {
      44     4066746 :     return (x + align - 1) & ~(align - 1);
      45             : }
      46             : 
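Since the mask trick only works when align is a power of two, a quick worked check (a standalone sketch, not part of the original source) makes the rounding behavior concrete:

    #include <cassert>
    #include <cstddef>

    // Same formula as align_up above; align must be a power of two,
    // so that ~(align - 1) masks off the low bits.
    static inline size_t align_up(size_t x, size_t align)
    {
        return (x + align - 1) & ~(align - 1);
    }

    int main()
    {
        assert(align_up(13, 16) == 16); // rounds up to the next multiple
        assert(align_up(32, 16) == 32); // exact multiples are unchanged
        assert(align_up(0, 16) == 0);   // zero stays zero
    }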
      47             : /*******************************************************************************/
      48             : // Implementation: Arena
      49             : 
      50         484 : Arena::Arena(void *base_in, size_t size_in, size_t alignment_in):
      51         484 :     base(static_cast<char*>(base_in)), end(static_cast<char*>(base_in) + size_in), alignment(alignment_in)
      52             : {
      53             :     // Start with one free chunk that covers the entire arena
      54         484 :     auto it = size_to_free_chunk.emplace(size_in, base);
      55         484 :     chunks_free.emplace(base, it);
      56         484 :     chunks_free_end.emplace(base + size_in, it);
      57         484 : }
      58             : 
      59         484 : Arena::~Arena()
      60             : {
      61         484 : }
      62             : 
      63     4065788 : void* Arena::alloc(size_t size)
      64             : {
      65             :     // Round to next multiple of alignment
      66     4065788 :     size = align_up(size, alignment);
      67             : 
      68             :     // Don't handle zero-sized chunks
      69     4065788 :     if (size == 0)
      70             :         return nullptr;
      71             : 
      72             :     // Pick a large enough free-chunk: lower_bound returns an iterator to the first chunk whose size is not less than the request.
      73             :     // This allocation strategy is best-fit. According to "Dynamic Storage Allocation: A Survey and Critical Review",
      74             :     // Wilson et al. 1995, http://www.scs.stanford.edu/14wi-cs140/sched/readings/wilson.pdf, best-fit and first-fit
      75             :     // policies seem to work well in practice.
      76     4065786 :     auto size_ptr_it = size_to_free_chunk.lower_bound(size);
      77     4065786 :     if (size_ptr_it == size_to_free_chunk.end())
      78             :         return nullptr;
      79             : 
      80             :     // Create the used-chunk, taking its space from the end of the free-chunk
      81     4065168 :     const size_t size_remaining = size_ptr_it->first - size;
      82     4065168 :     auto alloced = chunks_used.emplace(size_ptr_it->second + size_remaining, size).first;
      83     4065168 :     chunks_free_end.erase(size_ptr_it->second + size_ptr_it->first);
      84     4065168 :     if (size_ptr_it->first == size) {
      85             :         // whole chunk is used up
      86     3774667 :         chunks_free.erase(size_ptr_it->second);
      87             :     } else {
      88             :         // still some memory left in the chunk
      89      290501 :         auto it_remaining = size_to_free_chunk.emplace(size_remaining, size_ptr_it->second);
      90      290501 :         chunks_free[size_ptr_it->second] = it_remaining;
      91      290501 :         chunks_free_end.emplace(size_ptr_it->second + size_remaining, it_remaining);
      92             :     }
      93     4065168 :     size_to_free_chunk.erase(size_ptr_it);
      94             : 
      95     4065168 :     return reinterpret_cast<void*>(alloced->first);
      96             : }
      97             : 
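The best-fit policy above is carried entirely by size_to_free_chunk, a multimap keyed by chunk size: lower_bound(size) lands on the smallest free chunk that still fits. Below is a simplified standalone sketch of just that lookup (best_fit is a hypothetical helper; the real code also keeps the chunks_free/chunks_free_end maps in sync):

    #include <cstddef>
    #include <map>

    // Simplified free list: chunk size -> chunk base address.
    std::multimap<size_t, char*> size_to_free_chunk;

    char* best_fit(size_t size)
    {
        // Smallest chunk whose size is >= the request: best-fit.
        auto it = size_to_free_chunk.lower_bound(size);
        if (it == size_to_free_chunk.end())
            return nullptr;                 // nothing large enough
        char* base = it->second;
        size_t remaining = it->first - size;
        size_to_free_chunk.erase(it);       // chunk leaves the free list
        if (remaining > 0)                  // re-insert the unused front part
            size_to_free_chunk.emplace(remaining, base);
        return base + remaining;            // carve the allocation off the end
    }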
      98     4068543 : void Arena::free(void *ptr)
      99             : {
     100             :     // Freeing a null pointer is OK.
     101     4068543 :     if (ptr == nullptr) {
     102        3373 :         return;
     103             :     }
     104             : 
     105             :     // Remove chunk from used map
     106     4065170 :     auto i = chunks_used.find(static_cast<char*>(ptr));
     107     4065170 :     if (i == chunks_used.end()) {
     108           2 :         throw std::runtime_error("Arena: invalid or double free");
     109             :     }
     110     4065168 :     std::pair<char*, size_t> freed = *i;
     111     4065168 :     chunks_used.erase(i);
     112             : 
     113             :     // coalesce freed with previous chunk
     114     4065168 :     auto prev = chunks_free_end.find(freed.first);
     115     4065168 :     if (prev != chunks_free_end.end()) {
     116      273983 :         freed.first -= prev->second->first;
     117      273983 :         freed.second += prev->second->first;
     118      273983 :         size_to_free_chunk.erase(prev->second);
     119      273983 :         chunks_free_end.erase(prev);
     120             :     }
     121             : 
     122             :     // coalesce freed with chunk after freed
     123     4065168 :     auto next = chunks_free.find(freed.first + freed.second);
     124     4065168 :     if (next != chunks_free.end()) {
     125       16518 :         freed.second += next->second->first;
     126       16518 :         size_to_free_chunk.erase(next->second);
     127       16518 :         chunks_free.erase(next);
     128             :     }
     129             : 
     130             :     // Add/set space with coalesced free chunk
     131     4065168 :     auto it = size_to_free_chunk.emplace(freed.second, freed.first);
     132     4065168 :     chunks_free[freed.first] = it;
     133     4065168 :     chunks_free_end[freed.first + freed.second] = it;
     134             : }
     135             : 
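A minimal usage sketch of the contract Arena::free enforces (demo is hypothetical driver code, assuming an Arena from support/lockedpool.h constructed over some buffer):

    #include <stdexcept>

    void demo(Arena& arena)
    {
        void* p = arena.alloc(128);   // size is rounded up to the alignment
        arena.free(p);                // coalesced with free neighbours
        try {
            arena.free(p);            // freeing the same pointer again...
        } catch (const std::runtime_error&) {
            // ...throws "Arena: invalid or double free"
        }
        arena.free(nullptr);          // freeing nullptr is explicitly OK
    }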
     136          35 : Arena::Stats Arena::stats() const
     137             : {
     138          35 :     Arena::Stats r{ 0, 0, 0, chunks_used.size(), chunks_free.size() };
     139        1093 :     for (const auto& chunk: chunks_used)
     140        1058 :         r.used += chunk.second;
     141          74 :     for (const auto& chunk: chunks_free)
     142          39 :         r.free += chunk.second->first;
     143          35 :     r.total = r.used + r.free;
     144          35 :     return r;
     145             : }
     146             : 
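Because alloc and free only move bytes between the used and free maps, the stats obey a simple conservation invariant; a hypothetical check:

    #include <cassert>
    #include <cstddef>

    void check_conservation(const Arena& arena, size_t arena_size)
    {
        Arena::Stats s = arena.stats();
        assert(s.total == s.used + s.free); // no bytes are ever lost
        assert(s.total == arena_size);      // ...and none are created
    }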
     147             : #ifdef ARENA_DEBUG
     148             : static void printchunk(void* base, size_t sz, bool used) {
     149             :     std::cout <<
     150             :         "0x" << std::hex << std::setw(16) << std::setfill('0') << base <<
     151             :         " 0x" << std::hex << std::setw(16) << std::setfill('0') << sz <<
     152             :         " 0x" << used << std::endl;
     153             : }
     154             : void Arena::walk() const
     155             : {
     156             :     for (const auto& chunk: chunks_used)
     157             :         printchunk(chunk.first, chunk.second, true);
     158             :     std::cout << std::endl;
     159             :     for (const auto& chunk: chunks_free)
     160             :         printchunk(chunk.first, chunk.second->first, false);
     161             :     std::cout << std::endl;
     162             : }
     163             : #endif
     164             : 
     165             : /*******************************************************************************/
     166             : // Implementation: Win32LockedPageAllocator
     167             : 
     168             : #ifdef WIN32
     169             : /** LockedPageAllocator specialized for Windows.
     170             :  */
     171             : class Win32LockedPageAllocator: public LockedPageAllocator
     172             : {
     173             : public:
     174             :     Win32LockedPageAllocator();
     175             :     void* AllocateLocked(size_t len, bool *lockingSuccess);
     176             :     void FreeLocked(void* addr, size_t len);
     177             :     size_t GetLimit();
     178             : private:
     179             :     size_t page_size;
     180             : };
     181             : 
     182             : Win32LockedPageAllocator::Win32LockedPageAllocator()
     183             : {
     184             :     // Determine system page size in bytes
     185             :     SYSTEM_INFO sSysInfo;
     186             :     GetSystemInfo(&sSysInfo);
     187             :     page_size = sSysInfo.dwPageSize;
     188             : }
     189             : void *Win32LockedPageAllocator::AllocateLocked(size_t len, bool *lockingSuccess)
     190             : {
     191             :     len = align_up(len, page_size);
     192             :     void *addr = VirtualAlloc(nullptr, len, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
     193             :     if (addr) {
     194             :         // VirtualLock is used to attempt to keep keying material out of swap. Note
     195             :         // that it does not provide this as a guarantee, but, in practice, memory
     196             :         // that has been VirtualLock'd almost never gets written to the pagefile
     197             :         // except in rare circumstances where memory is extremely low.
     198             :         *lockingSuccess = VirtualLock(addr, len) != 0;
     199             :     }
     200             :     return addr;
     201             : }
     202             : void Win32LockedPageAllocator::FreeLocked(void* addr, size_t len)
     203             : {
     204             :     len = align_up(len, page_size);
     205             :     memory_cleanse(addr, len);
     206             :     VirtualUnlock(addr, len);
     207             : }
     208             : 
     209             : size_t Win32LockedPageAllocator::GetLimit()
     210             : {
     211             :     // TODO: is there a limit on Windows, and how do we query it?
     212             :     return std::numeric_limits<size_t>::max();
     213             : }
     214             : #endif
     215             : 
     216             : /*******************************************************************************/
     217             : // Implementation: PosixLockedPageAllocator
     218             : 
     219             : #ifndef WIN32
     220             : /** LockedPageAllocator specialized for OSes that don't try to be
     221             :  * special snowflakes.
     222             :  */
     223             : class PosixLockedPageAllocator: public LockedPageAllocator
     224             : {
     225             : public:
     226             :     PosixLockedPageAllocator();
     227             :     void* AllocateLocked(size_t len, bool *lockingSuccess);
     228             :     void FreeLocked(void* addr, size_t len);
     229             :     size_t GetLimit();
     230             : private:
     231             :     size_t page_size;
     232             : };
     233             : 
     234         480 : PosixLockedPageAllocator::PosixLockedPageAllocator()
     235             : {
     236             :     // Determine system page size in bytes
     237             : #if defined(PAGESIZE) // defined in limits.h
     238             :     page_size = PAGESIZE;
     239             : #else                   // assume some POSIX OS
     240         480 :     page_size = sysconf(_SC_PAGESIZE);
     241             : #endif
     242         480 : }
     243             : 
     244             : // Some systems (at least OS X) do not define MAP_ANONYMOUS and instead
     245             : // define MAP_ANON, which is deprecated
     246             : #ifndef MAP_ANONYMOUS
     247             : #define MAP_ANONYMOUS MAP_ANON
     248             : #endif
     249             : 
     250         480 : void *PosixLockedPageAllocator::AllocateLocked(size_t len, bool *lockingSuccess)
     251             : {
     252         480 :     void *addr;
     253         480 :     len = align_up(len, page_size);
     254         480 :     addr = mmap(nullptr, len, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
     255         480 :     if (addr == MAP_FAILED) {
     256             :         return nullptr;
     257             :     }
     258         480 :     if (addr) {
     259         480 :         *lockingSuccess = mlock(addr, len) == 0;
     260             : #if defined(MADV_DONTDUMP) // Linux
     261         480 :         madvise(addr, len, MADV_DONTDUMP);
     262             : #elif defined(MADV_NOCORE) // FreeBSD
     263             :         madvise(addr, len, MADV_NOCORE);
     264             : #endif
     265             :     }
     266             :     return addr;
     267             : }
     268         480 : void PosixLockedPageAllocator::FreeLocked(void* addr, size_t len)
     269             : {
     270         480 :     len = align_up(len, page_size);
     271         480 :     memory_cleanse(addr, len);
     272         480 :     munlock(addr, len);
     273         480 :     munmap(addr, len);
     274         480 : }
     275         480 : size_t PosixLockedPageAllocator::GetLimit()
     276             : {
     277             : #ifdef RLIMIT_MEMLOCK
     278         480 :     struct rlimit rlim;
     279         480 :     if (getrlimit(RLIMIT_MEMLOCK, &rlim) == 0) {
     280         480 :         if (rlim.rlim_cur != RLIM_INFINITY) {
     281         480 :             return rlim.rlim_cur;
     282             :         }
     283             :     }
     284             : #endif
     285             :     return std::numeric_limits<size_t>::max();
     286             : }
     287             : #endif
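GetLimit reports the soft RLIMIT_MEMLOCK value, which is what caps the size of the first arena in LockedPool::new_arena below. A standalone sketch that prints the same limit:

    #include <sys/resource.h>
    #include <cstdio>

    int main()
    {
        struct rlimit rlim;
        if (getrlimit(RLIMIT_MEMLOCK, &rlim) == 0) {
            if (rlim.rlim_cur == RLIM_INFINITY)
                std::printf("memlock limit: unlimited\n");
            else
                std::printf("memlock limit: %llu bytes\n",
                            (unsigned long long)rlim.rlim_cur);
        }
        return 0;
    }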
     288             : 
     289             : /*******************************************************************************/
     290             : // Implementation: LockedPool
     291             : 
     292         481 : LockedPool::LockedPool(std::unique_ptr<LockedPageAllocator> allocator_in, LockingFailed_Callback lf_cb_in):
     293         481 :     allocator(std::move(allocator_in)), lf_cb(lf_cb_in), cumulative_bytes_locked(0)
     294             : {
     295         481 : }
     296             : 
     297         481 : LockedPool::~LockedPool()
     298             : {
     299         481 : }
     300     4059937 : void* LockedPool::alloc(size_t size)
     301             : {
     302     8119864 :     std::lock_guard<std::mutex> lock(mutex);
     303             : 
     304             :     // Don't handle impossible sizes
     305     4059937 :     if (size == 0 || size > ARENA_SIZE)
     306             :         return nullptr;
     307             : 
     308             :     // Try allocating from each current arena
     309     4059944 :     for (auto &arena: arenas) {
     310     4059461 :         void *addr = arena.alloc(size);
     311     4059461 :         if (addr) {
     312     4059452 :             return addr;
     313             :         }
     314             :     }
     315             :     // If that fails, create a new one
     316         484 :     if (new_arena(ARENA_SIZE, ARENA_ALIGN)) {
     317         483 :         return arenas.back().alloc(size);
     318             :     }
     319             :     return nullptr;
     320             : }
     321             : 
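Callers normally reach this allocator through the process-wide manager rather than constructing a LockedPool directly; a hypothetical caller, assuming the Instance() accessor declared in support/lockedpool.h:

    #include "support/lockedpool.h"

    void hold_secret()
    {
        LockedPool& pool = LockedPoolManager::Instance();
        void* key = pool.alloc(32);  // room for a 256-bit secret
        if (!key)
            return;                  // size 0, size > ARENA_SIZE, or out of memory
        // ... use the secret; backing pages are mlock'd where possible ...
        pool.free(key);
    }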
     322     4059935 : void LockedPool::free(void *ptr)
     323             : {
     324     4059935 :     std::lock_guard<std::mutex> lock(mutex);
     325             :     // TODO we can do better than this linear search by keeping a map of arena
     326             :     // extents to arena, and looking up the address.
     327     4059941 :     for (auto &arena: arenas) {
     328     4059941 :         if (arena.addressInArena(ptr)) {
     329     4059935 :             arena.free(ptr);
     330     8119858 :             return;
     331             :         }
     332             :     }
     333           0 :     throw std::runtime_error("LockedPool: invalid address not pointing to any arena");
     334             : }
     335             : 
     336          15 : LockedPool::Stats LockedPool::stats() const
     337             : {
     338          15 :     std::lock_guard<std::mutex> lock(mutex);
     339          15 :     LockedPool::Stats r{0, 0, 0, cumulative_bytes_locked, 0, 0};
     340          30 :     for (const auto &arena: arenas) {
     341          15 :         Arena::Stats i = arena.stats();
     342          15 :         r.used += i.used;
     343          15 :         r.free += i.free;
     344          15 :         r.total += i.total;
     345          15 :         r.chunks_used += i.chunks_used;
     346          15 :         r.chunks_free += i.chunks_free;
     347             :     }
     348          15 :     return r;
     349             : }
     350             : 
     351         484 : bool LockedPool::new_arena(size_t size, size_t align)
     352             : {
     353         484 :     bool locked;
     354             :     // If this is the first arena, handle this specially: Cap the upper size
     355             :     // by the process limit. This makes sure that the first arena will at least
     356             :     // be locked. An exception to this is if the process limit is 0:
     357             :     // in this case no memory can be locked at all so we'll skip past this logic.
     358         484 :     if (arenas.empty()) {
     359         481 :         size_t limit = allocator->GetLimit();
     360         481 :         if (limit > 0) {
     361         481 :             size = std::min(size, limit);
     362             :         }
     363             :     }
     364         484 :     void *addr = allocator->AllocateLocked(size, &locked);
     365         484 :     if (!addr) {
     366             :         return false;
     367             :     }
     368         483 :     if (locked) {
     369         481 :         cumulative_bytes_locked += size;
     370           2 :     } else if (lf_cb) { // Call the locking-failed callback if locking failed
     371           0 :         if (!lf_cb()) { // If the callback returns false, free the memory and fail, otherwise consider the user warned and proceed.
     372           0 :             allocator->FreeLocked(addr, size);
     373           0 :             return false;
     374             :         }
     375             :     }
     376         483 :     arenas.emplace_back(allocator.get(), addr, size, align);
     377         483 :     return true;
     378             : }
     379             : 
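The lf_cb hook above lets the application decide whether an unlockable arena is acceptable. A hypothetical callback matching that contract (WarnOnLockingFailure is illustrative, not part of the source):

    // Return true to accept unlocked (swappable) memory and continue;
    // return false to make new_arena free the memory and fail.
    static bool WarnOnLockingFailure()
    {
        // e.g. warn the user that secret material may end up in swap
        return true;
    }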
     380         483 : LockedPool::LockedPageArena::LockedPageArena(LockedPageAllocator *allocator_in, void *base_in, size_t size_in, size_t align_in):
     381         483 :     Arena(base_in, size_in, align_in), base(base_in), size(size_in), allocator(allocator_in)
     382             : {
     383         483 : }
     384         483 : LockedPool::LockedPageArena::~LockedPageArena()
     385             : {
     386         483 :     allocator->FreeLocked(base, size);
     387         483 : }
     388             : 
     389             : /*******************************************************************************/
     390             : // Implementation: LockedPoolManager
     391             : //
     392         480 : LockedPoolManager::LockedPoolManager(std::unique_ptr<LockedPageAllocator> allocator):
     393         480 :     LockedPool(std::move(allocator), &LockedPoolManager::LockingFailed)
     394             : {
     395         480 : }
     396             : 
     397           0 : bool LockedPoolManager::LockingFailed()
     398             : {
     399             :     // TODO: log something, but how do we do that without including util.h?
     400           0 :     return true;
     401             : }
     402             : 
     403         480 : void LockedPoolManager::CreateInstance()
     404             : {
     405             :     // Using a local static instance guarantees that the object is initialized
     406             :     // when it's first needed and also deinitialized after all objects that use
     407             :     // it are done with it.  I can think of one unlikely scenario where we may
     408             :     // have a static deinitialization order/problem, but the check in
     409             :     // LockedPoolManagerBase's destructor helps us detect if that ever happens.
     410             : #ifdef WIN32
     411             :     std::unique_ptr<LockedPageAllocator> allocator(new Win32LockedPageAllocator());
     412             : #else
     413         480 :     std::unique_ptr<LockedPageAllocator> allocator(new PosixLockedPageAllocator());
     414             : #endif
     415         480 :     static LockedPoolManager instance(std::move(allocator));
     416         480 :     LockedPoolManager::_instance = &instance;
     417         480 : }
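The init_flag declared near the top of the file pairs with CreateInstance through std::call_once; a sketch of the accessor pattern, assuming the declarations in support/lockedpool.h:

    #include <mutex>

    static LockedPoolManager& Instance()
    {
        // call_once guarantees CreateInstance runs exactly once, even if
        // several threads race to use the pool during startup.
        std::call_once(LockedPoolManager::init_flag,
                       LockedPoolManager::CreateInstance);
        return *LockedPoolManager::_instance;
    }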

Generated by: LCOV version 1.14