// Copyright (c) 2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#ifndef PIVX_SUPPORT_LOCKEDPOOL_H
#define PIVX_SUPPORT_LOCKEDPOOL_H

#include <list>
#include <map>
#include <memory>
#include <mutex>
#include <stdexcept>
#include <stdint.h>
#include <unordered_map>

/**
 * OS-dependent allocation and deallocation of locked/pinned memory pages.
 * Abstract base class.
 */
class LockedPageAllocator
{
public:
    virtual ~LockedPageAllocator() {}
    /** Allocate and lock memory pages.
     * If len is not a multiple of the system page size, it is rounded up.
     * Returns nullptr in case of allocation failure.
     *
     * If the memory pages cannot be locked, the memory is still returned,
     * but the lockingSuccess flag will be set to false.
     * lockingSuccess is undefined if the allocation fails.
     */
    virtual void* AllocateLocked(size_t len, bool *lockingSuccess) = 0;

    /** Unlock and free memory pages.
     * Clear the memory before unlocking.
     */
    virtual void FreeLocked(void* addr, size_t len) = 0;

    /** Get the total limit on the amount of memory that may be locked by this
     * process, in bytes. Return size_t max if there is no limit or the limit
     * is unknown. Return 0 if no memory can be locked at all.
     */
    virtual size_t GetLimit() = 0;
};
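
/* Illustrative sketch only (not part of this header): a minimal POSIX
 * implementation of LockedPageAllocator based on mmap/mlock. The class name
 * is hypothetical; real OS-specific implementations would typically live in
 * the corresponding .cpp file, and a production version would use a secure
 * memset that cannot be optimized away.
 *
 *   #include <sys/mman.h>
 *   #include <cstring>
 *   #include <limits>
 *
 *   class ExamplePosixAllocator : public LockedPageAllocator
 *   {
 *   public:
 *       void* AllocateLocked(size_t len, bool *lockingSuccess) override
 *       {
 *           void *addr = mmap(nullptr, len, PROT_READ | PROT_WRITE,
 *                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *           if (addr == MAP_FAILED)
 *               return nullptr;                      // allocation failure
 *           *lockingSuccess = mlock(addr, len) == 0; // pin pages; may fail
 *           return addr;
 *       }
 *       void FreeLocked(void* addr, size_t len) override
 *       {
 *           std::memset(addr, 0, len); // clear before unlocking, per contract
 *           munlock(addr, len);
 *           munmap(addr, len);
 *       }
 *       size_t GetLimit() override
 *       {
 *           // Assume no known limit for this sketch; a real implementation
 *           // would query RLIMIT_MEMLOCK via getrlimit().
 *           return std::numeric_limits<size_t>::max();
 *       }
 *   };
 */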

/* An arena manages a contiguous region of memory by dividing it into
 * chunks.
 */
class Arena
{
public:
    Arena(void *base, size_t size, size_t alignment);
    virtual ~Arena();

    Arena(const Arena& other) = delete; // non construction-copyable
    Arena& operator=(const Arena&) = delete; // non copyable

    /** Memory statistics. */
    struct Stats
    {
        size_t used;
        size_t free;
        size_t total;
        size_t chunks_used;
        size_t chunks_free;
    };

    /** Allocate size bytes from this arena.
     * Returns a pointer on success, or 0 if the arena is full or
     * the caller tried to allocate 0 bytes.
     */
    void* alloc(size_t size);

    /** Free a previously allocated chunk of memory.
     * Freeing the zero pointer has no effect.
     * Raises std::runtime_error in case of error.
     */
    void free(void *ptr);

    /** Get arena usage statistics */
    Stats stats() const;

#ifdef ARENA_DEBUG
    void walk() const;
#endif

    /** Return whether a pointer points inside this arena.
     * This returns base <= ptr < (base+size) so only use it for (inclusive)
     * chunk starting addresses.
     */
    bool addressInArena(void *ptr) const { return ptr >= base && ptr < end; }
private:
    typedef std::multimap<size_t, char*> SizeToChunkSortedMap;
    /** Map to enable O(log(n)) best-fit allocation, as it's sorted by size */
    SizeToChunkSortedMap size_to_free_chunk;

    typedef std::unordered_map<char*, SizeToChunkSortedMap::const_iterator> ChunkToSizeMap;
    /** Map from the start of each free chunk to its node in size_to_free_chunk */
    ChunkToSizeMap chunks_free;
    /** Map from the end of each free chunk to its node in size_to_free_chunk */
    ChunkToSizeMap chunks_free_end;

    /** Map from the start of each used chunk to its size */
    std::unordered_map<char*, size_t> chunks_used;

    /** Base address of arena */
    char* base;
    /** End address of arena */
    char* end;
    /** Minimum chunk alignment */
    size_t alignment;
};
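
/* Usage sketch for Arena (illustrative only; the buffer and sizes here are
 * hypothetical):
 *
 *   static char buf[4096];
 *   Arena arena(buf, sizeof(buf), 16);      // manage buf, 16-byte alignment
 *   void *p = arena.alloc(100);             // best-fit allocation
 *   Arena::Stats s = arena.stats();         // s.used reflects rounded-up size
 *   bool inside = arena.addressInArena(p);  // true: p is a chunk start address
 *   arena.free(p);                          // freeing the zero pointer is a no-op
 */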

/** Pool for locked memory chunks.
 *
 * To prevent sensitive key data from being swapped to disk, the memory in this
 * pool is locked/pinned.
 *
 * An arena manages a contiguous region of memory. The pool starts out with one
 * arena but can grow to multiple arenas if the need arises.
 *
 * Unlike a normal C heap, the administrative structures are kept separate from
 * the managed memory. This conserves precious locked memory (in some operating
 * systems the amount of memory that can be locked is small) and is safe because
 * the sizes and base addresses of objects are not in themselves sensitive
 * information.
 */
class LockedPool
{
public:
    /** Size of one arena of locked memory. This is a compromise.
     * Do not set this too low, as managing many arenas will increase
     * allocation and deallocation overhead. Setting it too high allocates
     * more locked memory from the OS than strictly necessary.
     */
    static const size_t ARENA_SIZE = 256*1024;
    /** Chunk alignment. Another compromise. Setting this too high will waste
     * memory, setting it too low will increase fragmentation.
     */
    static const size_t ARENA_ALIGN = 16;

    /** Callback invoked when allocation succeeds but locking fails.
     */
    typedef bool (*LockingFailed_Callback)();

    /** Memory statistics. */
    struct Stats
    {
        size_t used;
        size_t free;
        size_t total;
        size_t locked;
        size_t chunks_used;
        size_t chunks_free;
    };

    /** Create a new LockedPool. This takes ownership of the LockedPageAllocator;
     * you can only instantiate this with LockedPool(std::move(...)).
     *
     * The second argument is an optional callback invoked when locking a newly
     * allocated arena fails. If this callback is provided and returns false,
     * the allocation fails (hard fail); if it returns true, the allocation
     * proceeds, but the callback may issue a warning.
     */
    explicit LockedPool(std::unique_ptr<LockedPageAllocator> allocator, LockingFailed_Callback lf_cb_in = 0);
    ~LockedPool();

    LockedPool(const LockedPool& other) = delete; // non construction-copyable
    LockedPool& operator=(const LockedPool&) = delete; // non copyable

    /** Allocate size bytes from this pool.
     * Returns a pointer on success, or 0 if memory is full or
     * the application tried to allocate 0 bytes.
     */
    void* alloc(size_t size);

    /** Free a previously allocated chunk of memory.
     * Freeing the zero pointer has no effect.
     * Raises std::runtime_error in case of error.
     */
    void free(void *ptr);

    /** Get pool usage statistics */
    Stats stats() const;
private:
    std::unique_ptr<LockedPageAllocator> allocator;

    /** Create an arena from locked pages */
    class LockedPageArena: public Arena
    {
    public:
        LockedPageArena(LockedPageAllocator *alloc_in, void *base_in, size_t size, size_t align);
        ~LockedPageArena();
    private:
        void *base;
        size_t size;
        LockedPageAllocator *allocator;
    };

    bool new_arena(size_t size, size_t align);

    std::list<LockedPageArena> arenas;
    LockingFailed_Callback lf_cb;
    size_t cumulative_bytes_locked;
    /** Mutex protects access to this pool's data structures, including arenas.
     */
    mutable std::mutex mutex;
};
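
/* Usage sketch for LockedPool (illustrative only; ExamplePosixAllocator is
 * the hypothetical allocator sketched above):
 *
 *   LockedPool pool(std::unique_ptr<LockedPageAllocator>(
 *       new ExamplePosixAllocator())); // pool takes ownership of the allocator
 *   void *key = pool.alloc(32);        // 32 bytes from the locked pool
 *   if (key != nullptr) {
 *       // ... store secret material in key ...
 *       pool.free(key);                // return the chunk to the pool
 *   }
 */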

/**
 * Singleton class to keep track of locked (i.e., non-swappable) memory, for use
 * in std::allocator templates.
 *
 * Some implementations of the STL allocate memory in some constructors (e.g.,
 * MSVC's vector<T> implementation allocates 1 byte of memory in the allocator).
 * Due to the unpredictable order of static initializers, we have to make sure
 * the LockedPoolManager instance exists before any other STL-based objects that
 * use secure_allocator are created. So instead of having LockedPoolManager also
 * be static-initialized, it is created on demand.
 */
class LockedPoolManager : public LockedPool
{
public:
    /** Return the current instance, or create it once */
    static LockedPoolManager& Instance()
    {
        std::call_once(LockedPoolManager::init_flag, LockedPoolManager::CreateInstance);
        return *LockedPoolManager::_instance;
    }

private:
    explicit LockedPoolManager(std::unique_ptr<LockedPageAllocator> allocator);

    /** Create a new LockedPoolManager specialized to the OS */
    static void CreateInstance();
    /** Called when locking fails; warn the user here */
    static bool LockingFailed();

    static LockedPoolManager* _instance;
    static std::once_flag init_flag;
};
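
/* Usage sketch (illustrative only): this is roughly how an STL-style secure
 * allocator would obtain locked memory through the singleton; n and T are
 * hypothetical.
 *
 *   void* p = LockedPoolManager::Instance().alloc(n * sizeof(T));
 *   if (!p) throw std::bad_alloc();
 *   // ... construct objects in p ...
 *   LockedPoolManager::Instance().free(p);
 */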

#endif // PIVX_SUPPORT_LOCKEDPOOL_H