The hashmap uses the memory pool for allocating/deallocating its Nodes.
(It is faster, and it saves approximately 70 kB that the DS and other small devices will appreciate having.)

svn-id: r31321
parent 411a588850
commit a84d1ea78b
2 changed files with 38 additions and 6 deletions
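The core of the change is to stop calling operator new/delete once per Node and instead carve Nodes out of fixed-size chunks handed out by a memory pool: request a chunk from the pool, construct the Node in place with placement new, and on removal run the destructor explicitly before returning the chunk. The sketch below shows that pattern in isolation; TinyPool is a hypothetical stand-in for Common::MemoryPool (the real pool batches chunks into pages), not ScummVM code.

#include <new>       // placement new
#include <cstdlib>   // std::malloc / std::free
#include <cstddef>   // size_t
#include <cassert>

// Hypothetical stand-in for Common::MemoryPool: hands out fixed-size chunks
// and keeps returned chunks on an intrusive singly linked free list.
class TinyPool {
public:
	explicit TinyPool(size_t chunkSize)
		// a chunk must be large enough to hold the free-list pointer
		: _chunkSize(chunkSize < sizeof(void *) ? sizeof(void *) : chunkSize),
		  _free(0) {}

	void *malloc() {
		if (_free) {                     // reuse a chunk freed earlier
			void *p = _free;
			_free = *(void **)_free;
			return p;
		}
		return std::malloc(_chunkSize);  // simplification: no page batching here
	}

	void free(void *p) {                 // push the chunk onto the free list
		*(void **)p = _free;
		_free = p;
	}

private:
	size_t _chunkSize;
	void *_free;
};

struct Node {
	int _key;
	explicit Node(int key) : _key(key) {}
};

int main() {
	TinyPool pool(sizeof(Node));

	void *mem = pool.malloc();           // raw storage from the pool
	Node *n = new (mem) Node(42);        // construct in place: no heap allocation
	assert(n->_key == 42);

	n->~Node();                          // run the destructor by hand...
	pool.free(n);                        // ...then hand the chunk back to the pool

	// Toy-only caveat: this pool never returns chunks to the C heap; the real
	// MemoryPool ::free()s whole pages in its destructor and freeUnusedPages().
	return 0;
}

The saving comes from avoiding one general-purpose heap allocation, with its per-block bookkeeping overhead, for every node; that per-block overhead is presumably where the quoted ~70 kB goes on small-RAM targets such as the DS.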
common/hashmap.h

@@ -58,6 +58,12 @@
 #include "common/str.h"
 #include "common/util.h"
 
+#define USE_HASHMAP_MEMORY_POOL
+#ifdef USE_HASHMAP_MEMORY_POOL
+#include "common/memorypool.h"
+#include <new>
+#endif
+
 namespace Common {
 
 // The table sizes ideally are primes. We use a helper function to find
@@ -70,6 +76,7 @@ uint nextTableSize(uint x);
 // hash table that is too small).
 //#define DEBUG_HASH_COLLISIONS
 
+
 /**
  * HashMap<Key,Val> maps objects of type Key to objects of type Val.
  * For each used Key type, we need an "uint hashit(Key,uint)" function
@@ -98,6 +105,20 @@ public:
 		Node(const Key &key) : _key(key), _value() {}
 	};
 
+
+#ifdef USE_HASHMAP_MEMORY_POOL
+	MemoryPool _nodePool;
+
+	Node *allocNode(const Key& key) {
+		void* mem = _nodePool.malloc();
+		return new (mem) Node(key);
+	}
+
+	void freeNode(Node* node) {
+		node->~Node();
+		_nodePool.free(node);
+	}
+#else
 	Node* allocNode(const Key& key) {
 		return new Node(key);
 	}
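In the pool-backed branch above, allocNode() takes raw memory from _nodePool and constructs the Node in it with placement new, so freeNode() has to undo the two steps separately: invoke the destructor explicitly with node->~Node(), then give the untouched chunk back via _nodePool.free(node). A plain delete would be wrong on this path, because it would try to release storage that belongs to the pool rather than to the global heap.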
@@ -105,6 +126,7 @@ public:
 	void freeNode(Node *node) {
 		delete node;
 	}
+#endif
 
 	Node **_arr;	// hashtable of size arrsize.
 	uint _arrsize, _nele;
@@ -328,8 +350,11 @@
  * Base constructor, creates an empty hashmap.
  */
 template <class Key, class Val, class HashFunc, class EqualFunc>
-HashMap<Key, Val, HashFunc, EqualFunc>::HashMap()
-	: _defaultVal() {
+HashMap<Key, Val, HashFunc, EqualFunc>::HashMap() :
+#ifdef USE_HASHMAP_MEMORY_POOL
+	_nodePool(sizeof(Node)),
+#endif
+	_defaultVal() {
 	_arrsize = nextTableSize(0);
 	_arr = new Node *[_arrsize];
 	assert(_arr != NULL);
@@ -349,8 +374,11 @@ HashMap<Key, Val, HashFunc, EqualFunc>::HashMap()
  * to heap buffers for the internal storage.
  */
 template <class Key, class Val, class HashFunc, class EqualFunc>
-HashMap<Key, Val, HashFunc, EqualFunc>::HashMap(const HM_t& map)
-	: _defaultVal() {
+HashMap<Key, Val, HashFunc, EqualFunc>::HashMap(const HM_t& map) :
+#ifdef USE_HASHMAP_MEMORY_POOL
+	_nodePool(sizeof(Node)),
+#endif
+	_defaultVal() {
 	assign(map);
 }
 
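Both constructors use a matching trick in the member-initializer list: the _nodePool(sizeof(Node)) entry is wrapped in the same #ifdef as the member itself, so building with USE_HASHMAP_MEMORY_POOL undefined drops both the member and its initializer and falls straight back to the plain new/delete path. A contrived, standalone illustration of the pattern (Widget and USE_POOL are made up for this sketch, not taken from the commit):

#include <cstdio>

#define USE_POOL   // comment this out to build the fallback variant

struct Widget {
#ifdef USE_POOL
	int _poolSize;        // this member only exists in the pooled build
#endif
	int _value;

	Widget() :
#ifdef USE_POOL
		_poolSize(64),    // initializer is compiled in only when the member exists
#endif
		_value(0) {}
};

int main() {
	Widget w;
	std::printf("value = %d\n", w._value);
	return 0;
}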
common/memorypool.cpp

@@ -33,7 +33,7 @@ MemoryPool::MemoryPool(size_t chunkSize) {
 
 MemoryPool::~MemoryPool() {
 	for(size_t i=0; i<_pages.size(); ++i)
-		free(_pages[i]);
+		::free(_pages[i]);
 }
 
 void* MemoryPool::malloc() {
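The :: added in the destructor matters because MemoryPool has a member function named free of its own (the one hashmap.h calls as _nodePool.free(node)). Inside a member function, unqualified name lookup finds that member before the C library function, so plain free(_pages[i]) would hand the page to the pool's own free routine instead of releasing it; ::free forces the global, C-library free. A stripped-down illustration (Pool and releasePage are invented for this sketch):

#include <stdlib.h>   // global ::malloc / ::free
#include <stdio.h>

class Pool {
public:
	// Member with the same name as the C library function. Inside any member
	// function, an unqualified call to free() resolves to this one first.
	void free(void *p) {
		printf("Pool::free(%p) called - not what the destructor wants\n", p);
	}

	void releasePage(void *page) {
		// free(page);   // would call Pool::free because of member name lookup
		::free(page);    // scope operator: call the C library free instead
	}
};

int main() {
	Pool pool;
	void *page = malloc(1024);
	pool.releasePage(page);   // really returns the page to the C heap
	return 0;
}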
@@ -83,12 +83,16 @@ void MemoryPool::freeUnusedPages() {
 		iterator = *(void**)iterator;
 	}
 
+	size_t freedPagesCount = 0;
 	for(size_t i=0; i<_pages.size(); ++i) {
 		if(numberOfFreeChunksPerPage[i] == CHUNK_PAGE_SIZE) {
-			free(_pages[i]);
+			::free(_pages[i]);
 			_pages[i] = NULL; // TODO : Remove NULL values
+			++freedPagesCount;
 		}
 	}
+
+	printf("%d freed pages\n", freedPagesCount);
 }
 
 }
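A small portability note on the diagnostic line added above: freedPagesCount is a size_t while "%d" expects an int, so on targets where the two types differ in width the printf call is technically undefined. A cast keeps the format and the argument in agreement, e.g. as a drop-in replacement for that line:

	printf("%d freed pages\n", (int)freedPagesCount);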