In *Efficient C++* we can find a good memory pool implementation. Here I share it, and let's take a closer look at it.
The code is as follows:
template <typename T>
class CMemoryPool
{
public:
    enum { EXPANSION_SIZE = 32 };

    CMemoryPool(unsigned int nItemCount = EXPANSION_SIZE)
    {
        ExpandFreeList(nItemCount);
    }

    ~CMemoryPool()
    {
        // Free all memory in the list
        CMemoryPool<T>* pNext = NULL;
        for (pNext = m_pFreeList; pNext != NULL; pNext = m_pFreeList)
        {
            m_pFreeList = m_pFreeList->m_pFreeList;
            delete [] (char*)pNext;
        }
    }

    void* Alloc(unsigned int /* size */)
    {
        if (m_pFreeList == NULL)
        {
            ExpandFreeList();
        }
        // Take a free block from the head of the list
        CMemoryPool<T>* pHead = m_pFreeList;
        m_pFreeList = m_pFreeList->m_pFreeList;
        return pHead;
    }

    void Free(void* p)
    {
        // Push the freed block back onto the head of the list
        CMemoryPool<T>* pHead = static_cast<CMemoryPool<T>*>(p);
        pHead->m_pFreeList = m_pFreeList;
        m_pFreeList = pHead;
    }

protected:
    // Allocate nItemCount blocks and link them into the free list
    void ExpandFreeList(unsigned int nItemCount = EXPANSION_SIZE)
    {
        // Each block must be large enough to hold either a T or a next pointer
        unsigned int nSize = sizeof(T) > sizeof(CMemoryPool<T>*) ? sizeof(T) : sizeof(CMemoryPool<T>*);
        CMemoryPool<T>* pLastItem = static_cast<CMemoryPool<T>*>(static_cast<void*>(new char[nSize]));
        m_pFreeList = pLastItem;
        for (unsigned int i = 0; i < nItemCount - 1; ++i)
        {
            pLastItem->m_pFreeList = static_cast<CMemoryPool<T>*>(static_cast<void*>(new char[nSize]));
            pLastItem = pLastItem->m_pFreeList;
        }
        pLastItem->m_pFreeList = NULL;
    }

private:
    CMemoryPool<T>* m_pFreeList;
};
The idea is simple: Alloc always takes a block from the head of the free list, and if the list is empty it first allocates another batch of blocks. When a block is freed, it is pushed back onto the head of the list. This is highly efficient, because every block on the list is known to be free, so Alloc never has to search.
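For clarity, here is a minimal standalone usage sketch (the Widget type is hypothetical and not part of the original code). Note that Alloc returns raw, unconstructed memory, which is why the real test code further below routes allocations through overloaded operator new/delete instead:

    // Hypothetical POD type, used only to illustrate the pool interface
    struct Widget { int a; int b; };

    CMemoryPool<Widget> pool;                                     // pre-allocates EXPANSION_SIZE blocks
    Widget* w = static_cast<Widget*>(pool.Alloc(sizeof(Widget))); // pop a block off the free list
    w->a = 1;                                                     // use the raw block as a Widget (POD only)
    w->b = 2;
    pool.Free(w);                                                 // push the block back onto the free list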
Of course, the above code only works for a single thread. To support multithreading, it is easy to add a locking layer around it.
The code is as follows:
class CCriticalSection
{
public:
    CCriticalSection()
    {
        InitializeCriticalSection(&m_cs);
    }
    ~CCriticalSection()
    {
        DeleteCriticalSection(&m_cs);
    }
    void Lock()
    {
        EnterCriticalSection(&m_cs);
    }
    void Unlock()
    {
        LeaveCriticalSection(&m_cs);
    }
protected:
    CRITICAL_SECTION m_cs;
};
template <typename POOLTYPE, typename LOCKTYPE>
class CMTMemoryPool
{
public:
    void* Alloc(unsigned int size)
    {
        void* p = NULL;
        m_lock.Lock();
        p = m_pool.Alloc(size);
        m_lock.Unlock();
        return p;
    }
    void Free(void* p)
    {
        m_lock.Lock();
        m_pool.Free(p);
        m_lock.Unlock();
    }
private:
    POOLTYPE m_pool;
    LOCKTYPE m_lock;
};
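One possible refinement, which is not part of the original code but worth noting as a design choice: if Alloc or Free could ever throw, pairing Lock/Unlock by hand leaks the critical section. A small RAII guard (a sketch, written against the CCriticalSection interface above) releases the lock even on early return or exception:

    // Hypothetical RAII guard (not in the original post): locks in the
    // constructor and always unlocks in the destructor.
    template <typename LOCKTYPE>
    class CLockGuard
    {
    public:
        explicit CLockGuard(LOCKTYPE& lock) : m_lock(lock) { m_lock.Lock(); }
        ~CLockGuard() { m_lock.Unlock(); }
    private:
        LOCKTYPE& m_lock;
    };

    // Alloc could then be written as:
    //     void* Alloc(unsigned int size)
    //     {
    //         CLockGuard<LOCKTYPE> guard(m_lock);
    //         return m_pool.Alloc(size);
    //     }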
Here is my test code:
#include <iostream>
#include <windows.h>
using namespace std;
#include "MemoryPool.h"
#include "MTMemoryPool.h"

class CTest
{
public:
    int m_n;
    int m_n1;

    void* operator new(size_t size)
    {
        void* p = s_pool->Alloc(size);
        return p;
    }
    void operator delete(void* p, size_t size)
    {
        s_pool->Free(p);
    }
    static void NewPool()
    {
        // s_pool = new CMemoryPool<CTest>;
        s_pool = new CMTMemoryPool<CMemoryPool<CTest>, CCriticalSection>;
    }
    static void DeletePool()
    {
        delete s_pool;
        s_pool = NULL;
    }

    // static CMemoryPool<CTest>* s_pool;
    static CMTMemoryPool<CMemoryPool<CTest>, CCriticalSection>* s_pool;
};

// CMemoryPool<CTest>* CTest::s_pool = NULL;
CMTMemoryPool<CMemoryPool<CTest>, CCriticalSection>* CTest::s_pool = NULL;
void testFun()
{
    int i;
    const int nLoop = 10;
    const int nCount = 10000;
    for (int j = 0; j < nLoop; ++j)
    {
        typedef CTest* LPTest;
        LPTest arData[nCount];
        for (i = 0; i < nCount; ++i)
        {
            arData[i] = new CTest;
        }
        for (i = 0; i < nCount; ++i)
        {
            delete arData[i];
        }
    }
}

int main(int argc, char* argv[])
{
    {
        unsigned int dwStartTickCount = GetTickCount();
        CTest::NewPool();
        testFun();
        CTest::DeletePool();
        cout << "total cost " << GetTickCount() - dwStartTickCount << endl;
    }
    system("pause");
    return 0;
}
On my machine, the pooled version runs several times faster than the default CRT allocator.
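To reproduce the comparison against the default CRT allocator, one simple approach (an assumption about the test setup, since the original post does not spell it out) is to time the same loop with a plain class that does not overload operator new/delete:

    // Hypothetical baseline (not in the original post): same layout as CTest,
    // but allocated with the default CRT new/delete.
    class CTestCrt
    {
    public:
        int m_n;
        int m_n1;
    };

    void testFunCrt()
    {
        const int nLoop = 10;
        const int nCount = 10000;
        for (int j = 0; j < nLoop; ++j)
        {
            CTestCrt* arData[nCount];
            for (int i = 0; i < nCount; ++i)
                arData[i] = new CTestCrt;
            for (int i = 0; i < nCount; ++i)
                delete arData[i];
        }
    }

    // Timed the same way as the pooled version:
    //     unsigned int t = GetTickCount();
    //     testFunCrt();
    //     cout << "CRT cost " << GetTickCount() - t << endl;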