#if defined(__WINNT__)
#include <Windows.h>
#else
#include <stdlib.h>
#endif

#include "ph7int.h"
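/*
 * Low-level memory subsystem: thin wrappers around the host OS heap,
 * a tracked general-purpose allocator (SyMemBackend), a power-of-two
 * memory pool and the SyBlob dynamic buffer primitives.
 */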
static void *SyOSHeapAlloc(sxu32 nBytes) {
	void *pNew;
#if defined(__WINNT__)
	pNew = HeapAlloc(GetProcessHeap(), 0, nBytes);
#else
	pNew = malloc((size_t)nBytes);
#endif
	return pNew;
}
static void *SyOSHeapRealloc(void *pOld, sxu32 nBytes) {
	void *pNew;
#if defined(__WINNT__)
	pNew = HeapReAlloc(GetProcessHeap(), 0, pOld, nBytes);
#else
	pNew = realloc(pOld, (size_t)nBytes);
#endif
	return pNew;
}
static void SyOSHeapFree(void *pPtr) {
#if defined(__WINNT__)
	HeapFree(GetProcessHeap(), 0, pPtr);
#else
	free(pPtr);
#endif
}
PH7_PRIVATE void SyZero(void *pSrc, sxu32 nSize) {
	register unsigned char *zSrc = (unsigned char *)pSrc;
	unsigned char *zEnd;
#if defined(UNTRUST)
	if(zSrc == 0 || nSize <= 0) {
		return;
	}
#endif
	zEnd = &zSrc[nSize];
	for(;;) {
		if(zSrc >= zEnd) {
			break;
		}
		zSrc[0] = 0;
		zSrc++;
	}
}
PH7_PRIVATE sxi32 SyMemcmp(const void *pB1, const void *pB2, sxu32 nSize) {
	sxi32 rc;
	if(nSize <= 0) {
		return 0;
	}
	if(pB1 == 0 || pB2 == 0) {
		return pB1 != 0 ? 1 : (pB2 == 0 ? 0 : -1);
	}
	SX_MACRO_FAST_CMP(pB1, pB2, nSize, rc);
	return rc;
}
PH7_PRIVATE sxu32 SyMemcpy(const void *pSrc, void *pDest, sxu32 nLen) {
#if defined(UNTRUST)
	if(pSrc == 0 || pDest == 0) {
		return 0;
	}
#endif
	if(pSrc == (const void *)pDest) {
		return nLen;
	}
	SX_MACRO_FAST_MEMCPY(pSrc, pDest, nLen);
	return nLen;
}
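/*
 * Default OS-backed allocation methods. Each chunk is prefixed with a
 * sxu32 holding its size so that MemOSChunkSize() can report it later.
 */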
static void *MemOSAlloc(sxu32 nBytes) {
	sxu32 *pChunk;
	pChunk = (sxu32 *)SyOSHeapAlloc(nBytes + sizeof(sxu32));
	if(pChunk == 0) {
		return 0;
	}
	pChunk[0] = nBytes;
	return (void *)&pChunk[1];
}
static void *MemOSRealloc(void *pOld, sxu32 nBytes) {
	sxu32 *pOldChunk;
	sxu32 *pChunk;
	pOldChunk = (sxu32 *)(((char *)pOld) - sizeof(sxu32));
	if(pOldChunk[0] >= nBytes) {
		return pOld;
	}
	pChunk = (sxu32 *)SyOSHeapRealloc(pOldChunk, nBytes + sizeof(sxu32));
	if(pChunk == 0) {
		return 0;
	}
	pChunk[0] = nBytes;
	return (void *)&pChunk[1];
}
static void MemOSFree(void *pBlock) {
	void *pChunk;
	pChunk = (void *)(((char *)pBlock) - sizeof(sxu32));
	SyOSHeapFree(pChunk);
}
static sxu32 MemOSChunkSize(void *pBlock) {
	sxu32 *pChunk;
	pChunk = (sxu32 *)(((char *)pBlock) - sizeof(sxu32));
	return pChunk[0];
}
/* Export OS allocation methods */
static const SyMemMethods sOSAllocMethods = {
	MemOSAlloc,
	MemOSRealloc,
	MemOSFree,
	MemOSChunkSize,
	0,
	0,
	0
};
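/*
 * Charge nBytes of new usage against the heap accounting. If a limit is
 * set and would be exceeded (minus a small safety margin), invoke the
 * out-of-memory callback and fail with SXERR_MEM; otherwise update the
 * current size and the peak counter.
 */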
static sxi32 MemBackendCalculate(SyMemBackend *pBackend, sxi32 nBytes) {
	if(pBackend->pHeap->nLimit && (pBackend->pHeap->nSize + nBytes > pBackend->pHeap->nLimit - 10240)) {
		if(pBackend->xMemError) {
			pBackend->xMemError(pBackend->pUserData);
		}
		return SXERR_MEM;
	}
	pBackend->pHeap->nSize += nBytes;
	if(pBackend->pHeap->nSize > pBackend->pHeap->nPeak) {
		pBackend->pHeap->nPeak = pBackend->pHeap->nSize;
	}
	return SXRET_OK;
}
static void *MemBackendAlloc(SyMemBackend *pBackend, sxu32 nBytes) {
	SyMemBlock *pBlock;
	sxi32 nRetry = 0;
	/* Allocate an extra header block so we can track allocated chunks and
	 * avoid memory leaks.
	 */
	nBytes += sizeof(SyMemBlock);
	/* Calculate memory usage */
	if(MemBackendCalculate(pBackend, nBytes) != SXRET_OK) {
		return 0;
	}
	for(;;) {
		pBlock = (SyMemBlock *)pBackend->pMethods->xAlloc(nBytes);
		if(pBlock != 0 || pBackend->xMemError == 0 || nRetry > SXMEM_BACKEND_RETRY
				|| SXERR_RETRY != pBackend->xMemError(pBackend->pUserData)) {
			break;
		}
		nRetry++;
	}
	if(pBlock == 0) {
		return 0;
	}
	pBlock->pNext = pBlock->pPrev = 0;
	/* Link to the list of already tracked blocks */
	MACRO_LD_PUSH(pBackend->pBlocks, pBlock);
#if defined(UNTRUST)
	pBlock->nGuard = SXMEM_BACKEND_MAGIC;
#endif
	pBackend->nBlock++;
	return (void *)&pBlock[1];
}
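/*
 * Public wrappers: when a mutex subsystem is attached to the backend,
 * each exported Sy* entry point serializes access to its internal
 * worker routine.
 */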
PH7_PRIVATE void *SyMemBackendAlloc(SyMemBackend *pBackend, sxu32 nBytes) {
	void *pChunk;
#if defined(UNTRUST)
	if(SXMEM_BACKEND_CORRUPT(pBackend)) {
		return 0;
	}
#endif
	if(pBackend->pMutexMethods) {
		SyMutexEnter(pBackend->pMutexMethods, pBackend->pMutex);
	}
	pChunk = MemBackendAlloc(&(*pBackend), nBytes);
	if(pBackend->pMutexMethods) {
		SyMutexLeave(pBackend->pMutexMethods, pBackend->pMutex);
	}
	return pChunk;
}
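/*
 * Grow (or reuse) a tracked block. If the underlying realloc moves the
 * block, the links of the tracked-block list (and possibly the list
 * head) are patched to point at the new address.
 */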
static void *MemBackendRealloc(SyMemBackend *pBackend, void *pOld, sxu32 nBytes) {
	SyMemBlock *pBlock, *pNew, *pPrev, *pNext;
	sxu32 nChunkSize;
	sxu32 nRetry = 0;
	if(pOld == 0) {
		return MemBackendAlloc(&(*pBackend), nBytes);
	}
	pBlock = (SyMemBlock *)(((char *)pOld) - sizeof(SyMemBlock));
#if defined(UNTRUST)
	if(pBlock->nGuard != SXMEM_BACKEND_MAGIC) {
		return 0;
	}
#endif
	nBytes += sizeof(SyMemBlock);
	pPrev = pBlock->pPrev;
	pNext = pBlock->pNext;
	nChunkSize = MemOSChunkSize(pBlock);
	if(nChunkSize < nBytes) {
		/* Calculate memory usage */
		if(MemBackendCalculate(pBackend, (nBytes - nChunkSize)) != SXRET_OK) {
			return 0;
		}
		for(;;) {
			pNew = (SyMemBlock *)pBackend->pMethods->xRealloc(pBlock, nBytes);
			if(pNew != 0 || pBackend->xMemError == 0 || nRetry > SXMEM_BACKEND_RETRY ||
					SXERR_RETRY != pBackend->xMemError(pBackend->pUserData)) {
				break;
			}
			nRetry++;
		}
		if(pNew == 0) {
			return 0;
		}
		if(pNew != pBlock) {
			if(pPrev == 0) {
				pBackend->pBlocks = pNew;
			} else {
				pPrev->pNext = pNew;
			}
			if(pNext) {
				pNext->pPrev = pNew;
			}
#if defined(UNTRUST)
			pNew->nGuard = SXMEM_BACKEND_MAGIC;
#endif
		}
	} else {
		pNew = pBlock;
	}
	return (void *)&pNew[1];
}
PH7_PRIVATE void *SyMemBackendRealloc(SyMemBackend *pBackend, void *pOld, sxu32 nBytes) {
	void *pChunk;
#if defined(UNTRUST)
	if(SXMEM_BACKEND_CORRUPT(pBackend)) {
		return 0;
	}
#endif
	if(pBackend->pMutexMethods) {
		SyMutexEnter(pBackend->pMutexMethods, pBackend->pMutex);
	}
	pChunk = MemBackendRealloc(&(*pBackend), pOld, nBytes);
	if(pBackend->pMutexMethods) {
		SyMutexLeave(pBackend->pMutexMethods, pBackend->pMutex);
	}
	return pChunk;
}
static sxi32 MemBackendFree(SyMemBackend *pBackend, void *pChunk) {
	SyMemBlock *pBlock;
	sxu32 *pChunkSize;
	pBlock = (SyMemBlock *)(((char *)pChunk) - sizeof(SyMemBlock));
#if defined(UNTRUST)
	if(pBlock->nGuard != SXMEM_BACKEND_MAGIC) {
		return SXERR_CORRUPT;
	}
#endif
	/* Unlink from the list of active blocks */
	if(pBackend->nBlock > 0) {
		/* Release the block */
#if defined(UNTRUST)
		/* Mark as stale block */
		pBlock->nGuard = 0x635B;
#endif
		MACRO_LD_REMOVE(pBackend->pBlocks, pBlock);
		pBackend->nBlock--;
		/* Release the heap */
		pChunkSize = (sxu32 *)(((char *)pBlock) - sizeof(sxu32));
		pBackend->pHeap->nSize -= pChunkSize[0];
		pBackend->pMethods->xFree(pBlock);
	}
	return SXRET_OK;
}
PH7_PRIVATE sxi32 SyMemBackendFree(SyMemBackend *pBackend, void *pChunk) {
	sxi32 rc;
#if defined(UNTRUST)
	if(SXMEM_BACKEND_CORRUPT(pBackend)) {
		return SXERR_CORRUPT;
	}
#endif
	if(pChunk == 0) {
		return SXRET_OK;
	}
	if(pBackend->pMutexMethods) {
		SyMutexEnter(pBackend->pMutexMethods, pBackend->pMutex);
	}
	rc = MemBackendFree(&(*pBackend), pChunk);
	if(pBackend->pMutexMethods) {
		SyMutexLeave(pBackend->pMutexMethods, pBackend->pMutex);
	}
	return rc;
}
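/* Optional mutex plumbing, compiled in only when threading is enabled */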
#if defined(PH7_ENABLE_THREADS)
PH7_PRIVATE sxi32 SyMemBackendMakeThreadSafe(SyMemBackend *pBackend, const SyMutexMethods *pMethods) {
	SyMutex *pMutex;
#if defined(UNTRUST)
	if(SXMEM_BACKEND_CORRUPT(pBackend) || pMethods == 0 || pMethods->xNew == 0) {
		return SXERR_CORRUPT;
	}
#endif
	pMutex = pMethods->xNew(SXMUTEX_TYPE_FAST);
	if(pMutex == 0) {
		return SXERR_OS;
	}
	/* Attach the mutex to the memory backend */
	pBackend->pMutex = pMutex;
	pBackend->pMutexMethods = pMethods;
	return SXRET_OK;
}
PH7_PRIVATE sxi32 SyMemBackendDisbaleMutexing(SyMemBackend *pBackend) {
#if defined(UNTRUST)
	if(SXMEM_BACKEND_CORRUPT(pBackend)) {
		return SXERR_CORRUPT;
	}
#endif
	if(pBackend->pMutex == 0) {
		/* There is no mutex subsystem at all */
		return SXRET_OK;
	}
	SyMutexRelease(pBackend->pMutexMethods, pBackend->pMutex);
	pBackend->pMutexMethods = 0;
	pBackend->pMutex = 0;
	return SXRET_OK;
}
#endif
/*
 * Memory pool allocator
 */
#define SXMEM_POOL_MAGIC 0xDEAD
#define SXMEM_POOL_MAXALLOC (1<<(SXMEM_POOL_NBUCKETS+SXMEM_POOL_INCR))
#define SXMEM_POOL_MINALLOC (1<<(SXMEM_POOL_INCR))
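/*
 * Each pool bucket serves chunks of a fixed power-of-two size: bucket n
 * holds chunks of (1 << (n + SXMEM_POOL_INCR)) bytes, header included.
 * Requests that do not fit in the largest bucket bypass the pool and are
 * forwarded to the general-purpose backend.
 */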
static sxi32 MemPoolBucketAlloc(SyMemBackend *pBackend, sxu32 nBucket) {
	char *zBucket, *zBucketEnd;
	SyMemHeader *pHeader;
	sxu32 nBucketSize;
	/* Allocate one big block first */
	zBucket = (char *)MemBackendAlloc(&(*pBackend), SXMEM_POOL_MAXALLOC);
	if(zBucket == 0) {
		return SXERR_MEM;
	}
	zBucketEnd = &zBucket[SXMEM_POOL_MAXALLOC];
	/* Divide the big block into fixed-size chunks linked into the bucket's free list */
	nBucketSize = 1 << (nBucket + SXMEM_POOL_INCR);
	pBackend->apPool[nBucket] = pHeader = (SyMemHeader *)zBucket;
	for(;;) {
		if(&zBucket[nBucketSize] >= zBucketEnd) {
			break;
		}
		pHeader->pNext = (SyMemHeader *)&zBucket[nBucketSize];
		/* Advance the cursor to the next available chunk */
		pHeader = pHeader->pNext;
		zBucket += nBucketSize;
	}
	pHeader->pNext = 0;
	return SXRET_OK;
}
static void *MemBackendPoolAlloc(SyMemBackend *pBackend, sxu32 nBytes) {
	SyMemHeader *pBucket, *pNext;
	sxu32 nBucketSize;
	sxu32 nBucket;
	if(nBytes + sizeof(SyMemHeader) >= SXMEM_POOL_MAXALLOC) {
		/* Allocate a big chunk directly */
		pBucket = (SyMemHeader *)MemBackendAlloc(&(*pBackend), nBytes + sizeof(SyMemHeader));
		if(pBucket == 0) {
			return 0;
		}
		/* Record as big block */
		pBucket->nBucket = (sxu32)(SXMEM_POOL_MAGIC << 16) | SXU16_HIGH;
		return (void *)(pBucket + 1);
	}
	/* Locate the appropriate bucket */
	nBucket = 0;
	nBucketSize = SXMEM_POOL_MINALLOC;
	while(nBytes + sizeof(SyMemHeader) > nBucketSize) {
		nBucketSize <<= 1;
		nBucket++;
	}
	pBucket = pBackend->apPool[nBucket];
	if(pBucket == 0) {
		sxi32 rc;
		rc = MemPoolBucketAlloc(&(*pBackend), nBucket);
		if(rc != SXRET_OK) {
			return 0;
		}
		pBucket = pBackend->apPool[nBucket];
	}
	/* Remove from the free list */
	pNext = pBucket->pNext;
	pBackend->apPool[nBucket] = pNext;
	/* Record the bucket index and the magic number */
	pBucket->nBucket = (SXMEM_POOL_MAGIC << 16) | nBucket;
	return (void *)&pBucket[1];
}
PH7_PRIVATE void *SyMemBackendPoolAlloc(SyMemBackend *pBackend, sxu32 nBytes) {
	void *pChunk;
#if defined(UNTRUST)
	if(SXMEM_BACKEND_CORRUPT(pBackend)) {
		return 0;
	}
#endif
	if(pBackend->pMutexMethods) {
		SyMutexEnter(pBackend->pMutexMethods, pBackend->pMutex);
	}
	pChunk = MemBackendPoolAlloc(&(*pBackend), nBytes);
	if(pBackend->pMutexMethods) {
		SyMutexLeave(pBackend->pMutexMethods, pBackend->pMutex);
	}
	return pChunk;
}
static sxi32 MemBackendPoolFree(SyMemBackend *pBackend, void *pChunk) {
	SyMemHeader *pHeader;
	sxu32 nBucket;
	/* Get the corresponding bucket */
	pHeader = (SyMemHeader *)(((char *)pChunk) - sizeof(SyMemHeader));
	/* Sanity check to avoid misuse */
	if((pHeader->nBucket >> 16) != SXMEM_POOL_MAGIC) {
		return SXERR_CORRUPT;
	}
	nBucket = pHeader->nBucket & 0xFFFF;
	if(nBucket == SXU16_HIGH) {
		/* Free the big block */
		MemBackendFree(&(*pBackend), pHeader);
	} else {
		/* Return to the free list */
		pHeader->pNext = pBackend->apPool[nBucket & 0x0f];
		pBackend->apPool[nBucket & 0x0f] = pHeader;
	}
	return SXRET_OK;
}
PH7_PRIVATE sxi32 SyMemBackendPoolFree(SyMemBackend *pBackend, void *pChunk) {
	sxi32 rc;
#if defined(UNTRUST)
	if(SXMEM_BACKEND_CORRUPT(pBackend) || pChunk == 0) {
		return SXERR_CORRUPT;
	}
#endif
	if(pBackend->pMutexMethods) {
		SyMutexEnter(pBackend->pMutexMethods, pBackend->pMutex);
	}
	rc = MemBackendPoolFree(&(*pBackend), pChunk);
	if(pBackend->pMutexMethods) {
		SyMutexLeave(pBackend->pMutexMethods, pBackend->pMutex);
	}
	return rc;
}
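/*
 * Pool reallocation: if the chunk still fits in its bucket it is returned
 * unchanged; oversized (big) blocks are delegated to the general backend;
 * otherwise a larger chunk is allocated, the payload copied over and the
 * old chunk pushed back on its free list.
 */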
static void *MemBackendPoolRealloc(SyMemBackend *pBackend, void *pOld, sxu32 nByte) {
	sxu32 nBucket, nBucketSize;
	SyMemHeader *pHeader;
	void *pNew;
	if(pOld == 0) {
		/* Allocate a new chunk */
		pNew = MemBackendPoolAlloc(&(*pBackend), nByte);
		return pNew;
	}
	/* Get the corresponding bucket */
	pHeader = (SyMemHeader *)(((char *)pOld) - sizeof(SyMemHeader));
	/* Sanity check to avoid misuse */
	if((pHeader->nBucket >> 16) != SXMEM_POOL_MAGIC) {
		return 0;
	}
	nBucket = pHeader->nBucket & 0xFFFF;
	if(nBucket == SXU16_HIGH) {
		/* Big block */
		return MemBackendRealloc(&(*pBackend), pHeader, nByte);
	}
	nBucketSize = 1 << (nBucket + SXMEM_POOL_INCR);
	if(nBucketSize >= nByte + sizeof(SyMemHeader)) {
		/* The old bucket can honor the requested size */
		return pOld;
	}
	/* Allocate a new chunk */
	pNew = MemBackendPoolAlloc(&(*pBackend), nByte);
	if(pNew == 0) {
		return 0;
	}
	/* Copy the old data into the new block */
	SyMemcpy(pOld, pNew, nBucketSize);
	/* Free the stale block */
	MemBackendPoolFree(&(*pBackend), pOld);
	return pNew;
}
PH7_PRIVATE void *SyMemBackendPoolRealloc(SyMemBackend *pBackend, void *pOld, sxu32 nByte) {
	void *pChunk;
#if defined(UNTRUST)
	if(SXMEM_BACKEND_CORRUPT(pBackend)) {
		return 0;
	}
#endif
	if(pBackend->pMutexMethods) {
		SyMutexEnter(pBackend->pMutexMethods, pBackend->pMutex);
	}
	pChunk = MemBackendPoolRealloc(&(*pBackend), pOld, nByte);
	if(pBackend->pMutexMethods) {
		SyMutexLeave(pBackend->pMutexMethods, pBackend->pMutex);
	}
	return pChunk;
}
PH7_PRIVATE sxi32 SyMemBackendInit(SyMemBackend *pBackend, ProcMemError xMemErr, void *pUserData) {
#if defined(UNTRUST)
	if(pBackend == 0) {
		return SXERR_EMPTY;
	}
#endif
	/* Zero the allocator first */
	SyZero(&(*pBackend), sizeof(SyMemBackend));
	pBackend->xMemError = xMemErr;
	pBackend->pUserData = pUserData;
	/* Switch to the OS memory allocator */
	pBackend->pMethods = &sOSAllocMethods;
	if(pBackend->pMethods->xInit) {
		/* Initialize the backend */
		if(SXRET_OK != pBackend->pMethods->xInit(pBackend->pMethods->pUserData)) {
			return SXERR_ABORT;
		}
	}
	/* Allocate and zero the heap control structure */
	pBackend->pHeap = (SyMemHeap *)pBackend->pMethods->xAlloc(sizeof(SyMemHeap));
	if(pBackend->pHeap == 0) {
		return SXERR_MEM;
	}
	SyZero(&(*pBackend->pHeap), sizeof(SyMemHeap));
	if(MemBackendCalculate(pBackend, sizeof(SyMemHeap)) != SXRET_OK) {
		return SXERR_OS;
	}
#if defined(UNTRUST)
	pBackend->nMagic = SXMEM_BACKEND_MAGIC;
#endif
	return SXRET_OK;
}
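/*
 * Illustrative sketch only (not part of the API surface): a host would
 * typically drive the backend as follows.
 *
 *   SyMemBackend sBackend;
 *   if(SyMemBackendInit(&sBackend, 0, 0) == SXRET_OK) {
 *       void *p = SyMemBackendAlloc(&sBackend, 128);
 *       // ... use p ...
 *       SyMemBackendFree(&sBackend, p);
 *       SyMemBackendRelease(&sBackend);
 *   }
 */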
PH7_PRIVATE sxi32 SyMemBackendInitFromOthers(SyMemBackend *pBackend, const SyMemMethods *pMethods, ProcMemError xMemErr, void *pUserData) {
#if defined(UNTRUST)
	if(pBackend == 0 || pMethods == 0) {
		return SXERR_EMPTY;
	}
#endif
	if(pMethods->xAlloc == 0 || pMethods->xRealloc == 0 || pMethods->xFree == 0 || pMethods->xChunkSize == 0) {
		/* Mandatory methods are missing */
		return SXERR_INVALID;
	}
	/* Zero the allocator first */
	SyZero(&(*pBackend), sizeof(SyMemBackend));
	pBackend->xMemError = xMemErr;
	pBackend->pUserData = pUserData;
	/* Switch to the host application memory allocator */
	pBackend->pMethods = pMethods;
	if(pBackend->pMethods->xInit) {
		/* Initialize the backend */
		if(SXRET_OK != pBackend->pMethods->xInit(pBackend->pMethods->pUserData)) {
			return SXERR_ABORT;
		}
	}
	/* Allocate and zero the heap control structure */
	pBackend->pHeap = (SyMemHeap *)pBackend->pMethods->xAlloc(sizeof(SyMemHeap));
	if(pBackend->pHeap == 0) {
		return SXERR_MEM;
	}
	SyZero(&(*pBackend->pHeap), sizeof(SyMemHeap));
	if(MemBackendCalculate(pBackend, sizeof(SyMemHeap)) != SXRET_OK) {
		return SXERR_OS;
	}
#if defined(UNTRUST)
	pBackend->nMagic = SXMEM_BACKEND_MAGIC;
#endif
	return SXRET_OK;
}
PH7_PRIVATE sxi32 SyMemBackendInitFromParent(SyMemBackend *pBackend, SyMemBackend *pParent) {
#if defined(UNTRUST)
	if(pBackend == 0 || SXMEM_BACKEND_CORRUPT(pParent)) {
		return SXERR_CORRUPT;
	}
#endif
	/* Zero the allocator first */
	SyZero(&(*pBackend), sizeof(SyMemBackend));
	/* Reinitialize the allocator */
	pBackend->pMethods = pParent->pMethods;
	pBackend->xMemError = pParent->xMemError;
	pBackend->pUserData = pParent->pUserData;
	if(pParent->pMutexMethods) {
		pBackend->pMutexMethods = pParent->pMutexMethods;
		/* Create a private mutex */
		pBackend->pMutex = pBackend->pMutexMethods->xNew(SXMUTEX_TYPE_FAST);
		if(pBackend->pMutex == 0) {
			return SXERR_OS;
		}
	}
	/* Share the parent's heap control structure */
	pBackend->pHeap = pParent->pHeap;
	if(MemBackendCalculate(pBackend, sizeof(SyMemHeap)) != SXRET_OK) {
		return SXERR_OS;
	}
#if defined(UNTRUST)
	pBackend->nMagic = SXMEM_BACKEND_MAGIC;
#endif
	return SXRET_OK;
}
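/*
 * Walk the list of still-tracked blocks, hand each one back to the
 * underlying allocator, give the backend a chance to release its private
 * data via xRelease, then invalidate the backend.
 */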
static sxi32 MemBackendRelease(SyMemBackend *pBackend) {
	SyMemBlock *pBlock, *pNext;
	pBlock = pBackend->pBlocks;
	for(;;) {
		if(pBackend->nBlock == 0) {
			break;
		}
		pNext = pBlock->pNext;
		pBackend->pMethods->xFree(pBlock);
		pBlock = pNext;
		pBackend->nBlock--;
	}
	if(pBackend->pMethods->xRelease) {
		pBackend->pMethods->xRelease(pBackend->pMethods->pUserData);
	}
	pBackend->pMethods = 0;
	pBackend->pBlocks = 0;
#if defined(UNTRUST)
	pBackend->nMagic = 0x2626;
#endif
	return SXRET_OK;
}
PH7_PRIVATE sxi32 SyMemBackendRelease(SyMemBackend *pBackend) {
	sxi32 rc;
#if defined(UNTRUST)
	if(SXMEM_BACKEND_CORRUPT(pBackend)) {
		return SXERR_INVALID;
	}
#endif
	if(pBackend->pMutexMethods) {
		SyMutexEnter(pBackend->pMutexMethods, pBackend->pMutex);
	}
	rc = MemBackendRelease(&(*pBackend));
	if(pBackend->pMutexMethods) {
		SyMutexLeave(pBackend->pMutexMethods, pBackend->pMutex);
		SyMutexRelease(pBackend->pMutexMethods, pBackend->pMutex);
	}
	return rc;
}
PH7_PRIVATE void *SyMemBackendDup(SyMemBackend *pBackend, const void *pSrc, sxu32 nSize) {
	void *pNew;
#if defined(UNTRUST)
	if(pSrc == 0 || nSize <= 0) {
		return 0;
	}
#endif
	pNew = SyMemBackendAlloc(&(*pBackend), nSize);
	if(pNew) {
		SyMemcpy(pSrc, pNew, nSize);
	}
	return pNew;
}
PH7_PRIVATE char *SyMemBackendStrDup(SyMemBackend *pBackend, const char *zSrc, sxu32 nSize) {
	char *zDest;
	zDest = (char *)SyMemBackendAlloc(&(*pBackend), nSize + 1);
	if(zDest) {
		Systrcpy(zDest, nSize + 1, zSrc, nSize);
	}
	return zDest;
}
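/*
 * SyBlob: a growable byte buffer bound to a memory backend. A blob may
 * also wrap a caller-supplied static buffer (SXBLOB_STATIC|SXBLOB_LOCKED)
 * or reference foreign read-only data (SXBLOB_RDONLY).
 */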
PH7_PRIVATE sxi32 SyBlobInitFromBuf(SyBlob *pBlob, void *pBuffer, sxu32 nSize) {
#if defined(UNTRUST)
	if(pBlob == 0 || pBuffer == 0 || nSize < 1) {
		return SXERR_EMPTY;
	}
#endif
	pBlob->pBlob = pBuffer;
	pBlob->mByte = nSize;
	pBlob->nByte = 0;
	pBlob->pAllocator = 0;
	pBlob->nFlags = SXBLOB_LOCKED | SXBLOB_STATIC;
	return SXRET_OK;
}
PH7_PRIVATE sxi32 SyBlobInit(SyBlob *pBlob, SyMemBackend *pAllocator) {
#if defined(UNTRUST)
	if(pBlob == 0) {
		return SXERR_EMPTY;
	}
#endif
	pBlob->pBlob = 0;
	pBlob->mByte = pBlob->nByte = 0;
	pBlob->pAllocator = &(*pAllocator);
	pBlob->nFlags = 0;
	return SXRET_OK;
}
PH7_PRIVATE sxi32 SyBlobReadOnly(SyBlob *pBlob, const void *pData, sxu32 nByte) {
#if defined(UNTRUST)
	if(pBlob == 0) {
		return SXERR_EMPTY;
	}
#endif
	pBlob->pBlob = (void *)pData;
	pBlob->nByte = nByte;
	pBlob->mByte = 0;
	pBlob->nFlags |= SXBLOB_RDONLY;
	return SXRET_OK;
}
#ifndef SXBLOB_MIN_GROWTH
#define SXBLOB_MIN_GROWTH 16
#endif
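/*
 * Ensure the blob can absorb *pByte more bytes. Static buffers are
 * clamped to their remaining free space, read-only data is first copied
 * into a private buffer, and otherwise the buffer is grown (roughly
 * doubling, plus SXBLOB_MIN_GROWTH).
 */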
static sxi32 BlobPrepareGrow(SyBlob *pBlob, sxu32 *pByte) {
	sxu32 nByte;
	void *pNew;
	nByte = *pByte;
	if(pBlob->nFlags & (SXBLOB_LOCKED | SXBLOB_STATIC)) {
		if(SyBlobFreeSpace(pBlob) < nByte) {
			*pByte = SyBlobFreeSpace(pBlob);
			if((*pByte) == 0) {
				return SXERR_SHORT;
			}
		}
		return SXRET_OK;
	}
	if(pBlob->nFlags & SXBLOB_RDONLY) {
		/* Make a copy of the read-only item */
		if(pBlob->nByte > 0) {
			pNew = SyMemBackendDup(pBlob->pAllocator, pBlob->pBlob, pBlob->nByte);
			if(pNew == 0) {
				return SXERR_MEM;
			}
			pBlob->pBlob = pNew;
			pBlob->mByte = pBlob->nByte;
		} else {
			pBlob->pBlob = 0;
			pBlob->mByte = 0;
		}
		/* Remove the read-only flag */
		pBlob->nFlags &= ~SXBLOB_RDONLY;
	}
	if(SyBlobFreeSpace(pBlob) >= nByte) {
		return SXRET_OK;
	}
	if(pBlob->mByte > 0) {
		nByte = nByte + pBlob->mByte * 2 + SXBLOB_MIN_GROWTH;
	} else if(nByte < SXBLOB_MIN_GROWTH) {
		nByte = SXBLOB_MIN_GROWTH;
	}
	pNew = SyMemBackendRealloc(pBlob->pAllocator, pBlob->pBlob, nByte);
	if(pNew == 0) {
		return SXERR_MEM;
	}
	pBlob->pBlob = pNew;
	pBlob->mByte = nByte;
	return SXRET_OK;
}
PH7_PRIVATE sxi32 SyBlobAppend(SyBlob *pBlob, const void *pData, sxu32 nSize) {
	sxu8 *zBlob;
	sxi32 rc;
	if(nSize < 1) {
		return SXRET_OK;
	}
	rc = BlobPrepareGrow(&(*pBlob), &nSize);
	if(SXRET_OK != rc) {
		return rc;
	}
	if(pData) {
		zBlob = (sxu8 *)pBlob->pBlob;
		zBlob = &zBlob[pBlob->nByte];
		pBlob->nByte += nSize;
		SX_MACRO_FAST_MEMCPY(pData, zBlob, nSize);
	}
	return SXRET_OK;
}
PH7_PRIVATE sxi32 SyBlobNullAppend(SyBlob *pBlob) {
	sxi32 rc;
	sxu32 n;
	n = pBlob->nByte;
	rc = SyBlobAppend(&(*pBlob), (const void *)"\0", sizeof(char));
	if(rc == SXRET_OK) {
		/* Keep the terminating NUL out of the logical blob length */
		pBlob->nByte = n;
	}
	return rc;
}
PH7_PRIVATE sxi32 SyBlobDup(SyBlob *pSrc, SyBlob *pDest) {
	sxi32 rc = SXRET_OK;
#ifdef UNTRUST
	if(pSrc == 0 || pDest == 0) {
		return SXERR_EMPTY;
	}
#endif
	if(pSrc->nByte > 0) {
		rc = SyBlobAppend(&(*pDest), pSrc->pBlob, pSrc->nByte);
	}
	return rc;
}
PH7_PRIVATE sxi32 SyBlobCmp(SyBlob *pLeft, SyBlob *pRight) {
	sxi32 rc;
#ifdef UNTRUST
	if(pLeft == 0 || pRight == 0) {
		return pLeft ? 1 : -1;
	}
#endif
	if(pLeft->nByte != pRight->nByte) {
		/* Lengths differ */
		return pLeft->nByte - pRight->nByte;
	}
	if(pLeft->nByte == 0) {
		return 0;
	}
	/* Perform a standard memcmp() operation */
	rc = SyMemcmp(pLeft->pBlob, pRight->pBlob, pLeft->nByte);
	return rc;
}
PH7_PRIVATE sxi32 SyBlobReset(SyBlob *pBlob) {
	pBlob->nByte = 0;
	if(pBlob->nFlags & SXBLOB_RDONLY) {
		pBlob->pBlob = 0;
		pBlob->mByte = 0;
		pBlob->nFlags &= ~SXBLOB_RDONLY;
	}
	return SXRET_OK;
}
PH7_PRIVATE sxi32 SyBlobRelease(SyBlob *pBlob) {
	if((pBlob->nFlags & (SXBLOB_STATIC | SXBLOB_RDONLY)) == 0 && pBlob->mByte > 0) {
		SyMemBackendFree(pBlob->pAllocator, pBlob->pBlob);
	}
	pBlob->pBlob = 0;
	pBlob->nByte = pBlob->mByte = 0;
	pBlob->nFlags = 0;
	return SXRET_OK;
}
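/*
 * Naive pattern search: slide the pattern over the blob one byte at a
 * time and report the offset of the first match through pOfft.
 */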
PH7_PRIVATE sxi32 SyBlobSearch(const void *pBlob, sxu32 nLen, const void *pPattern, sxu32 pLen, sxu32 *pOfft) {
	const char *zIn = (const char *)pBlob;
	const char *zEnd;
	sxi32 rc;
	if(pLen > nLen) {
		return SXERR_NOTFOUND;
	}
	zEnd = &zIn[nLen - pLen];
	for(;;) {
		if(zIn > zEnd) {
			break;
		}
		SX_MACRO_FAST_CMP(zIn, pPattern, pLen, rc);
		if(rc == 0) {
			if(pOfft) {
				*pOfft = (sxu32)(zIn - (const char *)pBlob);
			}
			return SXRET_OK;
		}
		zIn++;
	}
	return SXERR_NOTFOUND;
}