Browse Source

Small rework of rtmempool, based on mod-host changes

Keep thread-safety disabled (optional), as carla does that internally.
Update test code to check for min/max usage
tags/1.9.7
falkTX 8 years ago
parent
commit
34a38c835d
5 changed files with 133 additions and 171 deletions
  1. +1
    -1
      source/modules/rtmempool/rtmempool-lv2.h
  2. +76
    -146
      source/modules/rtmempool/rtmempool.c
  3. +1
    -19
      source/modules/rtmempool/rtmempool.h
  4. +52
    -4
      source/tests/RtLinkedList.cpp
  5. +3
    -1
      source/utils/RtLinkedList.hpp

+ 1
- 1
source/modules/rtmempool/rtmempool-lv2.h View File

@@ -1,6 +1,6 @@
/*
* RealTime Memory Pool, heavily based on work by Nedko Arnaudov
-* Copyright (C) 2013-2014 Filipe Coelho <falktx@falktx.com>
+* Copyright (C) 2013-2016 Filipe Coelho <falktx@falktx.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as


+ 76
- 146
source/modules/rtmempool/rtmempool.c View File

@@ -1,7 +1,7 @@
/*
* RealTime Memory Pool, heavily based on work by Nedko Arnaudov
* Copyright (C) 2006-2009 Nedko Arnaudov <nedko@arnaudov.name>
-* Copyright (C) 2013-2014 Filipe Coelho <falktx@falktx.com>
+* Copyright (C) 2013-2016 Filipe Coelho <falktx@falktx.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -26,6 +26,16 @@
#include <stdlib.h>
#include <string.h>

+//#define RTMEMPOOL_THREAD_SAFETY 1
+
+#if RTMEMPOOL_THREAD_SAFETY
+#define rtmempool_mutex_lock(m) pthread_mutex_lock(m)
+#define rtmempool_mutex_unlock(m) pthread_mutex_unlock(m)
+#else
+#define rtmempool_mutex_lock(...)
+#define rtmempool_mutex_unlock(...)
+#endif

// ------------------------------------------------------------------------------------------------

typedef struct list_head k_list_head;
@@ -46,92 +56,57 @@ typedef struct _RtMemPool
k_list_head unused;
unsigned int unusedCount;

-bool enforceThreadSafety;
-
-// next members are initialized/used only if enforceThreadSafety is true
 pthread_mutex_t mutex;
-unsigned int unusedCount2;
-k_list_head pending;
-size_t usedSize;

} RtMemPool;

// ------------------------------------------------------------------------------------------------
// adjust unused list size

-static void rtsafe_memory_pool_sleepy(RtMemPool* poolPtr)
+static void rtsafe_memory_pool_sleepy(RtMemPool* poolPtr, bool* overMaxOrMallocFailed)
 {
 k_list_head* nodePtr;
-unsigned int count;
 
-if (poolPtr->enforceThreadSafety)
-{
-pthread_mutex_lock(&poolPtr->mutex);
-
-count = poolPtr->unusedCount2;
+k_list_head unused;
+unsigned int unusedCount;
 
-assert(poolPtr->minPreallocated < poolPtr->maxPreallocated);
+INIT_LIST_HEAD(&unused);
+unusedCount = 0;
 
-while (count < poolPtr->minPreallocated)
+while (poolPtr->unusedCount + unusedCount < poolPtr->minPreallocated)
 {
+if (poolPtr->usedCount + poolPtr->unusedCount + unusedCount >= poolPtr->maxPreallocated)
+{
-nodePtr = malloc(sizeof(k_list_head) + poolPtr->dataSize);
-
-if (nodePtr == NULL)
-{
-break;
-}
-
-list_add_tail(nodePtr, &poolPtr->pending);
-
-count++;
-
-poolPtr->usedSize += poolPtr->dataSize;
+*overMaxOrMallocFailed = true;
+break;
 }
 
-while (count > poolPtr->maxPreallocated && ! list_empty(&poolPtr->pending))
-{
-nodePtr = poolPtr->pending.next;
-
-list_del(nodePtr);
-
-free(nodePtr);
+nodePtr = malloc(sizeof(k_list_head) + poolPtr->dataSize);
 
-count--;
-
-poolPtr->usedSize -= poolPtr->dataSize;
+if (nodePtr == NULL)
+{
+*overMaxOrMallocFailed = true;
+break;
 }
 
-pthread_mutex_unlock(&poolPtr->mutex);
+list_add_tail(nodePtr, &unused);
+++unusedCount;
 }
-else
-{
-while (poolPtr->unusedCount < poolPtr->minPreallocated)
-{
-nodePtr = malloc(sizeof(k_list_head) + poolPtr->dataSize);
-
-if (nodePtr == NULL)
-{
-return;
-}
-
-list_add_tail(nodePtr, &poolPtr->unused);
-poolPtr->unusedCount++;
-poolPtr->usedSize += poolPtr->dataSize;
-}
-
-while (poolPtr->unusedCount > poolPtr->maxPreallocated)
-{
-assert(! list_empty(&poolPtr->unused));
+rtmempool_mutex_lock(&poolPtr->mutex);
 
-nodePtr = poolPtr->unused.next;
+poolPtr->unusedCount += unusedCount;
 
-list_del(nodePtr);
-poolPtr->unusedCount--;
+while (unusedCount != 0)
+{
+assert(! list_empty(&unused));
 
-free(nodePtr);
-poolPtr->usedSize -= poolPtr->dataSize;
-}
+nodePtr = unused.next;
+list_del(nodePtr);
+list_add_tail(nodePtr, &poolPtr->unused);
+--unusedCount;
 }
 
+rtmempool_mutex_unlock(&poolPtr->mutex);
 }

// ------------------------------------------------------------------------------------------------
@@ -140,12 +115,12 @@ static bool rtsafe_memory_pool_create2(RtMemPool_Handle* handlePtr,
 const char* poolName,
 size_t dataSize,
 size_t minPreallocated,
-size_t maxPreallocated,
-int enforceThreadSafety)
+size_t maxPreallocated)
 {
 assert(minPreallocated <= maxPreallocated);
 assert(poolName == NULL || strlen(poolName) < RTSAFE_MEMORY_POOL_NAME_MAX);
 
+k_list_head* nodePtr;
 RtMemPool* poolPtr;
 
 poolPtr = malloc(sizeof(RtMemPool));
@@ -174,23 +149,27 @@ static bool rtsafe_memory_pool_create2(RtMemPool_Handle* handlePtr,
 INIT_LIST_HEAD(&poolPtr->unused);
 poolPtr->unusedCount = 0;
 
-poolPtr->enforceThreadSafety = (enforceThreadSafety != 0);
+pthread_mutexattr_t atts;
+pthread_mutexattr_init(&atts);
+#ifdef __ARM_ARCH_7A__
+pthread_mutexattr_setprotocol(&atts, PTHREAD_PRIO_INHERIT);
+#endif
+pthread_mutex_init(&poolPtr->mutex, &atts);
+pthread_mutexattr_destroy(&atts);
 
-if (poolPtr->enforceThreadSafety)
+while (poolPtr->unusedCount < poolPtr->minPreallocated)
 {
-if (pthread_mutex_init(&poolPtr->mutex, NULL) != 0)
+nodePtr = malloc(sizeof(k_list_head) + poolPtr->dataSize);
+
+if (nodePtr == NULL)
 {
-free(poolPtr);
-return false;
+break;
 }
 
-INIT_LIST_HEAD(&poolPtr->pending);
+list_add_tail(nodePtr, &poolPtr->unused);
+poolPtr->unusedCount++;
 }
 
-poolPtr->unusedCount2 = 0;
-poolPtr->usedSize = 0;
-
-rtsafe_memory_pool_sleepy(poolPtr);
 *handlePtr = (RtMemPool_Handle)poolPtr;
 
 return true;
@@ -198,9 +177,13 @@ static bool rtsafe_memory_pool_create2(RtMemPool_Handle* handlePtr,

// ------------------------------------------------------------------------------------------------

-static unsigned char rtsafe_memory_pool_create_old(const char* poolName, size_t dataSize, size_t minPreallocated, size_t maxPreallocated, RtMemPool_Handle* handlePtr)
+static unsigned char rtsafe_memory_pool_create_old(const char* poolName,
+size_t dataSize,
+size_t minPreallocated,
+size_t maxPreallocated,
+RtMemPool_Handle* handlePtr)
 {
-return rtsafe_memory_pool_create2(handlePtr, poolName, dataSize, minPreallocated, maxPreallocated, 0);
+return rtsafe_memory_pool_create2(handlePtr, poolName, dataSize, minPreallocated, maxPreallocated);
 }

// ------------------------------------------------------------------------------------------------
@@ -211,18 +194,7 @@ bool rtsafe_memory_pool_create(RtMemPool_Handle* handlePtr,
size_t minPreallocated,
size_t maxPreallocated)
 {
-return rtsafe_memory_pool_create2(handlePtr, poolName, dataSize, minPreallocated, maxPreallocated, 0);
-}
-
-// ------------------------------------------------------------------------------------------------
-
-bool rtsafe_memory_pool_create_safe(RtMemPool_Handle* handlePtr,
-const char* poolName,
-size_t dataSize,
-size_t minPreallocated,
-size_t maxPreallocated)
-{
-return rtsafe_memory_pool_create2(handlePtr, poolName, dataSize, minPreallocated, maxPreallocated, 1);
+return rtsafe_memory_pool_create2(handlePtr, poolName, dataSize, minPreallocated, maxPreallocated);
 }

// ------------------------------------------------------------------------------------------------
@@ -254,26 +226,7 @@ void rtsafe_memory_pool_destroy(RtMemPool_Handle handle)

assert(list_empty(&poolPtr->unused));

-if (poolPtr->enforceThreadSafety)
-{
-while (! list_empty(&poolPtr->pending))
-{
-nodePtr = poolPtr->pending.next;
-
-list_del(nodePtr);
-
-free(nodePtr);
-}
-
-int ret = pthread_mutex_destroy(&poolPtr->mutex);
-
-#ifdef DEBUG
-assert(ret == 0);
-#else
-// unused
-(void)ret;
-#endif
-}
+pthread_mutex_destroy(&poolPtr->mutex);

free(poolPtr);
}
@@ -288,8 +241,11 @@ void* rtsafe_memory_pool_allocate_atomic(RtMemPool_Handle handle)
k_list_head* nodePtr;
RtMemPool* poolPtr = (RtMemPool*)handle;

+rtmempool_mutex_lock(&poolPtr->mutex);
+
 if (list_empty(&poolPtr->unused))
 {
+rtmempool_mutex_unlock(&poolPtr->mutex);
return NULL;
}

@@ -301,22 +257,7 @@ void* rtsafe_memory_pool_allocate_atomic(RtMemPool_Handle handle)

list_add_tail(nodePtr, &poolPtr->used);

-if (poolPtr->enforceThreadSafety && pthread_mutex_trylock(&poolPtr->mutex) == 0)
-{
-while (poolPtr->unusedCount < poolPtr->minPreallocated && ! list_empty(&poolPtr->pending))
-{
-nodePtr = poolPtr->pending.next;
-
-list_del(nodePtr);
-list_add_tail(nodePtr, &poolPtr->unused);
-
-poolPtr->unusedCount++;
-}
-
-poolPtr->unusedCount2 = poolPtr->unusedCount;
-
-pthread_mutex_unlock(&poolPtr->mutex);
-}
+rtmempool_mutex_unlock(&poolPtr->mutex);

return (nodePtr + 1);
}
@@ -328,13 +269,14 @@ void* rtsafe_memory_pool_allocate_sleepy(RtMemPool_Handle handle)
assert(handle);

 void* data;
+bool overMaxOrMallocFailed = false;
 RtMemPool* poolPtr = (RtMemPool*)handle;
 
 do {
-rtsafe_memory_pool_sleepy(poolPtr);
+rtsafe_memory_pool_sleepy(poolPtr, &overMaxOrMallocFailed);
 data = rtsafe_memory_pool_allocate_atomic((RtMemPool_Handle)poolPtr);
 }
-while (data == NULL);
+while (data == NULL && ! overMaxOrMallocFailed);

return data;
}
@@ -346,33 +288,21 @@ void rtsafe_memory_pool_deallocate(RtMemPool_Handle handle, void* memoryPtr)
{
assert(handle);

-k_list_head* nodePtr;
 RtMemPool* poolPtr = (RtMemPool*)handle;
 
+rtmempool_mutex_lock(&poolPtr->mutex);
+
 list_del((k_list_head*)memoryPtr - 1);
 list_add_tail((k_list_head*)memoryPtr - 1, &poolPtr->unused);
 poolPtr->usedCount--;
 poolPtr->unusedCount++;
 
-if (poolPtr->enforceThreadSafety && pthread_mutex_trylock(&poolPtr->mutex) == 0)
-{
-while (poolPtr->unusedCount > poolPtr->maxPreallocated)
-{
-assert(! list_empty(&poolPtr->unused));
-
-nodePtr = poolPtr->unused.next;
-
-list_del(nodePtr);
-list_add_tail(nodePtr, &poolPtr->pending);
-poolPtr->unusedCount--;
-}
-
-poolPtr->unusedCount2 = poolPtr->unusedCount;
-
-pthread_mutex_unlock(&poolPtr->mutex);
-}
+rtmempool_mutex_unlock(&poolPtr->mutex);
}

// ------------------------------------------------------------------------------------------------
// LV2 stuff

void lv2_rtmempool_init(LV2_RtMemPool_Pool* poolPtr)
{
poolPtr->create = rtsafe_memory_pool_create;


+ 1
- 19
source/modules/rtmempool/rtmempool.h View File

@@ -1,7 +1,7 @@
/*
* RealTime Memory Pool, heavily based on work by Nedko Arnaudov
* Copyright (C) 2006-2009 Nedko Arnaudov <nedko@arnaudov.name>
-* Copyright (C) 2013-2014 Filipe Coelho <falktx@falktx.com>
+* Copyright (C) 2013-2016 Filipe Coelho <falktx@falktx.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -52,24 +52,6 @@ bool rtsafe_memory_pool_create(RtMemPool_Handle* handlePtr,
size_t minPreallocated,
size_t maxPreallocated);

-/**
-* Create new memory pool, thread-safe version
-*
-* <b>may/will sleep</b>
-*
-* @param poolName pool name, for debug purposes, max RTSAFE_MEMORY_POOL_NAME_MAX chars, including terminating zero char. May be NULL.
-* @param dataSize memory chunk size
-* @param minPreallocated min chunks preallocated
-* @param maxPreallocated max chunks preallocated
-*
-* @return Success status, true if successful
-*/
-bool rtsafe_memory_pool_create_safe(RtMemPool_Handle* handlePtr,
-const char* poolName,
-size_t dataSize,
-size_t minPreallocated,
-size_t maxPreallocated);
-
/**
* Destroy previously created memory pool
*


+ 52
- 4
source/tests/RtLinkedList.cpp View File

@@ -21,7 +21,7 @@
#include "CarlaMutex.hpp"

const unsigned short MIN_RT_EVENTS = 5;
-const unsigned short MAX_RT_EVENTS = 10;
+const unsigned short MAX_RT_EVENTS = 12;

struct MyData {
char str[234];
@@ -79,8 +79,8 @@ void run5Tests()

while (! postRtEvents.data.isEmpty())
{
-static MyData fallback = { { '\0' }, 0 };
-const MyData& my(postRtEvents.data.getFirst(fallback, true));
+static MyData kFallback = { { '\0' }, 0 };
+const MyData& my(postRtEvents.data.getFirst(kFallback, true));
allMyData[k++] = my;
}

@@ -102,6 +102,51 @@ void run5Tests()

carla_stdout("Got data: %i %s", my.id, my.str);
}
+
+// append events past minimum size
+MyData dummyData = { { '\0' }, 0 };
+// 5 initial go ok
+assert(postRtEvents.dataPendingRT.append(dummyData));
+assert(postRtEvents.dataPendingRT.append(dummyData));
+assert(postRtEvents.dataPendingRT.append(dummyData));
+assert(postRtEvents.dataPendingRT.append(dummyData));
+assert(postRtEvents.dataPendingRT.append(dummyData));
+// afterwards it fails
+assert(! postRtEvents.dataPendingRT.append(dummyData));
+assert(! postRtEvents.dataPendingRT.append(dummyData));
+carla_stdout("here %i", __LINE__);
+
+// adding sleepy works
+assert(postRtEvents.dataPendingRT.append_sleepy(dummyData));
+carla_stdout("here %i", __LINE__);
+
+// now atomic works too, size was increased
+assert(postRtEvents.dataPendingRT.append(dummyData));
+assert(postRtEvents.dataPendingRT.append(dummyData));
+assert(postRtEvents.dataPendingRT.append(dummyData));
+assert(postRtEvents.dataPendingRT.append(dummyData));
+carla_stdout("here %i", __LINE__);
+
+// fails here now
+assert(! postRtEvents.dataPendingRT.append(dummyData));
+carla_stdout("here %i", __LINE__);
+
+// adding sleepy still works
+assert(postRtEvents.dataPendingRT.append_sleepy(dummyData));
+carla_stdout("here %i", __LINE__);
+
+// now atomic works for 1 more
+assert(postRtEvents.dataPendingRT.append(dummyData));
+assert(! postRtEvents.dataPendingRT.append(dummyData));
+carla_stdout("here %i", __LINE__);
+
+// and adding sleepy no longer works
+assert(! postRtEvents.dataPendingRT.append_sleepy(dummyData));
+carla_stdout("here %i", __LINE__);
+
+// cleanup
+postRtEvents.trySplice();
+postRtEvents.clear();
 }

int main()
@@ -138,7 +183,8 @@ int main()

for (RtLinkedList<MyData>::Itenerator it = postRtEvents.data.begin2(); it.valid(); it.next())
{
-const MyData& my(it.getValue());
+static const MyData kFallback = { { '\0' }, 0 };
+const MyData& my(it.getValue(kFallback));

carla_stdout("FOR DATA!!!: %i %s", my.id, my.str);

@@ -151,8 +197,10 @@ int main()
}
}

+#if 0
 for (const MyData& my : postRtEvents.data)
 carla_stdout("FOR DATA!!! NEW AUTO Itenerator!!!: %i %s", my.id, my.str);
+#endif

postRtEvents.trySplice();
assert(postRtEvents.data.count() == 5);


+ 3
- 1
source/utils/RtLinkedList.hpp View File

@@ -153,7 +153,9 @@ private:

bool _add_sleepy(const T& value, const bool inTail) noexcept
{
-return this->_add_internal(_allocate_sleepy(), value, inTail, &this->fQueue);
+if (typename AbstractLinkedList<T>::Data* const data = _allocate_sleepy())
+return this->_add_internal(data, value, inTail, &this->fQueue);
+return false;
}

CARLA_PREVENT_VIRTUAL_HEAP_ALLOCATION


Loading…
Cancel
Save