
Add rtmempool related files

tags/v0.9.0
falkTX 13 years ago
parent commit 84f0a387c4
5 changed files with 1427 additions and 0 deletions
  1. +872 -0 c++/carla-backend/rtmempool/list.h
  2. +4 -0 c++/carla-backend/rtmempool/log.h
  3. +403 -0 c++/carla-backend/rtmempool/rtmempool.c
  4. +35 -0 c++/carla-backend/rtmempool/rtmempool.h
  5. +113 -0 c++/carla-includes/lv2/lv2_rtmempool.h

+ 872 - 0   c++/carla-backend/rtmempool/list.h

@@ -0,0 +1,872 @@
/* -*- Mode: C ; c-basic-offset: 2 -*- */
/*****************************************************************************
*
* Linux kernel header adapted for user-mode
* The 2.6.17-rt1 version was used.
*
* Original copyright holders of this code are unknown; they were not
* mentioned in the original file.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
*****************************************************************************/

#ifndef _LINUX_LIST_H
#define _LINUX_LIST_H

#include <stddef.h>

#if !defined(offsetof)
#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
#endif

/**
* container_of - cast a member of a structure out to the containing structure
* @ptr: the pointer to the member.
* @type: the type of the container struct this is embedded in.
* @member: the name of the member within the struct.
*
*/
#define container_of(ptr, type, member) (type *)((char *)(ptr) - offsetof(type,member))
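
/*
* A usage sketch (the "struct item" type is hypothetical, not part of this
* header):
*
*   struct item { int value; struct list_head siblings; };
*   struct list_head *pos;  // points at some item's "siblings" member
*   struct item *it = container_of(pos, struct item, siblings);
*
* "it" now addresses the struct that embeds the member "pos" points at.
*/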

#define prefetch(x) (x = x)
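/* (user-mode stand-in: the kernel's prefetch() hint is reduced to a
* self-assignment, so it evaluates its argument but prefetches nothing) */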

/*
* These are non-NULL pointers that will result in page faults
* under normal circumstances, used to verify that nobody uses
* non-initialized list entries.
*/
#define LIST_POISON1 ((void *) 0x00100100)
#define LIST_POISON2 ((void *) 0x00200200)

/*
* Simple doubly linked list implementation.
*
* Some of the internal functions ("__xxx") are useful when
* manipulating whole lists rather than single entries, as
* sometimes we already know the next/prev entries and we can
* generate better code by using them directly rather than
* using the generic single-entry routines.
*/

struct list_head {
struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

#define LIST_HEAD(name) \
struct list_head name = LIST_HEAD_INIT(name)

static inline void INIT_LIST_HEAD(struct list_head *list)
{
list->next = list;
list->prev = list;
}

/*
* Insert a new entry between two known consecutive entries.
*
* This is only for internal list manipulation where we know
* the prev/next entries already!
*/
static inline void __list_add(struct list_head *new,
struct list_head *prev,
struct list_head *next)
{
next->prev = new;
new->next = next;
new->prev = prev;
prev->next = new;
}

/**
* list_add - add a new entry
* @new: new entry to be added
* @head: list head to add it after
*
* Insert a new entry after the specified head.
* This is good for implementing stacks.
*/
static inline void list_add(struct list_head *new, struct list_head *head)
{
__list_add(new, head, head->next);
}

/**
* list_add_tail - add a new entry
* @new: new entry to be added
* @head: list head to add it before
*
* Insert a new entry before the specified head.
* This is useful for implementing queues.
*/
static inline void list_add_tail(struct list_head *new, struct list_head *head)
{
__list_add(new, head->prev, head);
}

/*
* Insert a new entry between two known consecutive entries.
*
* This is only for internal list manipulation where we know
* the prev/next entries already!
*/
static inline void __list_add_rcu(struct list_head * new,
struct list_head * prev, struct list_head * next)
{
new->next = next;
new->prev = prev;
// smp_wmb();
next->prev = new;
prev->next = new;
}

/**
* list_add_rcu - add a new entry to rcu-protected list
* @new: new entry to be added
* @head: list head to add it after
*
* Insert a new entry after the specified head.
* This is good for implementing stacks.
*
* The caller must take whatever precautions are necessary
* (such as holding appropriate locks) to avoid racing
* with another list-mutation primitive, such as list_add_rcu()
* or list_del_rcu(), running on this same list.
* However, it is perfectly legal to run concurrently with
* the _rcu list-traversal primitives, such as
* list_for_each_entry_rcu().
*/
static inline void list_add_rcu(struct list_head *new, struct list_head *head)
{
__list_add_rcu(new, head, head->next);
}

/**
* list_add_tail_rcu - add a new entry to rcu-protected list
* @new: new entry to be added
* @head: list head to add it before
*
* Insert a new entry before the specified head.
* This is useful for implementing queues.
*
* The caller must take whatever precautions are necessary
* (such as holding appropriate locks) to avoid racing
* with another list-mutation primitive, such as list_add_tail_rcu()
* or list_del_rcu(), running on this same list.
* However, it is perfectly legal to run concurrently with
* the _rcu list-traversal primitives, such as
* list_for_each_entry_rcu().
*/
static inline void list_add_tail_rcu(struct list_head *new,
struct list_head *head)
{
__list_add_rcu(new, head->prev, head);
}

/*
* Delete a list entry by making the prev/next entries
* point to each other.
*
* This is only for internal list manipulation where we know
* the prev/next entries already!
*/
static inline void __list_del(struct list_head * prev, struct list_head * next)
{
next->prev = prev;
prev->next = next;
}

/**
* list_del - deletes entry from list.
* @entry: the element to delete from the list.
* Note: list_empty on entry does not return true after this, the entry is
* in an undefined state.
*/
static inline void list_del(struct list_head *entry)
{
__list_del(entry->prev, entry->next);
entry->next = LIST_POISON1;
entry->prev = LIST_POISON2;
}

/**
* list_del_rcu - deletes entry from list without re-initialization
* @entry: the element to delete from the list.
*
* Note: list_empty on entry does not return true after this,
* the entry is in an undefined state. It is useful for RCU based
* lockfree traversal.
*
* In particular, it means that we can not poison the forward
* pointers that may still be used for walking the list.
*
* The caller must take whatever precautions are necessary
* (such as holding appropriate locks) to avoid racing
* with another list-mutation primitive, such as list_del_rcu()
* or list_add_rcu(), running on this same list.
* However, it is perfectly legal to run concurrently with
* the _rcu list-traversal primitives, such as
* list_for_each_entry_rcu().
*
* Note that the caller is not permitted to immediately free
* the newly deleted entry. Instead, either synchronize_rcu()
* or call_rcu() must be used to defer freeing until an RCU
* grace period has elapsed.
*/
static inline void list_del_rcu(struct list_head *entry)
{
__list_del(entry->prev, entry->next);
entry->prev = LIST_POISON2;
}

/**
* list_replace_rcu - replace old entry by new one
* @old : the element to be replaced
* @new : the new element to insert
*
* The old entry will be replaced with the new entry atomically.
*/
static inline void list_replace_rcu(struct list_head *old,
struct list_head *new)
{
new->next = old->next;
new->prev = old->prev;
// smp_wmb();
new->next->prev = new;
new->prev->next = new;
old->prev = LIST_POISON2;
}

/**
* list_del_init - deletes entry from list and reinitialize it.
* @entry: the element to delete from the list.
*/
static inline void list_del_init(struct list_head *entry)
{
__list_del(entry->prev, entry->next);
INIT_LIST_HEAD(entry);
}

/**
* list_move - delete from one list and add as another's head
* @list: the entry to move
* @head: the head that will precede our entry
*/
static inline void list_move(struct list_head *list, struct list_head *head)
{
__list_del(list->prev, list->next);
list_add(list, head);
}

/**
* list_move_tail - delete from one list and add as another's tail
* @list: the entry to move
* @head: the head that will follow our entry
*/
static inline void list_move_tail(struct list_head *list,
struct list_head *head)
{
__list_del(list->prev, list->next);
list_add_tail(list, head);
}

/**
* list_empty - tests whether a list is empty
* @head: the list to test.
*/
static inline int list_empty(const struct list_head *head)
{
return head->next == head;
}

/**
* list_empty_careful - tests whether a list is
* empty _and_ checks that no other CPU might be
* in the process of still modifying either member
*
* NOTE: using list_empty_careful() without synchronization
* can only be safe if the only activity that can happen
* to the list entry is list_del_init(). E.g., it cannot be used
* if another CPU could re-list_add() it.
*
* @head: the list to test.
*/
static inline int list_empty_careful(const struct list_head *head)
{
struct list_head *next = head->next;
return (next == head) && (next == head->prev);
}

static inline void __list_splice(struct list_head *list,
struct list_head *head)
{
struct list_head *first = list->next;
struct list_head *last = list->prev;
struct list_head *at = head->next;

first->prev = head;
head->next = first;

last->next = at;
at->prev = last;
}

/**
* list_splice - join two lists
* @list: the new list to add.
* @head: the place to add it in the first list.
*/
static inline void list_splice(struct list_head *list, struct list_head *head)
{
if (!list_empty(list))
__list_splice(list, head);
}

/**
* list_splice_init - join two lists and reinitialise the emptied list.
* @list: the new list to add.
* @head: the place to add it in the first list.
*
* The list at @list is reinitialised
*/
static inline void list_splice_init(struct list_head *list,
struct list_head *head)
{
if (!list_empty(list)) {
__list_splice(list, head);
INIT_LIST_HEAD(list);
}
}

/**
* list_entry - get the struct for this entry
* @ptr: the &struct list_head pointer.
* @type: the type of the struct this is embedded in.
* @member: the name of the list_struct within the struct.
*/
#define list_entry(ptr, type, member) \
container_of(ptr, type, member)

/**
* list_for_each - iterate over a list
* @pos: the &struct list_head to use as a loop counter.
* @head: the head for your list.
*/
#define list_for_each(pos, head) \
for (pos = (head)->next; prefetch(pos->next), pos != (head); \
pos = pos->next)

/**
* __list_for_each - iterate over a list
* @pos: the &struct list_head to use as a loop counter.
* @head: the head for your list.
*
* This variant differs from list_for_each() in that it's the
* simplest possible list iteration code, no prefetching is done.
* Use this for code that knows the list to be very short (empty
* or 1 entry) most of the time.
*/
#define __list_for_each(pos, head) \
for (pos = (head)->next; pos != (head); pos = pos->next)

/**
* list_for_each_prev - iterate over a list backwards
* @pos: the &struct list_head to use as a loop counter.
* @head: the head for your list.
*/
#define list_for_each_prev(pos, head) \
for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \
pos = pos->prev)

/**
* list_for_each_safe - iterate over a list safe against removal of list entry
* @pos: the &struct list_head to use as a loop counter.
* @n: another &struct list_head to use as temporary storage
* @head: the head for your list.
*/
#define list_for_each_safe(pos, n, head) \
for (pos = (head)->next, n = pos->next; pos != (head); \
pos = n, n = pos->next)

/**
* list_for_each_entry - iterate over list of given type
* @pos: the type * to use as a loop counter.
* @head: the head for your list.
* @member: the name of the list_struct within the struct.
*/
#define list_for_each_entry(pos, head, member) \
for (pos = list_entry((head)->next, typeof(*pos), member); \
prefetch(pos->member.next), &pos->member != (head); \
pos = list_entry(pos->member.next, typeof(*pos), member))

/**
* list_for_each_entry_reverse - iterate backwards over list of given type.
* @pos: the type * to use as a loop counter.
* @head: the head for your list.
* @member: the name of the list_struct within the struct.
*/
#define list_for_each_entry_reverse(pos, head, member) \
for (pos = list_entry((head)->prev, typeof(*pos), member); \
prefetch(pos->member.prev), &pos->member != (head); \
pos = list_entry(pos->member.prev, typeof(*pos), member))

/**
* list_prepare_entry - prepare a pos entry for use as a start point in
* list_for_each_entry_continue
* @pos: the type * to use as a start point
* @head: the head of the list
* @member: the name of the list_struct within the struct.
*/
#define list_prepare_entry(pos, head, member) \
((pos) ? : list_entry(head, typeof(*pos), member))

/**
* list_for_each_entry_continue - iterate over list of given type
* continuing after existing point
* @pos: the type * to use as a loop counter.
* @head: the head for your list.
* @member: the name of the list_struct within the struct.
*/
#define list_for_each_entry_continue(pos, head, member) \
for (pos = list_entry(pos->member.next, typeof(*pos), member); \
prefetch(pos->member.next), &pos->member != (head); \
pos = list_entry(pos->member.next, typeof(*pos), member))

/**
* list_for_each_entry_from - iterate over list of given type
* continuing from existing point
* @pos: the type * to use as a loop counter.
* @head: the head for your list.
* @member: the name of the list_struct within the struct.
*/
#define list_for_each_entry_from(pos, head, member) \
for (; prefetch(pos->member.next), &pos->member != (head); \
pos = list_entry(pos->member.next, typeof(*pos), member))

/**
* list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
* @pos: the type * to use as a loop counter.
* @n: another type * to use as temporary storage
* @head: the head for your list.
* @member: the name of the list_struct within the struct.
*/
#define list_for_each_entry_safe(pos, n, head, member) \
for (pos = list_entry((head)->next, typeof(*pos), member), \
n = list_entry(pos->member.next, typeof(*pos), member); \
&pos->member != (head); \
pos = n, n = list_entry(n->member.next, typeof(*n), member))

/**
* list_for_each_entry_safe_continue - iterate over list of given type
* continuing after existing point safe against removal of list entry
* @pos: the type * to use as a loop counter.
* @n: another type * to use as temporary storage
* @head: the head for your list.
* @member: the name of the list_struct within the struct.
*/
#define list_for_each_entry_safe_continue(pos, n, head, member) \
for (pos = list_entry(pos->member.next, typeof(*pos), member), \
n = list_entry(pos->member.next, typeof(*pos), member); \
&pos->member != (head); \
pos = n, n = list_entry(n->member.next, typeof(*n), member))

/**
* list_for_each_entry_safe_from - iterate over list of given type
* from existing point safe against removal of list entry
* @pos: the type * to use as a loop counter.
* @n: another type * to use as temporary storage
* @head: the head for your list.
* @member: the name of the list_struct within the struct.
*/
#define list_for_each_entry_safe_from(pos, n, head, member) \
for (n = list_entry(pos->member.next, typeof(*pos), member); \
&pos->member != (head); \
pos = n, n = list_entry(n->member.next, typeof(*n), member))

/**
* list_for_each_entry_safe_reverse - iterate backwards over list of given type safe against
* removal of list entry
* @pos: the type * to use as a loop counter.
* @n: another type * to use as temporary storage
* @head: the head for your list.
* @member: the name of the list_struct within the struct.
*/
#define list_for_each_entry_safe_reverse(pos, n, head, member) \
for (pos = list_entry((head)->prev, typeof(*pos), member), \
n = list_entry(pos->member.prev, typeof(*pos), member); \
&pos->member != (head); \
pos = n, n = list_entry(n->member.prev, typeof(*n), member))

/**
* list_for_each_rcu - iterate over an rcu-protected list
* @pos: the &struct list_head to use as a loop counter.
* @head: the head for your list.
*
* This list-traversal primitive may safely run concurrently with
* the _rcu list-mutation primitives such as list_add_rcu()
* as long as the traversal is guarded by rcu_read_lock().
*/
#define list_for_each_rcu(pos, head) \
for (pos = (head)->next; \
prefetch(rcu_dereference(pos)->next), pos != (head); \
pos = pos->next)

#define __list_for_each_rcu(pos, head) \
for (pos = (head)->next; \
rcu_dereference(pos) != (head); \
pos = pos->next)

/**
* list_for_each_safe_rcu - iterate over an rcu-protected list safe
* against removal of list entry
* @pos: the &struct list_head to use as a loop counter.
* @n: another &struct list_head to use as temporary storage
* @head: the head for your list.
*
* This list-traversal primitive may safely run concurrently with
* the _rcu list-mutation primitives such as list_add_rcu()
* as long as the traversal is guarded by rcu_read_lock().
*/
#define list_for_each_safe_rcu(pos, n, head) \
for (pos = (head)->next; \
n = rcu_dereference(pos)->next, pos != (head); \
pos = n)

/**
* list_for_each_entry_rcu - iterate over rcu list of given type
* @pos: the type * to use as a loop counter.
* @head: the head for your list.
* @member: the name of the list_struct within the struct.
*
* This list-traversal primitive may safely run concurrently with
* the _rcu list-mutation primitives such as list_add_rcu()
* as long as the traversal is guarded by rcu_read_lock().
*/
#define list_for_each_entry_rcu(pos, head, member) \
for (pos = list_entry((head)->next, typeof(*pos), member); \
prefetch(rcu_dereference(pos)->member.next), \
&pos->member != (head); \
pos = list_entry(pos->member.next, typeof(*pos), member))


/**
* list_for_each_continue_rcu - iterate over an rcu-protected list
* continuing after existing point.
* @pos: the &struct list_head to use as a loop counter.
* @head: the head for your list.
*
* This list-traversal primitive may safely run concurrently with
* the _rcu list-mutation primitives such as list_add_rcu()
* as long as the traversal is guarded by rcu_read_lock().
*/
#define list_for_each_continue_rcu(pos, head) \
for ((pos) = (pos)->next; \
prefetch(rcu_dereference((pos))->next), (pos) != (head); \
(pos) = (pos)->next)

/*
* Double linked lists with a single pointer list head.
* Mostly useful for hash tables where the two pointer list head is
* too wasteful.
* You lose the ability to access the tail in O(1).
*/

struct hlist_head {
struct hlist_node *first;
};

struct hlist_node {
struct hlist_node *next, **pprev;
};
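/* pprev points at the previous node's "next" pointer (or at the head's
* "first" pointer for the first node); this lets __hlist_del() unlink a
* node without a reference to its hlist_head */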

#define HLIST_HEAD_INIT { .first = NULL }
#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
static inline void INIT_HLIST_NODE(struct hlist_node *h)
{
h->next = NULL;
h->pprev = NULL;
}

static inline int hlist_unhashed(const struct hlist_node *h)
{
return !h->pprev;
}

static inline int hlist_empty(const struct hlist_head *h)
{
return !h->first;
}

static inline void __hlist_del(struct hlist_node *n)
{
struct hlist_node *next = n->next;
struct hlist_node **pprev = n->pprev;
*pprev = next;
if (next)
next->pprev = pprev;
}

static inline void hlist_del(struct hlist_node *n)
{
__hlist_del(n);
n->next = LIST_POISON1;
n->pprev = LIST_POISON2;
}

/**
* hlist_del_rcu - deletes entry from hash list without re-initialization
* @n: the element to delete from the hash list.
*
* Note: list_unhashed() on entry does not return true after this,
* the entry is in an undefined state. It is useful for RCU based
* lockfree traversal.
*
* In particular, it means that we can not poison the forward
* pointers that may still be used for walking the hash list.
*
* The caller must take whatever precautions are necessary
* (such as holding appropriate locks) to avoid racing
* with another list-mutation primitive, such as hlist_add_head_rcu()
* or hlist_del_rcu(), running on this same list.
* However, it is perfectly legal to run concurrently with
* the _rcu list-traversal primitives, such as
* hlist_for_each_entry().
*/
static inline void hlist_del_rcu(struct hlist_node *n)
{
__hlist_del(n);
n->pprev = LIST_POISON2;
}

static inline void hlist_del_init(struct hlist_node *n)
{
if (!hlist_unhashed(n)) {
__hlist_del(n);
INIT_HLIST_NODE(n);
}
}

/**
* hlist_replace_rcu - replace old entry by new one
* @old : the element to be replaced
* @new : the new element to insert
*
* The old entry will be replaced with the new entry atomically.
*/
static inline void hlist_replace_rcu(struct hlist_node *old,
struct hlist_node *new)
{
struct hlist_node *next = old->next;

new->next = next;
new->pprev = old->pprev;
// smp_wmb();
if (next)
new->next->pprev = &new->next;
*new->pprev = new;
old->pprev = LIST_POISON2;
}

static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
struct hlist_node *first = h->first;
n->next = first;
if (first)
first->pprev = &n->next;
h->first = n;
n->pprev = &h->first;
}


/**
* hlist_add_head_rcu - adds the specified element to the specified hlist,
* while permitting racing traversals.
* @n: the element to add to the hash list.
* @h: the list to add to.
*
* The caller must take whatever precautions are necessary
* (such as holding appropriate locks) to avoid racing
* with another list-mutation primitive, such as hlist_add_head_rcu()
* or hlist_del_rcu(), running on this same list.
* However, it is perfectly legal to run concurrently with
* the _rcu list-traversal primitives, such as
* hlist_for_each_entry_rcu(), used to prevent memory-consistency
* problems on Alpha CPUs. Regardless of the type of CPU, the
* list-traversal primitive must be guarded by rcu_read_lock().
*/
static inline void hlist_add_head_rcu(struct hlist_node *n,
struct hlist_head *h)
{
struct hlist_node *first = h->first;
n->next = first;
n->pprev = &h->first;
// smp_wmb();
if (first)
first->pprev = &n->next;
h->first = n;
}

/* next must be != NULL */
static inline void hlist_add_before(struct hlist_node *n,
struct hlist_node *next)
{
n->pprev = next->pprev;
n->next = next;
next->pprev = &n->next;
*(n->pprev) = n;
}

static inline void hlist_add_after(struct hlist_node *n,
struct hlist_node *next)
{
next->next = n->next;
n->next = next;
next->pprev = &n->next;

if(next->next)
next->next->pprev = &next->next;
}

/**
* hlist_add_before_rcu - adds the specified element to the specified hlist
* before the specified node while permitting racing traversals.
* @n: the new element to add to the hash list.
* @next: the existing element to add the new element before.
*
* The caller must take whatever precautions are necessary
* (such as holding appropriate locks) to avoid racing
* with another list-mutation primitive, such as hlist_add_head_rcu()
* or hlist_del_rcu(), running on this same list.
* However, it is perfectly legal to run concurrently with
* the _rcu list-traversal primitives, such as
* hlist_for_each_entry_rcu(), used to prevent memory-consistency
* problems on Alpha CPUs.
*/
static inline void hlist_add_before_rcu(struct hlist_node *n,
struct hlist_node *next)
{
n->pprev = next->pprev;
n->next = next;
// smp_wmb();
next->pprev = &n->next;
*(n->pprev) = n;
}

/**
* hlist_add_after_rcu - adds the specified element to the specified hlist
* after the specified node while permitting racing traversals.
* @prev: the existing element to add the new element after.
* @n: the new element to add to the hash list.
*
* The caller must take whatever precautions are necessary
* (such as holding appropriate locks) to avoid racing
* with another list-mutation primitive, such as hlist_add_head_rcu()
* or hlist_del_rcu(), running on this same list.
* However, it is perfectly legal to run concurrently with
* the _rcu list-traversal primitives, such as
* hlist_for_each_entry_rcu(), used to prevent memory-consistency
* problems on Alpha CPUs.
*/
static inline void hlist_add_after_rcu(struct hlist_node *prev,
struct hlist_node *n)
{
n->next = prev->next;
n->pprev = &prev->next;
// smp_wmb();
prev->next = n;
if (n->next)
n->next->pprev = &n->next;
}

#define hlist_entry(ptr, type, member) container_of(ptr,type,member)

#define hlist_for_each(pos, head) \
for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \
pos = pos->next)

#define hlist_for_each_safe(pos, n, head) \
for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
pos = n)

/**
* hlist_for_each_entry - iterate over list of given type
* @tpos: the type * to use as a loop counter.
* @pos: the &struct hlist_node to use as a loop counter.
* @head: the head for your list.
* @member: the name of the hlist_node within the struct.
*/
#define hlist_for_each_entry(tpos, pos, head, member) \
for (pos = (head)->first; \
pos && ({ prefetch(pos->next); 1;}) && \
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
pos = pos->next)

/**
* hlist_for_each_entry_continue - iterate over a hlist continuing after existing point
* @tpos: the type * to use as a loop counter.
* @pos: the &struct hlist_node to use as a loop counter.
* @member: the name of the hlist_node within the struct.
*/
#define hlist_for_each_entry_continue(tpos, pos, member) \
for (pos = (pos)->next; \
pos && ({ prefetch(pos->next); 1;}) && \
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
pos = pos->next)

/**
* hlist_for_each_entry_from - iterate over a hlist continuing from existing point
* @tpos: the type * to use as a loop counter.
* @pos: the &struct hlist_node to use as a loop counter.
* @member: the name of the hlist_node within the struct.
*/
#define hlist_for_each_entry_from(tpos, pos, member) \
for (; pos && ({ prefetch(pos->next); 1;}) && \
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
pos = pos->next)

/**
* hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
* @tpos: the type * to use as a loop counter.
* @pos: the &struct hlist_node to use as a loop counter.
* @n: another &struct hlist_node to use as temporary storage
* @head: the head for your list.
* @member: the name of the hlist_node within the struct.
*/
#define hlist_for_each_entry_safe(tpos, pos, n, head, member) \
for (pos = (head)->first; \
pos && ({ n = pos->next; 1; }) && \
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
pos = n)

/**
* hlist_for_each_entry_rcu - iterate over rcu list of given type
* @tpos: the type * to use as a loop counter.
* @pos: the &struct hlist_node to use as a loop counter.
* @head: the head for your list.
* @member: the name of the hlist_node within the struct.
*
* This list-traversal primitive may safely run concurrently with
* the _rcu list-mutation primitives such as hlist_add_head_rcu()
* as long as the traversal is guarded by rcu_read_lock().
*/
#define hlist_for_each_entry_rcu(tpos, pos, head, member) \
for (pos = (head)->first; \
rcu_dereference(pos) && ({ prefetch(pos->next); 1;}) && \
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
pos = pos->next)

#endif
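
A minimal sketch of how this header is typically used (the "struct item" type and the demo function are illustrative, not part of the commit):

#include <stdio.h>
#include "list.h"

struct item {
  int value;
  struct list_head siblings; /* embedded link node of the intrusive list */
};

static void demo(struct item *a, struct item *b)
{
  LIST_HEAD(items); /* a head whose next/prev point at itself: the empty list */
  struct item *pos;

  list_add_tail(&a->siblings, &items); /* queue-style append */
  list_add_tail(&b->siblings, &items);

  list_for_each_entry(pos, &items, siblings)
    printf("%d\n", pos->value);
}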

+ 4 - 0   c++/carla-backend/rtmempool/log.h

@@ -0,0 +1,4 @@
/* simple file for rtmempool compatibility */

#define LOG_DEBUG(format, arg...)
#define LOG_WARNING(format, arg...)
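
These macros are deliberate no-ops, so the diagnostics in rtmempool.c compile away to empty statements. A short sketch (the function is illustrative):

#include "log.h"

static void example(const char *name, unsigned int count)
{
  /* both calls expand to nothing; no logging backend is needed */
  LOG_DEBUG("pool \"%s\" created", name);
  LOG_WARNING("leaking %u entries", count);
  (void)name; (void)count; /* the arguments vanish with the macros */
}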

+ 403 - 0   c++/carla-backend/rtmempool/rtmempool.c

@@ -0,0 +1,403 @@
/* -*- Mode: C ; c-basic-offset: 2 -*- */
/*****************************************************************************
*
* This file is part of zynjacku
*
* Copyright (C) 2006,2007,2008,2009 Nedko Arnaudov <nedko@arnaudov.name>
* Copyright (C) 2012 Filipe Coelho <falktx@falktx.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
*****************************************************************************/

#include <stddef.h>
#include <stdbool.h>
#include <string.h>
#include <stdio.h> /* sprintf */
#include <stdlib.h>
#include <assert.h>
#include <pthread.h>
#include "lv2/lv2_rtmempool.h"

#include "rtmempool.h"

#include "list.h"
//#define LOG_LEVEL LOG_LEVEL_DEBUG
#include "log.h"

struct rtsafe_memory_pool
{
char name[LV2_RTSAFE_MEMORY_POOL_NAME_MAX];
size_t data_size;
size_t min_preallocated;
size_t max_preallocated;

unsigned int used_count;
struct list_head unused;
struct list_head used;
unsigned int unused_count;

bool enforce_thread_safety;
/* next members are initialized/used only if enforce_thread_safety is true */
pthread_mutex_t mutex;
unsigned int unused_count2;
struct list_head pending;

size_t used_size;
};

static
void
rtsafe_memory_pool_sleepy(
LV2_RtMemPool_Handle pool_handle);

static
bool
rtsafe_memory_pool_create(
LV2_RtMemPool_Handle pool_handle,
const char * pool_name,
size_t data_size,
size_t min_preallocated,
size_t max_preallocated,
bool enforce_thread_safety)
{
int ret;
struct rtsafe_memory_pool * pool_ptr;

assert(min_preallocated <= max_preallocated);

assert(pool_name == NULL || strlen(pool_name) < LV2_RTSAFE_MEMORY_POOL_NAME_MAX);

LOG_DEBUG(
"creating pool \"%s\" (size %u, min = %u, max = %u, enforce = %s)",
pool_name,
(unsigned int)data_size,
(unsigned int)min_preallocated,
(unsigned int)max_preallocated,
enforce_thread_safety ? "true" : "false");

pool_ptr = (struct rtsafe_memory_pool *)pool_handle;
if (pool_ptr == NULL)
{
return false;
}

if (pool_name != NULL)
{
strcpy(pool_ptr->name, pool_name);
}
else
{
sprintf(pool_ptr->name, "%p", pool_ptr);
}

pool_ptr->data_size = data_size;
pool_ptr->min_preallocated = min_preallocated;
pool_ptr->max_preallocated = max_preallocated;

INIT_LIST_HEAD(&pool_ptr->used);
pool_ptr->used_count = 0;

INIT_LIST_HEAD(&pool_ptr->unused);
pool_ptr->unused_count = 0;

pool_ptr->enforce_thread_safety = enforce_thread_safety;
if (enforce_thread_safety)
{
ret = pthread_mutex_init(&pool_ptr->mutex, NULL);
if (ret != 0)
{
/* do not free pool_ptr here: the handle is owned by
rtmempool_allocator_init()/rtmempool_allocator_free(), and freeing it
now would lead to a double free in rtmempool_allocator_free() */
return false;
}

INIT_LIST_HEAD(&pool_ptr->pending);
pool_ptr->unused_count2 = 0;
}

pool_ptr->used_size = 0;

rtsafe_memory_pool_sleepy(pool_handle);

return true;
}

#define pool_ptr ((struct rtsafe_memory_pool *)pool_handle)
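/* every function between here and the matching #undef receives the opaque
* handle and reaches the pool through this cast */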

static
void
rtsafe_memory_pool_destroy(
LV2_RtMemPool_Handle pool_handle)
{
int ret;
struct list_head * node_ptr;

LOG_DEBUG("destroying pool \"%s\"", pool_ptr->name);

/* caller should deallocate all chunks prior releasing pool itself */
if (pool_ptr->used_count != 0)
{
LOG_WARNING("Deallocating non-empty pool \"%s\", leaking %u entries:", pool_ptr->name, pool_ptr->used_count);

list_for_each(node_ptr, &pool_ptr->used)
{
LOG_WARNING(" %p", node_ptr + 1);
}

assert(0);
}

while (pool_ptr->unused_count != 0)
{
assert(!list_empty(&pool_ptr->unused));

node_ptr = pool_ptr->unused.next;

list_del(node_ptr);
pool_ptr->unused_count--;

free(node_ptr);
}

assert(list_empty(&pool_ptr->unused));

if (pool_ptr->enforce_thread_safety)
{
while (!list_empty(&pool_ptr->pending))
{
node_ptr = pool_ptr->pending.next;

list_del(node_ptr);

free(node_ptr);
}

ret = pthread_mutex_destroy(&pool_ptr->mutex);
assert(ret == 0);
}

// "ret" is only read by assert(); cast to void to avoid an
// unused-variable warning when NDEBUG disables assertions
(void)ret;
}

/* adjust unused list size */
static
void
rtsafe_memory_pool_sleepy(
LV2_RtMemPool_Handle pool_handle)
{
struct list_head * node_ptr;
unsigned int count;

LOG_DEBUG("pool \"%s\", sleepy", pool_ptr->name);

if (pool_ptr->enforce_thread_safety)
{
pthread_mutex_lock(&pool_ptr->mutex);

count = pool_ptr->unused_count2;

assert(pool_ptr->min_preallocated <= pool_ptr->max_preallocated);

while (count < pool_ptr->min_preallocated)
{
node_ptr = malloc(sizeof(struct list_head) + pool_ptr->data_size);
if (node_ptr == NULL)
{
LOG_DEBUG("malloc() failed (%u)", (unsigned int)pool_ptr->used_size);
break;
}

list_add_tail(node_ptr, &pool_ptr->pending);

count++;

pool_ptr->used_size += pool_ptr->data_size;
}

while (count > pool_ptr->max_preallocated && !list_empty(&pool_ptr->pending))
{
node_ptr = pool_ptr->pending.next;

list_del(node_ptr);

free(node_ptr);

count--;

pool_ptr->used_size -= pool_ptr->data_size;
}

pthread_mutex_unlock(&pool_ptr->mutex);
}
else
{
while (pool_ptr->unused_count < pool_ptr->min_preallocated)
{
node_ptr = malloc(sizeof(struct list_head) + pool_ptr->data_size);
if (node_ptr == NULL)
{
LOG_DEBUG("malloc() failed (%u)", (unsigned int)pool_ptr->used_size);
return;
}

list_add_tail(node_ptr, &pool_ptr->unused);
pool_ptr->unused_count++;
pool_ptr->used_size += pool_ptr->data_size;
}

while (pool_ptr->unused_count > pool_ptr->max_preallocated)
{
assert(!list_empty(&pool_ptr->unused));

node_ptr = pool_ptr->unused.next;

list_del(node_ptr);
pool_ptr->unused_count--;

free(node_ptr);
pool_ptr->used_size -= pool_ptr->data_size;
}
}
}

/* find entry in unused list, fail if it is empty */
static
void *
rtsafe_memory_pool_allocate_atomic(
LV2_RtMemPool_Handle pool_handle)
{
struct list_head * node_ptr;

LOG_DEBUG("pool \"%s\", allocate (%u, %u)", pool_ptr->name, pool_ptr->used_count, pool_ptr->unused_count);

if (list_empty(&pool_ptr->unused))
{
return NULL;
}

node_ptr = pool_ptr->unused.next;
list_del(node_ptr);
pool_ptr->unused_count--;
pool_ptr->used_count++;
list_add_tail(node_ptr, &pool_ptr->used);

if (pool_ptr->enforce_thread_safety &&
pthread_mutex_trylock(&pool_ptr->mutex) == 0)
{
while (pool_ptr->unused_count < pool_ptr->min_preallocated && !list_empty(&pool_ptr->pending))
{
node_ptr = pool_ptr->pending.next;

list_del(node_ptr);
list_add_tail(node_ptr, &pool_ptr->unused);
pool_ptr->unused_count++;
}

pool_ptr->unused_count2 = pool_ptr->unused_count;

pthread_mutex_unlock(&pool_ptr->mutex);
}

LOG_DEBUG("pool \"%s\", allocated %p (%u)", pool_ptr->name, node_ptr + 1, pool_ptr->used_count);
return (node_ptr + 1);
}

/* move from used to unused list */
static
void
rtsafe_memory_pool_deallocate(
LV2_RtMemPool_Handle pool_handle,
void * data)
{
struct list_head * node_ptr;

LOG_DEBUG("pool \"%s\", deallocate %p (%u)", pool_ptr->name, (struct list_head *)data - 1, pool_ptr->used_count);

list_del((struct list_head *)data - 1);
list_add_tail((struct list_head *)data - 1, &pool_ptr->unused);
pool_ptr->used_count--;
pool_ptr->unused_count++;

if (pool_ptr->enforce_thread_safety &&
pthread_mutex_trylock(&pool_ptr->mutex) == 0)
{
while (pool_ptr->unused_count > pool_ptr->max_preallocated)
{
assert(!list_empty(&pool_ptr->unused));

node_ptr = pool_ptr->unused.next;

list_del(node_ptr);
list_add_tail(node_ptr, &pool_ptr->pending);
pool_ptr->unused_count--;
}

pool_ptr->unused_count2 = pool_ptr->unused_count;

pthread_mutex_unlock(&pool_ptr->mutex);
}
}

static
void *
rtsafe_memory_pool_allocate_sleepy(
LV2_RtMemPool_Handle pool_handle)
{
void * data;

LOG_DEBUG("pool \"%s\", allocate sleepy", pool_ptr->name);

do
{
rtsafe_memory_pool_sleepy(pool_handle);
data = rtsafe_memory_pool_allocate_atomic(pool_handle);
}
while (data == NULL);

return data;
}

#undef pool_ptr

static
bool
rtsafe_memory_pool_create2(
LV2_RtMemPool_Handle pool_handle,
const char * pool_name,
size_t data_size,
size_t min_preallocated,
size_t max_preallocated)
{
return rtsafe_memory_pool_create(pool_handle, pool_name, data_size, min_preallocated, max_preallocated, false);
}

void
rtmempool_allocator_init(
struct _LV2_RtMemPool_Pool * allocator_ptr)
{
allocator_ptr->handle = (LV2_RtMemPool_Handle)malloc(sizeof(struct rtsafe_memory_pool));
allocator_ptr->create = rtsafe_memory_pool_create2;
allocator_ptr->destroy = rtsafe_memory_pool_destroy;
allocator_ptr->allocate_atomic = rtsafe_memory_pool_allocate_atomic;
allocator_ptr->allocate_sleepy = rtsafe_memory_pool_allocate_sleepy;
allocator_ptr->deallocate = rtsafe_memory_pool_deallocate;
}

void
rtmempool_allocator_free(
struct _LV2_RtMemPool_Pool * allocator_ptr)
{
if (allocator_ptr->handle)
free((struct rtsafe_memory_pool *)allocator_ptr->handle);
}
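
A minimal usage sketch of the allocator wired up above (names are from this commit; the flow and error handling are shortened assumptions):

#include "lv2/lv2_rtmempool.h"
#include "rtmempool.h"

int main(void)
{
  LV2_RtMemPool_Pool pool;
  void *chunk;

  rtmempool_allocator_init(&pool);

  /* one pool of 64-byte chunks, keeping between 4 and 16 preallocated */
  if (!pool.create(pool.handle, "demo", 64, 4, 16))
    return 1;

  chunk = pool.allocate_atomic(pool.handle); /* realtime-safe, never sleeps */
  if (chunk != NULL)
    pool.deallocate(pool.handle, chunk);

  pool.destroy(pool.handle);
  rtmempool_allocator_free(&pool);
  return 0;
}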

+ 35 - 0   c++/carla-backend/rtmempool/rtmempool.h

@@ -0,0 +1,35 @@
/* -*- Mode: C ; c-basic-offset: 2 -*- */
/*****************************************************************************
*
* This file is part of zynjacku
*
* Copyright (C) 2006,2007,2008,2009 Nedko Arnaudov <nedko@arnaudov.name>
* Copyright (C) 2012 Filipe Coelho <falktx@falktx.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
*****************************************************************************/

#ifndef RTMEMPOOL_H__1FA54215_11CF_4659_9CF3_C17A10A67A1F__INCLUDED
#define RTMEMPOOL_H__1FA54215_11CF_4659_9CF3_C17A10A67A1F__INCLUDED

/* forward declaration, so this header does not depend on lv2_rtmempool.h
being included first */
struct _LV2_RtMemPool_Pool;

void
rtmempool_allocator_init(
struct _LV2_RtMemPool_Pool * allocator_ptr);

void
rtmempool_allocator_free(
struct _LV2_RtMemPool_Pool * allocator_ptr);

#endif /* #ifndef RTMEMPOOL_H__1FA54215_11CF_4659_9CF3_C17A10A67A1F__INCLUDED */

+ 113 - 0   c++/carla-includes/lv2/lv2_rtmempool.h

@@ -0,0 +1,113 @@
/*
LV2 realtime safe memory pool extension definition
This work is in public domain.

This file is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.

If you have questions, contact Filipe Coelho (aka falkTX) <falktx@falktx.com>
or ask in #lad channel, FreeNode IRC network.
*/

/**
* @file lv2_rtmempool.h
* C header for the LV2 rtmempool extension <http://kxstudio.sf.net/ns/lv2ext/rtmempool>
*
*/

#ifndef LV2_RTMEMPOOL_H
#define LV2_RTMEMPOOL_H

#include <stddef.h> /* size_t, used by the prototypes below */

#define LV2_RTSAFE_MEMORY_POOL_URI "http://kxstudio.sf.net/ns/lv2ext/rtmempool"
#define LV2_RTSAFE_MEMORY_POOL_PREFIX LV2_RTSAFE_MEMORY_POOL_URI "#"

#define LV2_RTSAFE_MEMORY_POOL__Pool LV2_RTSAFE_MEMORY_POOL_PREFIX "Pool"

/** max size of memory pool name, in chars, including terminating zero char */
#define LV2_RTSAFE_MEMORY_POOL_NAME_MAX 128

#ifdef __cplusplus
extern "C" {
#else
#include <stdbool.h>
#endif

/**
* Opaque handle to host data for LV2_RtMemPool_Pool.
*/
typedef void* LV2_RtMemPool_Handle;

/**
* On instantiation, the host must supply the LV2_RTSAFE_MEMORY_POOL__Pool feature.
* LV2_Feature::data must be a pointer to LV2_RtMemPool_Pool.
*/
typedef struct _LV2_RtMemPool_Pool {
/**
* Opaque pointer to host data.
*
* This MUST be passed to methods in this struct whenever they are called.
* Otherwise, it must not be interpreted in any way.
*/
LV2_RtMemPool_Handle handle;

/**
* This function is called when the plugin wants to create a memory pool
*
* <b>may/will sleep</b>
*
* @param pool_name pool name, for debug purposes, at most LV2_RTSAFE_MEMORY_POOL_NAME_MAX chars, including the terminating zero char. May be NULL.
* @param data_size memory chunk size
* @param min_preallocated min chunks preallocated
* @param max_preallocated max chunks preallocated
*
* @return Success status
*/
bool (*create)(LV2_RtMemPool_Handle handle,
const char * pool_name,
size_t data_size,
size_t min_preallocated,
size_t max_preallocated);

/**
* This function is called when the plugin wants to destroy a previously created memory pool
*
* <b>may/will sleep</b>
*/
void (*destroy)(LV2_RtMemPool_Handle handle);

/**
* This function is called when the plugin wants to allocate memory in a context where sleeping is not allowed
*
* <b>will not sleep</b>
*
* @return Pointer to allocated memory or NULL if no memory is available
*/
void * (*allocate_atomic)(LV2_RtMemPool_Handle handle);

/**
* This function is called when the plugin wants to allocate memory in a context where sleeping is allowed
*
* <b>may/will sleep</b>
*
* @return Pointer to allocated memory or NULL if no memory is available (should not happen under normal conditions)
*/
void * (*allocate_sleepy)(LV2_RtMemPool_Handle handle);

/**
* This function is called when the plugin wants to deallocate previously allocated memory
*
* <b>will not sleep</b>
*
* @param memory_ptr pointer to previously allocated memory chunk
*/
void (*deallocate)(LV2_RtMemPool_Handle handle,
void * memory_ptr);

} LV2_RtMemPool_Pool;

#ifdef __cplusplus
} /* extern "C" */
#endif

#endif /* LV2_RTMEMPOOL_H */
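
For context, a sketch of how a plugin might locate this feature in its instantiate() callback (assumes the LV2 core header that defines LV2_Feature is available; the helper name is illustrative):

#include <string.h>
#include "lv2.h" /* LV2_Feature, from the LV2 core specification */
#include "lv2_rtmempool.h"

static const LV2_RtMemPool_Pool *
find_rtmempool(const LV2_Feature * const * features)
{
  int i;

  for (i = 0; features[i] != NULL; ++i)
  {
    if (strcmp(features[i]->URI, LV2_RTSAFE_MEMORY_POOL__Pool) == 0)
      return (const LV2_RtMemPool_Pool *)features[i]->data;
  }

  return NULL; /* the host does not provide the extension */
}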
