/* -*- Mode: C ; c-basic-offset: 2 -*- */
/*****************************************************************************
 *
 * This file is part of zynjacku
 *
 * Copyright (C) 2006,2007,2008,2009 Nedko Arnaudov <nedko@arnaudov.name>
 * Copyright (C) 2012 Filipe Coelho <falktx@falktx.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 *****************************************************************************/
#include <stddef.h>
#include <stdbool.h>
#include <string.h>
#include <stdio.h>              /* sprintf */
#include <stdlib.h>
#include <assert.h>
#include <pthread.h>

#include "lv2/lv2_rtmempool.h"
#include "rtmempool.h"
#include "list.h"
//#define LOG_LEVEL LOG_LEVEL_DEBUG
#include "log.h"
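
/*
 * Each pool entry is allocated as a struct list_head header immediately
 * followed by data_size bytes of user data; the pointer handed out to the
 * caller is the address just past the header (node_ptr + 1).
 */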
struct rtsafe_memory_pool
{
  char name[LV2_RTSAFE_MEMORY_POOL_NAME_MAX];
  size_t data_size;
  size_t min_preallocated;
  size_t max_preallocated;

  unsigned int used_count;
  struct list_head unused;
  struct list_head used;
  unsigned int unused_count;

  bool enforce_thread_safety;
  /* next members are initialized/used only if enforce_thread_safety is true */
  pthread_mutex_t mutex;
  unsigned int unused_count2;
  struct list_head pending;

  size_t used_size;
};
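
/*
 * When enforce_thread_safety is set, refilling happens on the pending list
 * under the mutex (the non-realtime "sleepy" path), while the realtime
 * allocate/deallocate paths only move entries between pending and unused
 * when pthread_mutex_trylock() succeeds, so they never block on the lock.
 */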
static
void
rtsafe_memory_pool_sleepy(
  LV2_RtMemPool_Handle pool_handle);
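
/*
 * Create a pool and preallocate min_preallocated entries.  Returns false on
 * allocation or mutex-initialization failure.  Not realtime-safe.
 */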
static
bool
rtsafe_memory_pool_create(
  LV2_RtMemPool_Handle * pool_handle_ptr,
  const char * pool_name,
  size_t data_size,
  size_t min_preallocated,
  size_t max_preallocated,
  bool enforce_thread_safety)
{
  int ret;
  struct rtsafe_memory_pool * pool_ptr;

  assert(min_preallocated <= max_preallocated);
  assert(pool_name == NULL || strlen(pool_name) < LV2_RTSAFE_MEMORY_POOL_NAME_MAX);

  LOG_DEBUG(
    "creating pool \"%s\" (size %u, min = %u, max = %u, enforce = %s)",
    pool_name,
    (unsigned int)data_size,
    (unsigned int)min_preallocated,
    (unsigned int)max_preallocated,
    enforce_thread_safety ? "true" : "false");

  pool_ptr = malloc(sizeof(struct rtsafe_memory_pool));
  if (pool_ptr == NULL)
  {
    return false;
  }

  if (pool_name != NULL)
  {
    strcpy(pool_ptr->name, pool_name);
  }
  else
  {
    sprintf(pool_ptr->name, "%p", pool_ptr);
  }

  pool_ptr->data_size = data_size;
  pool_ptr->min_preallocated = min_preallocated;
  pool_ptr->max_preallocated = max_preallocated;

  INIT_LIST_HEAD(&pool_ptr->used);
  pool_ptr->used_count = 0;

  INIT_LIST_HEAD(&pool_ptr->unused);
  pool_ptr->unused_count = 0;

  pool_ptr->enforce_thread_safety = enforce_thread_safety;
  if (enforce_thread_safety)
  {
    ret = pthread_mutex_init(&pool_ptr->mutex, NULL);
    if (ret != 0)
    {
      free(pool_ptr);
      return false;
    }

    INIT_LIST_HEAD(&pool_ptr->pending);
    pool_ptr->unused_count2 = 0;
  }

  pool_ptr->used_size = 0;

  rtsafe_memory_pool_sleepy((LV2_RtMemPool_Handle)pool_ptr);
  *pool_handle_ptr = (LV2_RtMemPool_Handle)pool_ptr;

  return true;
}
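
/*
 * From here on the opaque handle is accessed through this macro, so the
 * remaining functions can dereference the pool without an explicit cast.
 */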
#define pool_ptr ((struct rtsafe_memory_pool *)pool_handle)
static
void
rtsafe_memory_pool_destroy(
  LV2_RtMemPool_Handle pool_handle)
{
  int ret;
  struct list_head * node_ptr;

  LOG_DEBUG("destroying pool \"%s\"", pool_ptr->name);

  /* caller should deallocate all chunks prior to releasing the pool itself */
  if (pool_ptr->used_count != 0)
  {
    LOG_WARNING("Deallocating non-empty pool \"%s\", leaking %u entries:", pool_ptr->name, pool_ptr->used_count);

    list_for_each(node_ptr, &pool_ptr->used)
    {
      LOG_WARNING(" %p", node_ptr + 1);
    }

    assert(0);
  }

  while (pool_ptr->unused_count != 0)
  {
    assert(!list_empty(&pool_ptr->unused));

    node_ptr = pool_ptr->unused.next;

    list_del(node_ptr);
    pool_ptr->unused_count--;

    free(node_ptr);
  }

  assert(list_empty(&pool_ptr->unused));

  if (pool_ptr->enforce_thread_safety)
  {
    while (!list_empty(&pool_ptr->pending))
    {
      node_ptr = pool_ptr->pending.next;
      list_del(node_ptr);
      free(node_ptr);
    }

    ret = pthread_mutex_destroy(&pool_ptr->mutex);
    assert(ret == 0);
  }

  free(pool_ptr);

  /* ret is only read by assert(); this silences unused-variable warnings in NDEBUG builds */
  (void)ret;
}
/* adjust unused list size */
static
void
rtsafe_memory_pool_sleepy(
  LV2_RtMemPool_Handle pool_handle)
{
  struct list_head * node_ptr;
  unsigned int count;

  LOG_DEBUG("pool \"%s\", sleepy", pool_ptr->name);

  if (pool_ptr->enforce_thread_safety)
  {
    pthread_mutex_lock(&pool_ptr->mutex);

    count = pool_ptr->unused_count2;

    assert(pool_ptr->min_preallocated < pool_ptr->max_preallocated);

    while (count < pool_ptr->min_preallocated)
    {
      node_ptr = malloc(sizeof(struct list_head) + pool_ptr->data_size);
      if (node_ptr == NULL)
      {
        LOG_DEBUG("malloc() failed (%u)", (unsigned int)pool_ptr->used_size);
        break;
      }

      list_add_tail(node_ptr, &pool_ptr->pending);

      count++;

      pool_ptr->used_size += pool_ptr->data_size;
    }

    while (count > pool_ptr->max_preallocated && !list_empty(&pool_ptr->pending))
    {
      node_ptr = pool_ptr->pending.next;

      list_del(node_ptr);
      free(node_ptr);

      count--;

      pool_ptr->used_size -= pool_ptr->data_size;
    }

    pthread_mutex_unlock(&pool_ptr->mutex);
  }
  else
  {
    while (pool_ptr->unused_count < pool_ptr->min_preallocated)
    {
      node_ptr = malloc(sizeof(struct list_head) + pool_ptr->data_size);
      if (node_ptr == NULL)
      {
        LOG_DEBUG("malloc() failed (%u)", (unsigned int)pool_ptr->used_size);
        return;
      }

      list_add_tail(node_ptr, &pool_ptr->unused);
      pool_ptr->unused_count++;
      pool_ptr->used_size += pool_ptr->data_size;
    }

    while (pool_ptr->unused_count > pool_ptr->max_preallocated)
    {
      assert(!list_empty(&pool_ptr->unused));

      node_ptr = pool_ptr->unused.next;

      list_del(node_ptr);
      pool_ptr->unused_count--;
      free(node_ptr);

      pool_ptr->used_size -= pool_ptr->data_size;
    }
  }
}
/* find entry in unused list, fail if it is empty */
static
void *
rtsafe_memory_pool_allocate_atomic(
  LV2_RtMemPool_Handle pool_handle)
{
  struct list_head * node_ptr;
  struct list_head * pending_node_ptr;

  LOG_DEBUG("pool \"%s\", allocate (%u, %u)", pool_ptr->name, pool_ptr->used_count, pool_ptr->unused_count);

  if (list_empty(&pool_ptr->unused))
  {
    return NULL;
  }

  node_ptr = pool_ptr->unused.next;
  list_del(node_ptr);
  pool_ptr->unused_count--;
  pool_ptr->used_count++;
  list_add_tail(node_ptr, &pool_ptr->used);

  if (pool_ptr->enforce_thread_safety &&
      pthread_mutex_trylock(&pool_ptr->mutex) == 0)
  {
    /* use a separate pointer here so node_ptr keeps referring to the chunk
       being returned to the caller */
    while (pool_ptr->unused_count < pool_ptr->min_preallocated && !list_empty(&pool_ptr->pending))
    {
      pending_node_ptr = pool_ptr->pending.next;

      list_del(pending_node_ptr);
      list_add_tail(pending_node_ptr, &pool_ptr->unused);
      pool_ptr->unused_count++;
    }

    pool_ptr->unused_count2 = pool_ptr->unused_count;

    pthread_mutex_unlock(&pool_ptr->mutex);
  }

  LOG_DEBUG("pool \"%s\", allocated %p (%u)", pool_ptr->name, node_ptr + 1, pool_ptr->used_count);
  return (node_ptr + 1);
}
/* move from used to unused list */
static
void
rtsafe_memory_pool_deallocate(
  LV2_RtMemPool_Handle pool_handle,
  void * data)
{
  struct list_head * node_ptr;

  LOG_DEBUG("pool \"%s\", deallocate %p (%u)", pool_ptr->name, (struct list_head *)data - 1, pool_ptr->used_count);

  list_del((struct list_head *)data - 1);
  list_add_tail((struct list_head *)data - 1, &pool_ptr->unused);
  pool_ptr->used_count--;
  pool_ptr->unused_count++;

  if (pool_ptr->enforce_thread_safety &&
      pthread_mutex_trylock(&pool_ptr->mutex) == 0)
  {
    while (pool_ptr->unused_count > pool_ptr->max_preallocated)
    {
      assert(!list_empty(&pool_ptr->unused));

      node_ptr = pool_ptr->unused.next;

      list_del(node_ptr);
      list_add_tail(node_ptr, &pool_ptr->pending);
      pool_ptr->unused_count--;
    }

    pool_ptr->unused_count2 = pool_ptr->unused_count;

    pthread_mutex_unlock(&pool_ptr->mutex);
  }
}
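
/*
 * Blocking allocation: keep refilling the unused list (which may call
 * malloc()) until a chunk becomes available.  Must not be called from a
 * realtime thread.
 */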
static
void *
rtsafe_memory_pool_allocate_sleepy(
  LV2_RtMemPool_Handle pool_handle)
{
  void * data;

  LOG_DEBUG("pool \"%s\", allocate sleepy", pool_ptr->name);

  do
  {
    rtsafe_memory_pool_sleepy(pool_handle);
    data = rtsafe_memory_pool_allocate_atomic(pool_handle);
  }
  while (data == NULL);

  return data;
}

#undef pool_ptr
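
/*
 * Wrapper matching the create callback of LV2_RtMemPool_Pool; pools created
 * through it have thread-safety enforcement disabled.
 */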
static
bool
rtsafe_memory_pool_create2(
  LV2_RtMemPool_Handle * pool_handle_ptr,
  const char * pool_name,
  size_t data_size,
  size_t min_preallocated,
  size_t max_preallocated)
{
  return rtsafe_memory_pool_create(pool_handle_ptr, pool_name, data_size, min_preallocated, max_preallocated, false);
}
void
rtmempool_allocator_init(
  struct _LV2_RtMemPool_Pool * allocator_ptr)
{
  allocator_ptr->create = rtsafe_memory_pool_create2;
  allocator_ptr->destroy = rtsafe_memory_pool_destroy;
  allocator_ptr->allocate_atomic = rtsafe_memory_pool_allocate_atomic;
  allocator_ptr->allocate_sleepy = rtsafe_memory_pool_allocate_sleepy;
  allocator_ptr->deallocate = rtsafe_memory_pool_deallocate;
}
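
/*
 * Usage sketch (illustrative only; assumes the LV2_RtMemPool_Pool struct in
 * lv2/lv2_rtmempool.h exposes exactly the callbacks assigned above):
 *
 *   struct _LV2_RtMemPool_Pool allocator;
 *   LV2_RtMemPool_Handle pool;
 *
 *   rtmempool_allocator_init(&allocator);
 *
 *   // pool of 64-byte chunks, keeping between 16 and 32 entries preallocated
 *   if (allocator.create(&pool, "events", 64, 16, 32))
 *   {
 *     void * chunk = allocator.allocate_atomic(pool); // realtime-safe, may return NULL
 *     if (chunk != NULL)
 *       allocator.deallocate(pool, chunk);
 *     allocator.destroy(pool);
 *   }
 */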