Collection of tools useful for audio production

/* -*- Mode: C ; c-basic-offset: 2 -*- */
/*****************************************************************************
 *
 * This file is part of zynjacku
 *
 * Copyright (C) 2006,2007,2008,2009 Nedko Arnaudov <nedko@arnaudov.name>
 * Copyright (C) 2012 Filipe Coelho <falktx@falktx.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 *****************************************************************************/

#include <stddef.h>
#include <stdbool.h>
#include <string.h>
#include <stdio.h>              /* sprintf */
#include <stdlib.h>
#include <assert.h>
#include <pthread.h>

#include "lv2/lv2_rtmempool.h"
#include "rtmempool.h"
#include "list.h"

//#define LOG_LEVEL LOG_LEVEL_DEBUG
#include "log.h"

struct rtsafe_memory_pool
{
  char name[LV2_RTSAFE_MEMORY_POOL_NAME_MAX];
  size_t data_size;
  size_t min_preallocated;
  size_t max_preallocated;

  unsigned int used_count;
  struct list_head unused;
  struct list_head used;
  unsigned int unused_count;

  bool enforce_thread_safety;
  /* next members are initialized/used only if enforce_thread_safety is true */
  pthread_mutex_t mutex;
  unsigned int unused_count2;
  struct list_head pending;

  size_t used_size;
};
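
/*
 * Each pool chunk is a single malloc()'ed block: a struct list_head header
 * used for list linkage, immediately followed by data_size bytes of payload
 * (callers receive and return the payload pointer, i.e. node_ptr + 1).
 *
 * When enforce_thread_safety is true, a non-realtime thread may call
 * rtsafe_memory_pool_sleepy() concurrently with the realtime allocate /
 * deallocate calls; the mutex then protects only the "pending" list and
 * unused_count2, while "used" and "unused" stay private to the realtime side.
 */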

static
void
rtsafe_memory_pool_sleepy(
  LV2_RtMemPool_Handle pool_handle);

static
bool
rtsafe_memory_pool_create(
  LV2_RtMemPool_Handle pool_handle,
  const char * pool_name,
  size_t data_size,
  size_t min_preallocated,
  size_t max_preallocated,
  bool enforce_thread_safety)
{
  int ret;
  struct rtsafe_memory_pool * pool_ptr;

  assert(min_preallocated <= max_preallocated);
  assert(pool_name == NULL || strlen(pool_name) < LV2_RTSAFE_MEMORY_POOL_NAME_MAX);

  LOG_DEBUG(
    "creating pool \"%s\" (size %u, min = %u, max = %u, enforce = %s)",
    pool_name,
    (unsigned int)data_size,
    (unsigned int)min_preallocated,
    (unsigned int)max_preallocated,
    enforce_thread_safety ? "true" : "false");

  pool_ptr = (struct rtsafe_memory_pool *)pool_handle;
  if (pool_ptr == NULL)
  {
    return false;
  }

  if (pool_name != NULL)
  {
    strcpy(pool_ptr->name, pool_name);
  }
  else
  {
    sprintf(pool_ptr->name, "%p", pool_ptr);
  }

  pool_ptr->data_size = data_size;
  pool_ptr->min_preallocated = min_preallocated;
  pool_ptr->max_preallocated = max_preallocated;

  INIT_LIST_HEAD(&pool_ptr->used);
  pool_ptr->used_count = 0;

  INIT_LIST_HEAD(&pool_ptr->unused);
  pool_ptr->unused_count = 0;

  pool_ptr->enforce_thread_safety = enforce_thread_safety;
  if (enforce_thread_safety)
  {
    ret = pthread_mutex_init(&pool_ptr->mutex, NULL);
    if (ret != 0)
    {
      /* the handle is owned (and eventually freed) by the caller, see
       * rtmempool_allocator_init() / rtmempool_allocator_free() */
      return false;
    }

    INIT_LIST_HEAD(&pool_ptr->pending);
    pool_ptr->unused_count2 = 0;
  }

  pool_ptr->used_size = 0;

  rtsafe_memory_pool_sleepy(pool_handle);

  return true;
}

#define pool_ptr ((struct rtsafe_memory_pool *)pool_handle)

static
void
rtsafe_memory_pool_destroy(
  LV2_RtMemPool_Handle pool_handle)
{
  int ret;
  struct list_head * node_ptr;

  LOG_DEBUG("destroying pool \"%s\"", pool_ptr->name);

  /* the caller should deallocate all chunks prior to releasing the pool itself */
  if (pool_ptr->used_count != 0)
  {
    LOG_WARNING("Deallocating non-empty pool \"%s\", leaking %u entries:", pool_ptr->name, pool_ptr->used_count);

    list_for_each(node_ptr, &pool_ptr->used)
    {
      LOG_WARNING(" %p", node_ptr + 1);
    }

    assert(0);
  }

  while (pool_ptr->unused_count != 0)
  {
    assert(!list_empty(&pool_ptr->unused));

    node_ptr = pool_ptr->unused.next;

    list_del(node_ptr);
    pool_ptr->unused_count--;

    free(node_ptr);
  }

  assert(list_empty(&pool_ptr->unused));

  if (pool_ptr->enforce_thread_safety)
  {
    while (!list_empty(&pool_ptr->pending))
    {
      node_ptr = pool_ptr->pending.next;

      list_del(node_ptr);
      free(node_ptr);
    }

    ret = pthread_mutex_destroy(&pool_ptr->mutex);
    assert(ret == 0);
  }

  /* silence unused-variable compiler warnings */
  (void)ret;
}
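
/* rtsafe_memory_pool_sleepy() is where chunks are actually malloc()'ed and
 * free()'d during normal operation, so it must run from a non-realtime
 * ("sleepy") context; the realtime allocate/deallocate calls only move
 * already-allocated chunks between lists. */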
/* adjust unused list size */
static
void
rtsafe_memory_pool_sleepy(
  LV2_RtMemPool_Handle pool_handle)
{
  struct list_head * node_ptr;
  unsigned int count;

  LOG_DEBUG("pool \"%s\", sleepy", pool_ptr->name);

  if (pool_ptr->enforce_thread_safety)
  {
    pthread_mutex_lock(&pool_ptr->mutex);

    count = pool_ptr->unused_count2;

    assert(pool_ptr->min_preallocated < pool_ptr->max_preallocated);

    while (count < pool_ptr->min_preallocated)
    {
      node_ptr = malloc(sizeof(struct list_head) + pool_ptr->data_size);
      if (node_ptr == NULL)
      {
        LOG_DEBUG("malloc() failed (%u)", (unsigned int)pool_ptr->used_size);
        break;
      }

      list_add_tail(node_ptr, &pool_ptr->pending);

      count++;

      pool_ptr->used_size += pool_ptr->data_size;
    }

    while (count > pool_ptr->max_preallocated && !list_empty(&pool_ptr->pending))
    {
      node_ptr = pool_ptr->pending.next;

      list_del(node_ptr);
      free(node_ptr);

      count--;

      pool_ptr->used_size -= pool_ptr->data_size;
    }

    pthread_mutex_unlock(&pool_ptr->mutex);
  }
  else
  {
    while (pool_ptr->unused_count < pool_ptr->min_preallocated)
    {
      node_ptr = malloc(sizeof(struct list_head) + pool_ptr->data_size);
      if (node_ptr == NULL)
      {
        LOG_DEBUG("malloc() failed (%u)", (unsigned int)pool_ptr->used_size);
        return;
      }

      list_add_tail(node_ptr, &pool_ptr->unused);
      pool_ptr->unused_count++;
      pool_ptr->used_size += pool_ptr->data_size;
    }

    while (pool_ptr->unused_count > pool_ptr->max_preallocated)
    {
      assert(!list_empty(&pool_ptr->unused));

      node_ptr = pool_ptr->unused.next;

      list_del(node_ptr);
      pool_ptr->unused_count--;
      free(node_ptr);

      pool_ptr->used_size -= pool_ptr->data_size;
    }
  }
}
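
/* Realtime-safe: no blocking locks and no malloc().  When thread safety is
 * enforced and the mutex happens to be free, chunks prepared by the sleepy
 * thread are pulled from "pending" into "unused" to keep the pool topped up. */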
/* find entry in unused list, fail if it is empty */
static
void *
rtsafe_memory_pool_allocate_atomic(
  LV2_RtMemPool_Handle pool_handle)
{
  struct list_head * node_ptr;

  LOG_DEBUG("pool \"%s\", allocate (%u, %u)", pool_ptr->name, pool_ptr->used_count, pool_ptr->unused_count);

  if (list_empty(&pool_ptr->unused))
  {
    return NULL;
  }

  node_ptr = pool_ptr->unused.next;
  list_del(node_ptr);
  pool_ptr->unused_count--;
  pool_ptr->used_count++;
  list_add_tail(node_ptr, &pool_ptr->used);

  if (pool_ptr->enforce_thread_safety &&
      pthread_mutex_trylock(&pool_ptr->mutex) == 0)
  {
    /* use a separate pointer here so node_ptr keeps pointing at the chunk
     * being returned to the caller */
    struct list_head * pending_node_ptr;

    while (pool_ptr->unused_count < pool_ptr->min_preallocated && !list_empty(&pool_ptr->pending))
    {
      pending_node_ptr = pool_ptr->pending.next;

      list_del(pending_node_ptr);
      list_add_tail(pending_node_ptr, &pool_ptr->unused);
      pool_ptr->unused_count++;
    }

    pool_ptr->unused_count2 = pool_ptr->unused_count;

    pthread_mutex_unlock(&pool_ptr->mutex);
  }

  LOG_DEBUG("pool \"%s\", allocated %p (%u)", pool_ptr->name, node_ptr + 1, pool_ptr->used_count);

  return (node_ptr + 1);
}
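
/* Realtime-safe: the chunk simply goes back on the unused list.  When thread
 * safety is enforced and the mutex is free, any excess beyond max_preallocated
 * is handed to "pending" so rtsafe_memory_pool_sleepy() can free() it later. */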
/* move from used to unused list */
static
void
rtsafe_memory_pool_deallocate(
  LV2_RtMemPool_Handle pool_handle,
  void * data)
{
  struct list_head * node_ptr;

  LOG_DEBUG("pool \"%s\", deallocate %p (%u)", pool_ptr->name, (struct list_head *)data - 1, pool_ptr->used_count);

  list_del((struct list_head *)data - 1);
  list_add_tail((struct list_head *)data - 1, &pool_ptr->unused);
  pool_ptr->used_count--;
  pool_ptr->unused_count++;

  if (pool_ptr->enforce_thread_safety &&
      pthread_mutex_trylock(&pool_ptr->mutex) == 0)
  {
    while (pool_ptr->unused_count > pool_ptr->max_preallocated)
    {
      assert(!list_empty(&pool_ptr->unused));

      node_ptr = pool_ptr->unused.next;

      list_del(node_ptr);
      list_add_tail(node_ptr, &pool_ptr->pending);
      pool_ptr->unused_count--;
    }

    pool_ptr->unused_count2 = pool_ptr->unused_count;

    pthread_mutex_unlock(&pool_ptr->mutex);
  }
}
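
/* Allocate from a non-realtime context: keeps refilling the pool and retrying
 * until a chunk becomes available (may block on the mutex and call malloc()). */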
static
void *
rtsafe_memory_pool_allocate_sleepy(
  LV2_RtMemPool_Handle pool_handle)
{
  void * data;

  LOG_DEBUG("pool \"%s\", allocate sleepy", pool_ptr->name);

  do
  {
    rtsafe_memory_pool_sleepy(pool_handle);
    data = rtsafe_memory_pool_allocate_atomic(pool_handle);
  }
  while (data == NULL);

  return data;
}

#undef pool_ptr

static
bool
rtsafe_memory_pool_create2(
  LV2_RtMemPool_Handle pool_handle,
  const char * pool_name,
  size_t data_size,
  size_t min_preallocated,
  size_t max_preallocated)
{
  return rtsafe_memory_pool_create(pool_handle, pool_name, data_size, min_preallocated, max_preallocated, false);
}
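
/* Fill in the struct _LV2_RtMemPool_Pool function table and allocate the
 * pool storage that will serve as its handle. */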
void
rtmempool_allocator_init(
  struct _LV2_RtMemPool_Pool * allocator_ptr)
{
  allocator_ptr->handle = (LV2_RtMemPool_Handle)malloc(sizeof(struct rtsafe_memory_pool));

  allocator_ptr->create = rtsafe_memory_pool_create2;
  allocator_ptr->destroy = rtsafe_memory_pool_destroy;
  allocator_ptr->allocate_atomic = rtsafe_memory_pool_allocate_atomic;
  allocator_ptr->allocate_sleepy = rtsafe_memory_pool_allocate_sleepy;
  allocator_ptr->deallocate = rtsafe_memory_pool_deallocate;
}
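
/* Release the pool storage allocated by rtmempool_allocator_init(). */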
void
rtmempool_allocator_free(
  const struct _LV2_RtMemPool_Pool * allocator_ptr)
{
  if (allocator_ptr->handle)
    free((struct rtsafe_memory_pool *)allocator_ptr->handle);
}
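
/*
 * Illustrative usage sketch (not part of the original file): it shows how a
 * host might drive the allocator defined above.  The struct and function
 * names come from this file; "my_event" and example() are hypothetical, and
 * the sketch is guarded with #if 0 so it does not affect the build.
 */
#if 0
struct my_event
{
  int type;
  float value;
};

static void example(void)
{
  struct _LV2_RtMemPool_Pool allocator;
  struct my_event * ev;

  rtmempool_allocator_init(&allocator);

  /* non-realtime thread: create the pool and preallocate 16..32 chunks */
  if (!allocator.create(allocator.handle, "events", sizeof(struct my_event), 16, 32))
  {
    rtmempool_allocator_free(&allocator);
    return;
  }

  /* realtime thread: take a preallocated chunk; NULL means the pool ran dry */
  ev = allocator.allocate_atomic(allocator.handle);
  if (ev != NULL)
  {
    ev->type = 1;
    ev->value = 0.5f;
    allocator.deallocate(allocator.handle, ev);
  }

  /* non-realtime thread: tear everything down */
  allocator.destroy(allocator.handle);
  rtmempool_allocator_free(&allocator);
}
#endif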