Collection of tools useful for audio production

/* -*- Mode: C ; c-basic-offset: 2 -*- */
/*****************************************************************************
 *
 * Linux kernel header adapted for user-mode
 * The 2.6.17-rt1 version was used.
 *
 * Original copyright holders of this code are unknown, they were not
 * mentioned in the original file.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 *****************************************************************************/

#ifndef _LINUX_LIST_H
#define _LINUX_LIST_H

#include <stddef.h>

#if !defined(offsetof)
#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
#endif

/**
 * container_of - cast a member of a structure out to the containing structure
 * @ptr: the pointer to the member.
 * @type: the type of the container struct this is embedded in.
 * @member: the name of the member within the struct.
 */
#define container_of(ptr, type, member) (type *)((char *)(ptr) - offsetof(type,member))
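
/*
 * Usage sketch (illustration only, not part of the original header):
 * container_of() turns a pointer to a member back into a pointer to the
 * structure that embeds it. The struct and field names below are hypothetical.
 *
 *   struct frame {
 *     char label[16];
 *     int index;
 *   };
 *
 *   static struct frame * frame_from_index_ptr(int * index_ptr)
 *   {
 *     return container_of(index_ptr, struct frame, index);
 *   }
 */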

#define prefetch(x) (x = x)

/*
 * These are non-NULL pointers that will result in page faults
 * under normal circumstances, used to verify that nobody uses
 * non-initialized list entries.
 */
#define LIST_POISON1 ((void *) 0x00100100)
#define LIST_POISON2 ((void *) 0x00200200)

/*
 * Simple doubly linked list implementation.
 *
 * Some of the internal functions ("__xxx") are useful when
 * manipulating whole lists rather than single entries, as
 * sometimes we already know the next/prev entries and we can
 * generate better code by using them directly rather than
 * using the generic single-entry routines.
 */

struct list_head {
  struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

#define LIST_HEAD(name) \
  struct list_head name = LIST_HEAD_INIT(name)

static inline void INIT_LIST_HEAD(struct list_head *list)
{
  list->next = list;
  list->prev = list;
}

/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_add(struct list_head *new,
                              struct list_head *prev,
                              struct list_head *next)
{
  next->prev = new;
  new->next = next;
  new->prev = prev;
  prev->next = new;
}

/**
 * list_add - add a new entry
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 */
static inline void list_add(struct list_head *new, struct list_head *head)
{
  __list_add(new, head, head->next);
}

/**
 * list_add_tail - add a new entry
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 */
static inline void list_add_tail(struct list_head *new, struct list_head *head)
{
  __list_add(new, head->prev, head);
}
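
/*
 * Usage sketch (illustration only, not part of the original header): a
 * structure becomes a list node by embedding a struct list_head, and a list
 * is just a head that points to itself when empty. All names below are
 * hypothetical.
 *
 *   struct client {
 *     int id;
 *     struct list_head siblings;
 *   };
 *
 *   static LIST_HEAD(clients);               list head, initially empty
 *
 *   static void client_register(struct client * c)
 *   {
 *     list_add_tail(&c->siblings, &clients);   enqueue at the tail
 *   }
 */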

/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_add_rcu(struct list_head * new,
                                  struct list_head * prev, struct list_head * next)
{
  new->next = next;
  new->prev = prev;
  // smp_wmb();
  next->prev = new;
  prev->next = new;
}

/**
 * list_add_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_rcu(struct list_head *new, struct list_head *head)
{
  __list_add_rcu(new, head, head->next);
}

/**
 * list_add_tail_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_tail_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_tail_rcu(struct list_head *new,
                                     struct list_head *head)
{
  __list_add_rcu(new, head->prev, head);
}

/*
 * Delete a list entry by making the prev/next entries
 * point to each other.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_del(struct list_head * prev, struct list_head * next)
{
  next->prev = prev;
  prev->next = next;
}

/**
 * list_del - deletes entry from list.
 * @entry: the element to delete from the list.
 * Note: list_empty on entry does not return true after this, the entry is
 * in an undefined state.
 */
static inline void list_del(struct list_head *entry)
{
  __list_del(entry->prev, entry->next);
  entry->next = LIST_POISON1;
  entry->prev = LIST_POISON2;
}

/**
 * list_del_rcu - deletes entry from list without re-initialization
 * @entry: the element to delete from the list.
 *
 * Note: list_empty on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_del_rcu()
 * or list_add_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 *
 * Note that the caller is not permitted to immediately free
 * the newly deleted entry. Instead, either synchronize_rcu()
 * or call_rcu() must be used to defer freeing until an RCU
 * grace period has elapsed.
 */
static inline void list_del_rcu(struct list_head *entry)
{
  __list_del(entry->prev, entry->next);
  entry->prev = LIST_POISON2;
}

/*
 * list_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The old entry will be replaced with the new entry atomically.
 */
static inline void list_replace_rcu(struct list_head *old,
                                    struct list_head *new)
{
  new->next = old->next;
  new->prev = old->prev;
  // smp_wmb();
  new->next->prev = new;
  new->prev->next = new;
  old->prev = LIST_POISON2;
}

/**
 * list_del_init - deletes entry from list and reinitialize it.
 * @entry: the element to delete from the list.
 */
static inline void list_del_init(struct list_head *entry)
{
  __list_del(entry->prev, entry->next);
  INIT_LIST_HEAD(entry);
}

/**
 * list_move - delete from one list and add as another's head
 * @list: the entry to move
 * @head: the head that will precede our entry
 */
static inline void list_move(struct list_head *list, struct list_head *head)
{
  __list_del(list->prev, list->next);
  list_add(list, head);
}

/**
 * list_move_tail - delete from one list and add as another's tail
 * @list: the entry to move
 * @head: the head that will follow our entry
 */
static inline void list_move_tail(struct list_head *list,
                                  struct list_head *head)
{
  __list_del(list->prev, list->next);
  list_add_tail(list, head);
}
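
/*
 * Usage sketch (illustration only, not part of the original header):
 * list_move_tail() shuffles an entry from one list to another without
 * touching the payload. The "pending"/"done" lists are hypothetical.
 *
 *   static LIST_HEAD(pending);
 *   static LIST_HEAD(done);
 *
 *   static void mark_done(struct list_head * entry)
 *   {
 *     list_move_tail(entry, &done);   unlink from "pending", append to "done"
 *   }
 */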

/**
 * list_empty - tests whether a list is empty
 * @head: the list to test.
 */
static inline int list_empty(const struct list_head *head)
{
  return head->next == head;
}

/**
 * list_empty_careful - tests whether a list is
 * empty _and_ checks that no other CPU might be
 * in the process of still modifying either member
 *
 * NOTE: using list_empty_careful() without synchronization
 * can only be safe if the only activity that can happen
 * to the list entry is list_del_init(). Eg. it cannot be used
 * if another CPU could re-list_add() it.
 *
 * @head: the list to test.
 */
static inline int list_empty_careful(const struct list_head *head)
{
  struct list_head *next = head->next;
  return (next == head) && (next == head->prev);
}

static inline void __list_splice(struct list_head *list,
                                 struct list_head *head)
{
  struct list_head *first = list->next;
  struct list_head *last = list->prev;
  struct list_head *at = head->next;

  first->prev = head;
  head->next = first;

  last->next = at;
  at->prev = last;
}

/**
 * list_splice - join two lists
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 */
static inline void list_splice(struct list_head *list, struct list_head *head)
{
  if (!list_empty(list))
    __list_splice(list, head);
}

/**
 * list_splice_init - join two lists and reinitialise the emptied list.
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 *
 * The list at @list is reinitialised
 */
static inline void list_splice_init(struct list_head *list,
                                    struct list_head *head)
{
  if (!list_empty(list)) {
    __list_splice(list, head);
    INIT_LIST_HEAD(list);
  }
}
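
/*
 * Usage sketch (illustration only, not part of the original header):
 * list_splice_init() is a common way to hand a whole batch of queued entries
 * from one context to another; the source list ends up empty and immediately
 * reusable. The "incoming" list is hypothetical.
 *
 *   static LIST_HEAD(incoming);
 *
 *   static void drain_incoming(struct list_head * local)
 *   {
 *     INIT_LIST_HEAD(local);
 *     list_splice_init(&incoming, local);   "incoming" is now empty again
 *   }
 */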

/**
 * list_entry - get the struct for this entry
 * @ptr: the &struct list_head pointer.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_struct within the struct.
 */
#define list_entry(ptr, type, member) \
  container_of(ptr, type, member)

/**
 * list_for_each - iterate over a list
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 */
#define list_for_each(pos, head) \
  for (pos = (head)->next; prefetch(pos->next), pos != (head); \
       pos = pos->next)

/**
 * __list_for_each - iterate over a list
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 *
 * This variant differs from list_for_each() in that it's the
 * simplest possible list iteration code, no prefetching is done.
 * Use this for code that knows the list to be very short (empty
 * or 1 entry) most of the time.
 */
#define __list_for_each(pos, head) \
  for (pos = (head)->next; pos != (head); pos = pos->next)

/**
 * list_for_each_prev - iterate over a list backwards
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 */
#define list_for_each_prev(pos, head) \
  for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \
       pos = pos->prev)

/**
 * list_for_each_safe - iterate over a list safe against removal of list entry
 * @pos: the &struct list_head to use as a loop counter.
 * @n: another &struct list_head to use as temporary storage
 * @head: the head for your list.
 */
#define list_for_each_safe(pos, n, head) \
  for (pos = (head)->next, n = pos->next; pos != (head); \
       pos = n, n = pos->next)

/**
 * list_for_each_entry - iterate over list of given type
 * @pos: the type * to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry(pos, head, member) \
  for (pos = list_entry((head)->next, typeof(*pos), member); \
       prefetch(pos->member.next), &pos->member != (head); \
       pos = list_entry(pos->member.next, typeof(*pos), member))
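
/*
 * Usage sketch (illustration only, not part of the original header):
 * list_for_each_entry() walks the payload structures directly, so no explicit
 * list_entry() calls are needed in the loop body. "struct client" and its
 * members continue the hypothetical example from the earlier sketch.
 *
 *   static int count_clients(struct list_head * clients)
 *   {
 *     struct client * c;
 *     int count = 0;
 *
 *     list_for_each_entry(c, clients, siblings)
 *       count++;
 *
 *     return count;
 *   }
 */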

/**
 * list_for_each_entry_reverse - iterate backwards over list of given type.
 * @pos: the type * to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry_reverse(pos, head, member) \
  for (pos = list_entry((head)->prev, typeof(*pos), member); \
       prefetch(pos->member.prev), &pos->member != (head); \
       pos = list_entry(pos->member.prev, typeof(*pos), member))

/**
 * list_prepare_entry - prepare a pos entry for use as a start point in
 * list_for_each_entry_continue
 * @pos: the type * to use as a start point
 * @head: the head of the list
 * @member: the name of the list_struct within the struct.
 */
#define list_prepare_entry(pos, head, member) \
  ((pos) ? : list_entry(head, typeof(*pos), member))

/**
 * list_for_each_entry_continue - iterate over list of given type
 * continuing after existing point
 * @pos: the type * to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry_continue(pos, head, member) \
  for (pos = list_entry(pos->member.next, typeof(*pos), member); \
       prefetch(pos->member.next), &pos->member != (head); \
       pos = list_entry(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_from - iterate over list of given type
 * continuing from existing point
 * @pos: the type * to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry_from(pos, head, member) \
  for (; prefetch(pos->member.next), &pos->member != (head); \
       pos = list_entry(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @pos: the type * to use as a loop counter.
 * @n: another type * to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry_safe(pos, n, head, member) \
  for (pos = list_entry((head)->next, typeof(*pos), member), \
       n = list_entry(pos->member.next, typeof(*pos), member); \
       &pos->member != (head); \
       pos = n, n = list_entry(n->member.next, typeof(*n), member))
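
/*
 * Usage sketch (illustration only, not part of the original header): the
 * _safe variant caches the next entry up front, so the current entry may be
 * unlinked and freed inside the loop. "struct client" continues the earlier
 * hypothetical sketch; free() assumes the entries were heap-allocated.
 *
 *   static void remove_all_clients(struct list_head * clients)
 *   {
 *     struct client * c;
 *     struct client * next;
 *
 *     list_for_each_entry_safe(c, next, clients, siblings) {
 *       list_del(&c->siblings);
 *       free(c);
 *     }
 *   }
 */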

/**
 * list_for_each_entry_safe_continue - iterate over list of given type
 * continuing after existing point safe against removal of list entry
 * @pos: the type * to use as a loop counter.
 * @n: another type * to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry_safe_continue(pos, n, head, member) \
  for (pos = list_entry(pos->member.next, typeof(*pos), member), \
       n = list_entry(pos->member.next, typeof(*pos), member); \
       &pos->member != (head); \
       pos = n, n = list_entry(n->member.next, typeof(*n), member))

/**
 * list_for_each_entry_safe_from - iterate over list of given type
 * from existing point safe against removal of list entry
 * @pos: the type * to use as a loop counter.
 * @n: another type * to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry_safe_from(pos, n, head, member) \
  for (n = list_entry(pos->member.next, typeof(*pos), member); \
       &pos->member != (head); \
       pos = n, n = list_entry(n->member.next, typeof(*n), member))

/**
 * list_for_each_entry_safe_reverse - iterate backwards over list of given type safe against
 * removal of list entry
 * @pos: the type * to use as a loop counter.
 * @n: another type * to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry_safe_reverse(pos, n, head, member) \
  for (pos = list_entry((head)->prev, typeof(*pos), member), \
       n = list_entry(pos->member.prev, typeof(*pos), member); \
       &pos->member != (head); \
       pos = n, n = list_entry(n->member.prev, typeof(*n), member))

/**
 * list_for_each_rcu - iterate over an rcu-protected list
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_rcu(pos, head) \
  for (pos = (head)->next; \
       prefetch(rcu_dereference(pos)->next), pos != (head); \
       pos = pos->next)

#define __list_for_each_rcu(pos, head) \
  for (pos = (head)->next; \
       rcu_dereference(pos) != (head); \
       pos = pos->next)

/**
 * list_for_each_safe_rcu - iterate over an rcu-protected list safe
 * against removal of list entry
 * @pos: the &struct list_head to use as a loop counter.
 * @n: another &struct list_head to use as temporary storage
 * @head: the head for your list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_safe_rcu(pos, n, head) \
  for (pos = (head)->next; \
       n = rcu_dereference(pos)->next, pos != (head); \
       pos = n)

/**
 * list_for_each_entry_rcu - iterate over rcu list of given type
 * @pos: the type * to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_entry_rcu(pos, head, member) \
  for (pos = list_entry((head)->next, typeof(*pos), member); \
       prefetch(rcu_dereference(pos)->member.next), \
       &pos->member != (head); \
       pos = list_entry(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_continue_rcu - iterate over an rcu-protected list
 * continuing after existing point.
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_continue_rcu(pos, head) \
  for ((pos) = (pos)->next; \
       prefetch(rcu_dereference((pos))->next), (pos) != (head); \
       (pos) = (pos)->next)

/*
 * Double linked lists with a single pointer list head.
 * Mostly useful for hash tables where the two pointer list head is
 * too wasteful.
 * You lose the ability to access the tail in O(1).
 */

struct hlist_head {
  struct hlist_node *first;
};

struct hlist_node {
  struct hlist_node *next, **pprev;
};

#define HLIST_HEAD_INIT { .first = NULL }
#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)

static inline void INIT_HLIST_NODE(struct hlist_node *h)
{
  h->next = NULL;
  h->pprev = NULL;
}

static inline int hlist_unhashed(const struct hlist_node *h)
{
  return !h->pprev;
}

static inline int hlist_empty(const struct hlist_head *h)
{
  return !h->first;
}

static inline void __hlist_del(struct hlist_node *n)
{
  struct hlist_node *next = n->next;
  struct hlist_node **pprev = n->pprev;
  *pprev = next;
  if (next)
    next->pprev = pprev;
}

static inline void hlist_del(struct hlist_node *n)
{
  __hlist_del(n);
  n->next = LIST_POISON1;
  n->pprev = LIST_POISON2;
}

/**
 * hlist_del_rcu - deletes entry from hash list without re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: list_unhashed() on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry().
 */
static inline void hlist_del_rcu(struct hlist_node *n)
{
  __hlist_del(n);
  n->pprev = LIST_POISON2;
}

static inline void hlist_del_init(struct hlist_node *n)
{
  if (!hlist_unhashed(n)) {
    __hlist_del(n);
    INIT_HLIST_NODE(n);
  }
}

/*
 * hlist_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The old entry will be replaced with the new entry atomically.
 */
static inline void hlist_replace_rcu(struct hlist_node *old,
                                     struct hlist_node *new)
{
  struct hlist_node *next = old->next;

  new->next = next;
  new->pprev = old->pprev;
  // smp_wmb();
  if (next)
    new->next->pprev = &new->next;
  *new->pprev = new;
  old->pprev = LIST_POISON2;
}

static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
  struct hlist_node *first = h->first;
  n->next = first;
  if (first)
    first->pprev = &n->next;
  h->first = n;
  n->pprev = &h->first;
}
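
/*
 * Usage sketch (illustration only, not part of the original header): hlist
 * heads are a single pointer, which keeps hash-table bucket arrays small.
 * The bucket count, struct and hash function below are hypothetical.
 *
 *   #define PORT_HASH_SIZE 64
 *
 *   struct port {
 *     int id;
 *     struct hlist_node hash_node;
 *   };
 *
 *   static struct hlist_head port_hash[PORT_HASH_SIZE];   zero-initialized, all buckets empty
 *
 *   static void port_hash_insert(struct port * p)
 *   {
 *     hlist_add_head(&p->hash_node, &port_hash[p->id % PORT_HASH_SIZE]);
 *   }
 */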

/**
 * hlist_add_head_rcu - adds the specified element to the specified hlist,
 * while permitting racing traversals.
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs. Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_add_head_rcu(struct hlist_node *n,
                                      struct hlist_head *h)
{
  struct hlist_node *first = h->first;
  n->next = first;
  n->pprev = &h->first;
  // smp_wmb();
  if (first)
    first->pprev = &n->next;
  h->first = n;
}

/* next must be != NULL */
static inline void hlist_add_before(struct hlist_node *n,
                                    struct hlist_node *next)
{
  n->pprev = next->pprev;
  n->next = next;
  next->pprev = &n->next;
  *(n->pprev) = n;
}

static inline void hlist_add_after(struct hlist_node *n,
                                   struct hlist_node *next)
{
  next->next = n->next;
  n->next = next;
  next->pprev = &n->next;

  if (next->next)
    next->next->pprev = &next->next;
}

/**
 * hlist_add_before_rcu - adds the specified element to the specified hlist
 * before the specified node while permitting racing traversals.
 * @n: the new element to add to the hash list.
 * @next: the existing element to add the new element before.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_before_rcu(struct hlist_node *n,
                                        struct hlist_node *next)
{
  n->pprev = next->pprev;
  n->next = next;
  // smp_wmb();
  next->pprev = &n->next;
  *(n->pprev) = n;
}

/**
 * hlist_add_after_rcu - adds the specified element to the specified hlist
 * after the specified node while permitting racing traversals.
 * @prev: the existing element to add the new element after.
 * @n: the new element to add to the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_after_rcu(struct hlist_node *prev,
                                       struct hlist_node *n)
{
  n->next = prev->next;
  n->pprev = &prev->next;
  // smp_wmb();
  prev->next = n;
  if (n->next)
    n->next->pprev = &n->next;
}

#define hlist_entry(ptr, type, member) container_of(ptr,type,member)

#define hlist_for_each(pos, head) \
  for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \
       pos = pos->next)

#define hlist_for_each_safe(pos, n, head) \
  for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
       pos = n)

/**
 * hlist_for_each_entry - iterate over list of given type
 * @tpos: the type * to use as a loop counter.
 * @pos: the &struct hlist_node to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry(tpos, pos, head, member) \
  for (pos = (head)->first; \
       pos && ({ prefetch(pos->next); 1;}) && \
       ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
       pos = pos->next)
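
/*
 * Usage sketch (illustration only, not part of the original header): a bucket
 * lookup with hlist_for_each_entry() needs both a payload cursor and a node
 * cursor. The names below continue the hypothetical hash table from the
 * earlier sketch.
 *
 *   static struct port * port_hash_find(int id)
 *   {
 *     struct port * p;
 *     struct hlist_node * node;
 *
 *     hlist_for_each_entry(p, node, &port_hash[id % PORT_HASH_SIZE], hash_node) {
 *       if (p->id == id)
 *         return p;
 *     }
 *
 *     return NULL;
 *   }
 */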

/**
 * hlist_for_each_entry_continue - iterate over a hlist continuing after existing point
 * @tpos: the type * to use as a loop counter.
 * @pos: the &struct hlist_node to use as a loop counter.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue(tpos, pos, member) \
  for (pos = (pos)->next; \
       pos && ({ prefetch(pos->next); 1;}) && \
       ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
       pos = pos->next)

/**
 * hlist_for_each_entry_from - iterate over a hlist continuing from existing point
 * @tpos: the type * to use as a loop counter.
 * @pos: the &struct hlist_node to use as a loop counter.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_from(tpos, pos, member) \
  for (; pos && ({ prefetch(pos->next); 1;}) && \
       ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
       pos = pos->next)

/**
 * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @tpos: the type * to use as a loop counter.
 * @pos: the &struct hlist_node to use as a loop counter.
 * @n: another &struct hlist_node to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_safe(tpos, pos, n, head, member) \
  for (pos = (head)->first; \
       pos && ({ n = pos->next; 1; }) && \
       ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
       pos = n)

/**
 * hlist_for_each_entry_rcu - iterate over rcu list of given type
 * @tpos: the type * to use as a loop counter.
 * @pos: the &struct hlist_node to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define hlist_for_each_entry_rcu(tpos, pos, head, member) \
  for (pos = (head)->first; \
       rcu_dereference(pos) && ({ prefetch(pos->next); 1;}) && \
       ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
       pos = pos->next)

#endif /* _LINUX_LIST_H */