jack1 codebase
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

915 lines
28KB

  1. /* -*- Mode: C ; c-basic-offset: 2 -*- */
  2. /*****************************************************************************
  3. *
  4. * Linux kernel header adapted for user-mode
  5. * The 2.6.17-rt1 version was used.
  6. *
  7. * Original copyright holders of this code are unknown, they were not
  8. * mentioned in the original file.
  9. *
  10. * This program is free software; you can redistribute it and/or modify
  11. * it under the terms of the GNU General Public License as published by
  12. * the Free Software Foundation; version 2 of the License
  13. *
  14. * This program is distributed in the hope that it will be useful,
  15. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  17. * GNU General Public License for more details.
  18. *
  19. * You should have received a copy of the GNU General Public License
  20. * along with this program; if not, write to the Free Software
  21. * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  22. *
  23. *****************************************************************************/
#ifndef _LINUX_LIST_H
#define _LINUX_LIST_H

#include <stddef.h>

/* <stddef.h> defines offsetof on hosted toolchains; this fallback is the
 * classic null-pointer form for compilers that lack it. */
#if !defined(offsetof)
#define offsetof(TYPE, MEMBER) ((size_t)&((TYPE*)0)->MEMBER)
#endif
/**
 * container_of - cast a member of a structure out to the containing structure
 * @ptr: the pointer to the member.
 * @type: the type of the container struct this is embedded in.
 * @member: the name of the member within the struct.
 *
 * Uses a GNU statement expression plus typeof so @ptr is type-checked
 * against the member before the pointer arithmetic is performed.
 */
#define container_of(ptr, type, member) ({ \
  const typeof( ((type*)0)->member ) * __mptr = (ptr); \
  (type*)( (char*)__mptr - offsetof (type, member) ); })
  40. #define prefetch(x) (x = x)
/*
 * These are non-NULL pointers that will result in page faults
 * under normal circumstances, used to verify that nobody uses
 * non-initialized list entries.
 *
 * LIST_POISON1 is stored into ->next and LIST_POISON2 into ->prev by the
 * deletion helpers below.
 */
#define LIST_POISON1 ((void*)0x00100100)
#define LIST_POISON2 ((void*)0x00200200)
/*
 * Simple doubly linked list implementation.
 *
 * Some of the internal functions ("__xxx") are useful when
 * manipulating whole lists rather than single entries, as
 * sometimes we already know the next/prev entries and we can
 * generate better code by using them directly rather than
 * using the generic single-entry routines.
 */
struct list_head {
  struct list_head *next, *prev;
};

/* Static initializer: an empty list points at itself in both directions. */
#define LIST_HEAD_INIT(name) { &(name), &(name) }

/* Define and initialize an empty list head in one statement. */
#define LIST_HEAD(name) \
  struct list_head name = LIST_HEAD_INIT (name)
  63. static inline void INIT_LIST_HEAD (struct list_head *list)
  64. {
  65. list->next = list;
  66. list->prev = list;
  67. }
  68. /*
  69. * Insert a new entry between two known consecutive entries.
  70. *
  71. * This is only for internal list manipulation where we know
  72. * the prev/next entries already!
  73. */
  74. static inline void __list_add (struct list_head *new,
  75. struct list_head *prev,
  76. struct list_head *next)
  77. {
  78. next->prev = new;
  79. new->next = next;
  80. new->prev = prev;
  81. prev->next = new;
  82. }
  83. /**
  84. * list_add - add a new entry
  85. * @new: new entry to be added
  86. * @head: list head to add it after
  87. *
  88. * Insert a new entry after the specified head.
  89. * This is good for implementing stacks.
  90. */
  91. static inline void list_add (struct list_head *new, struct list_head *head)
  92. {
  93. __list_add (new, head, head->next);
  94. }
  95. /**
  96. * list_add_tail - add a new entry
  97. * @new: new entry to be added
  98. * @head: list head to add it before
  99. *
  100. * Insert a new entry before the specified head.
  101. * This is useful for implementing queues.
  102. */
  103. static inline void list_add_tail (struct list_head *new, struct list_head *head)
  104. {
  105. __list_add (new, head->prev, head);
  106. }
/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 *
 * Statement order matters for concurrent readers: @new is fully
 * initialized before the neighbours are made to point at it.  The
 * kernel places a write barrier at the marked point; it is commented
 * out in this user-mode adaptation.
 */
static inline void __list_add_rcu (struct list_head * new,
                                   struct list_head * prev, struct list_head * next)
{
  new->next = next;
  new->prev = prev;
  // smp_wmb();  /* publish barrier in the original kernel source */
  next->prev = new;
  prev->next = new;
}
  122. /**
  123. * list_add_rcu - add a new entry to rcu-protected list
  124. * @new: new entry to be added
  125. * @head: list head to add it after
  126. *
  127. * Insert a new entry after the specified head.
  128. * This is good for implementing stacks.
  129. *
  130. * The caller must take whatever precautions are necessary
  131. * (such as holding appropriate locks) to avoid racing
  132. * with another list-mutation primitive, such as list_add_rcu()
  133. * or list_del_rcu(), running on this same list.
  134. * However, it is perfectly legal to run concurrently with
  135. * the _rcu list-traversal primitives, such as
  136. * list_for_each_entry_rcu().
  137. */
  138. static inline void list_add_rcu (struct list_head *new, struct list_head *head)
  139. {
  140. __list_add_rcu (new, head, head->next);
  141. }
  142. /**
  143. * list_add_tail_rcu - add a new entry to rcu-protected list
  144. * @new: new entry to be added
  145. * @head: list head to add it before
  146. *
  147. * Insert a new entry before the specified head.
  148. * This is useful for implementing queues.
  149. *
  150. * The caller must take whatever precautions are necessary
  151. * (such as holding appropriate locks) to avoid racing
  152. * with another list-mutation primitive, such as list_add_tail_rcu()
  153. * or list_del_rcu(), running on this same list.
  154. * However, it is perfectly legal to run concurrently with
  155. * the _rcu list-traversal primitives, such as
  156. * list_for_each_entry_rcu().
  157. */
  158. static inline void list_add_tail_rcu (struct list_head *new,
  159. struct list_head *head)
  160. {
  161. __list_add_rcu (new, head->prev, head);
  162. }
  163. /*
  164. * Delete a list entry by making the prev/next entries
  165. * point to each other.
  166. *
  167. * This is only for internal list manipulation where we know
  168. * the prev/next entries already!
  169. */
  170. static inline void __list_del (struct list_head * prev, struct list_head * next)
  171. {
  172. next->prev = prev;
  173. prev->next = next;
  174. }
  175. /**
  176. * list_del - deletes entry from list.
  177. * @entry: the element to delete from the list.
  178. * Note: list_empty on entry does not return true after this, the entry is
  179. * in an undefined state.
  180. */
  181. static inline void list_del (struct list_head *entry)
  182. {
  183. __list_del (entry->prev, entry->next);
  184. entry->next = LIST_POISON1;
  185. entry->prev = LIST_POISON2;
  186. }
/**
 * list_del_rcu - deletes entry from list without re-initialization
 * @entry: the element to delete from the list.
 *
 * Note: list_empty on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_del_rcu()
 * or list_add_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 *
 * Note that the caller is not permitted to immediately free
 * the newly deleted entry. Instead, either synchronize_rcu()
 * or call_rcu() must be used to defer freeing until an RCU
 * grace period has elapsed.
 */
static inline void list_del_rcu (struct list_head *entry)
{
  __list_del (entry->prev, entry->next);
  /* Only ->prev is poisoned: concurrent readers may still follow ->next. */
  entry->prev = LIST_POISON2;
}
/**
 * list_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The old entry will be replaced with the new entry atomically.
 * Statement order matters: @new is fully initialized before the
 * neighbours are redirected to it (the kernel has a write barrier at
 * the marked point).  @old's forward pointer is left intact for any
 * concurrent reader still standing on it.
 */
static inline void list_replace_rcu (struct list_head *old,
                                     struct list_head *new)
{
  new->next = old->next;
  new->prev = old->prev;
  // smp_wmb();  /* publish barrier in the original kernel source */
  new->next->prev = new;
  new->prev->next = new;
  old->prev = LIST_POISON2;
}
  233. /**
  234. * list_del_init - deletes entry from list and reinitialize it.
  235. * @entry: the element to delete from the list.
  236. */
  237. static inline void list_del_init (struct list_head *entry)
  238. {
  239. __list_del (entry->prev, entry->next);
  240. INIT_LIST_HEAD (entry);
  241. }
  242. /**
  243. * list_move - delete from one list and add as another's head
  244. * @list: the entry to move
  245. * @head: the head that will precede our entry
  246. */
  247. static inline void list_move (struct list_head *list, struct list_head *head)
  248. {
  249. __list_del (list->prev, list->next);
  250. list_add (list, head);
  251. }
  252. /**
  253. * list_move_tail - delete from one list and add as another's tail
  254. * @list: the entry to move
  255. * @head: the head that will follow our entry
  256. */
  257. static inline void list_move_tail (struct list_head *list,
  258. struct list_head *head)
  259. {
  260. __list_del (list->prev, list->next);
  261. list_add_tail (list, head);
  262. }
  263. /**
  264. * list_empty - tests whether a list is empty
  265. * @head: the list to test.
  266. */
  267. static inline int list_empty (const struct list_head *head)
  268. {
  269. return head->next == head;
  270. }
/**
 * list_empty_careful - tests whether a list is
 * empty _and_ checks that no other CPU might be
 * in the process of still modifying either member
 *
 * NOTE: using list_empty_careful() without synchronization
 * can only be safe if the only activity that can happen
 * to the list entry is list_del_init(). Eg. it cannot be used
 * if another CPU could re-list_add() it.
 *
 * @head: the list to test.
 */
static inline int list_empty_careful (const struct list_head *head)
{
  /* ->next is read exactly once, then cross-checked against ->prev. */
  struct list_head *next = head->next;
  return (next == head) && (next == head->prev);
}
  288. static inline void __list_splice (struct list_head *list,
  289. struct list_head *head)
  290. {
  291. struct list_head *first = list->next;
  292. struct list_head *last = list->prev;
  293. struct list_head *at = head->next;
  294. first->prev = head;
  295. head->next = first;
  296. last->next = at;
  297. at->prev = last;
  298. }
  299. /**
  300. * list_splice - join two lists
  301. * @list: the new list to add.
  302. * @head: the place to add it in the first list.
  303. */
  304. static inline void list_splice (struct list_head *list, struct list_head *head)
  305. {
  306. if (!list_empty (list)) {
  307. __list_splice (list, head);
  308. }
  309. }
  310. /**
  311. * list_splice_init - join two lists and reinitialise the emptied list.
  312. * @list: the new list to add.
  313. * @head: the place to add it in the first list.
  314. *
  315. * The list at @list is reinitialised
  316. */
  317. static inline void list_splice_init (struct list_head *list,
  318. struct list_head *head)
  319. {
  320. if (!list_empty (list)) {
  321. __list_splice (list, head);
  322. INIT_LIST_HEAD (list);
  323. }
  324. }
/**
 * list_entry - get the struct for this entry
 * @ptr: the &struct list_head pointer.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_struct within the struct.
 */
#define list_entry(ptr, type, member) \
  container_of (ptr, type, member)

/**
 * list_for_each - iterate over a list
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 */
#define list_for_each(pos, head) \
  for (pos = (head)->next; prefetch (pos->next), pos != (head); \
       pos = pos->next)

/**
 * __list_for_each - iterate over a list
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 *
 * This variant differs from list_for_each() in that it's the
 * simplest possible list iteration code, no prefetching is done.
 * Use this for code that knows the list to be very short (empty
 * or 1 entry) most of the time.
 */
#define __list_for_each(pos, head) \
  for (pos = (head)->next; pos != (head); pos = pos->next)

/**
 * list_for_each_prev - iterate over a list backwards
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 */
#define list_for_each_prev(pos, head) \
  for (pos = (head)->prev; prefetch (pos->prev), pos != (head); \
       pos = pos->prev)

/**
 * list_for_each_safe - iterate over a list safe against removal of list entry
 * @pos: the &struct list_head to use as a loop counter.
 * @n: another &struct list_head to use as temporary storage
 * @head: the head for your list.
 *
 * @pos may be removed (e.g. list_del) inside the loop body because the
 * successor is latched in @n before the body runs.
 */
#define list_for_each_safe(pos, n, head) \
  for (pos = (head)->next, n = pos->next; pos != (head); \
       pos = n, n = pos->next)
/**
 * list_for_each_entry - iterate over list of given type
 * @pos: the type * to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry(pos, head, member) \
  for (pos = list_entry ((head)->next, typeof(*pos), member); \
       prefetch (pos->member.next), &pos->member != (head); \
       pos = list_entry (pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_reverse - iterate backwards over list of given type.
 * @pos: the type * to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry_reverse(pos, head, member) \
  for (pos = list_entry ((head)->prev, typeof(*pos), member); \
       prefetch (pos->member.prev), &pos->member != (head); \
       pos = list_entry (pos->member.prev, typeof(*pos), member))

/**
 * list_prepare_entry - prepare a pos entry for use as a start point in
 * list_for_each_entry_continue
 * @pos: the type * to use as a start point
 * @head: the head of the list
 * @member: the name of the list_struct within the struct.
 *
 * If @pos is NULL, yields the (bogus) entry containing @head itself, so
 * a following _continue loop starts from the first real entry.
 */
#define list_prepare_entry(pos, head, member) \
  ((pos) ? : list_entry (head, typeof(*pos), member))

/**
 * list_for_each_entry_continue - iterate over list of given type
 * continuing after existing point
 * @pos: the type * to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry_continue(pos, head, member) \
  for (pos = list_entry (pos->member.next, typeof(*pos), member); \
       prefetch (pos->member.next), &pos->member != (head); \
       pos = list_entry (pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_from - iterate over list of given type
 * continuing from existing point (@pos itself is visited first)
 * @pos: the type * to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry_from(pos, head, member) \
  for (; prefetch (pos->member.next), &pos->member != (head); \
       pos = list_entry (pos->member.next, typeof(*pos), member))
/**
 * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @pos: the type * to use as a loop counter.
 * @n: another type * to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry_safe(pos, n, head, member) \
  for (pos = list_entry ((head)->next, typeof(*pos), member), \
       n = list_entry (pos->member.next, typeof(*pos), member); \
       &pos->member != (head); \
       pos = n, n = list_entry (n->member.next, typeof(*n), member))

/**
 * list_for_each_entry_safe_continue - iterate over list of given type
 * continuing after existing point safe against removal of list entry
 * @pos: the type * to use as a loop counter.
 * @n: another type * to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry_safe_continue(pos, n, head, member) \
  for (pos = list_entry (pos->member.next, typeof(*pos), member), \
       n = list_entry (pos->member.next, typeof(*pos), member); \
       &pos->member != (head); \
       pos = n, n = list_entry (n->member.next, typeof(*n), member))

/**
 * list_for_each_entry_safe_from - iterate over list of given type
 * from existing point (@pos visited first) safe against removal of list entry
 * @pos: the type * to use as a loop counter.
 * @n: another type * to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry_safe_from(pos, n, head, member) \
  for (n = list_entry (pos->member.next, typeof(*pos), member); \
       &pos->member != (head); \
       pos = n, n = list_entry (n->member.next, typeof(*n), member))

/**
 * list_for_each_entry_safe_reverse - iterate backwards over list of given type safe against
 * removal of list entry
 * @pos: the type * to use as a loop counter.
 * @n: another type * to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry_safe_reverse(pos, n, head, member) \
  for (pos = list_entry ((head)->prev, typeof(*pos), member), \
       n = list_entry (pos->member.prev, typeof(*pos), member); \
       &pos->member != (head); \
       pos = n, n = list_entry (n->member.prev, typeof(*n), member))
/*
 * NOTE(review): rcu_dereference() is not defined anywhere in this header;
 * users of the macros below must supply it (in user mode presumably a
 * plain read) — confirm against the rest of the codebase.
 */

/**
 * list_for_each_rcu - iterate over an rcu-protected list
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_rcu(pos, head) \
  for (pos = (head)->next; \
       prefetch (rcu_dereference (pos)->next), pos != (head); \
       pos = pos->next)

/* As list_for_each_rcu but without the prefetch. */
#define __list_for_each_rcu(pos, head) \
  for (pos = (head)->next; \
       rcu_dereference (pos) != (head); \
       pos = pos->next)

/**
 * list_for_each_safe_rcu - iterate over an rcu-protected list safe
 * against removal of list entry
 * @pos: the &struct list_head to use as a loop counter.
 * @n: another &struct list_head to use as temporary storage
 * @head: the head for your list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_safe_rcu(pos, n, head) \
  for (pos = (head)->next; \
       n = rcu_dereference (pos)->next, pos != (head); \
       pos = n)

/**
 * list_for_each_entry_rcu - iterate over rcu list of given type
 * @pos: the type * to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_entry_rcu(pos, head, member) \
  for (pos = list_entry ((head)->next, typeof(*pos), member); \
       prefetch (rcu_dereference (pos)->member.next), \
       &pos->member != (head); \
       pos = list_entry (pos->member.next, typeof(*pos), member))

/**
 * list_for_each_continue_rcu - iterate over an rcu-protected list
 * continuing after existing point.
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_continue_rcu(pos, head) \
  for ((pos) = (pos)->next; \
       prefetch (rcu_dereference ((pos))->next), (pos) != (head); \
       (pos) = (pos)->next)
/*
 * Double linked lists with a single pointer list head.
 * Mostly useful for hash tables where the two pointer list head is
 * too wasteful.
 * You lose the ability to access the tail in O(1).
 */
struct hlist_head {
  struct hlist_node *first;
};

/* ->pprev points at the previous node's ->next slot (or the head's
 * ->first), which is what makes O(1) unlink possible with one-word heads. */
struct hlist_node {
  struct hlist_node *next, **pprev;
};

#define HLIST_HEAD_INIT { .first = NULL }
#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
  546. static inline void INIT_HLIST_NODE (struct hlist_node *h)
  547. {
  548. h->next = NULL;
  549. h->pprev = NULL;
  550. }
  551. static inline int hlist_unhashed (const struct hlist_node *h)
  552. {
  553. return !h->pprev;
  554. }
  555. static inline int hlist_empty (const struct hlist_head *h)
  556. {
  557. return !h->first;
  558. }
  559. static inline void __hlist_del (struct hlist_node *n)
  560. {
  561. struct hlist_node *next = n->next;
  562. struct hlist_node **pprev = n->pprev;
  563. *pprev = next;
  564. if (next) {
  565. next->pprev = pprev;
  566. }
  567. }
  568. static inline void hlist_del (struct hlist_node *n)
  569. {
  570. __hlist_del (n);
  571. n->next = LIST_POISON1;
  572. n->pprev = LIST_POISON2;
  573. }
/**
 * hlist_del_rcu - deletes entry from hash list without re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: list_unhashed() on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry().
 */
static inline void hlist_del_rcu (struct hlist_node *n)
{
  __hlist_del (n);
  /* Only ->pprev is poisoned: readers may still follow ->next. */
  n->pprev = LIST_POISON2;
}
  598. static inline void hlist_del_init (struct hlist_node *n)
  599. {
  600. if (!hlist_unhashed (n)) {
  601. __hlist_del (n);
  602. INIT_HLIST_NODE (n);
  603. }
  604. }
/**
 * hlist_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The old entry will be replaced with the new entry atomically.
 * Statement order matters for concurrent readers: @new is fully wired
 * before any neighbour is redirected to it (kernel write barrier at the
 * marked point).  @old's ->next stays valid for readers standing on it.
 */
static inline void hlist_replace_rcu (struct hlist_node *old,
                                      struct hlist_node *new)
{
  struct hlist_node *next = old->next;
  new->next = next;
  new->pprev = old->pprev;
  // smp_wmb();  /* publish barrier in the original kernel source */
  if (next) {
    new->next->pprev = &new->next;
  }
  *new->pprev = new;
  old->pprev = LIST_POISON2;
}
  625. static inline void hlist_add_head (struct hlist_node *n, struct hlist_head *h)
  626. {
  627. struct hlist_node *first = h->first;
  628. n->next = first;
  629. if (first) {
  630. first->pprev = &n->next;
  631. }
  632. h->first = n;
  633. n->pprev = &h->first;
  634. }
/**
 * hlist_add_head_rcu - adds the specified element to the specified hlist,
 * while permitting racing traversals.
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs. Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 *
 * Statement order matters: @n is fully initialized before it is
 * published via h->first (kernel write barrier at the marked point).
 */
static inline void hlist_add_head_rcu (struct hlist_node *n,
                                       struct hlist_head *h)
{
  struct hlist_node *first = h->first;
  n->next = first;
  n->pprev = &h->first;
  // smp_wmb();  /* publish barrier in the original kernel source */
  if (first) {
    first->pprev = &n->next;
  }
  h->first = n;
}
  663. /* next must be != NULL */
  664. static inline void hlist_add_before (struct hlist_node *n,
  665. struct hlist_node *next)
  666. {
  667. n->pprev = next->pprev;
  668. n->next = next;
  669. next->pprev = &n->next;
  670. *(n->pprev) = n;
  671. }
  672. static inline void hlist_add_after (struct hlist_node *n,
  673. struct hlist_node *next)
  674. {
  675. next->next = n->next;
  676. n->next = next;
  677. next->pprev = &n->next;
  678. if (next->next) {
  679. next->next->pprev = &next->next;
  680. }
  681. }
/**
 * hlist_add_before_rcu - adds the specified element to the specified hlist
 * before the specified node while permitting racing traversals.
 * @n: the new element to add to the hash list.
 * @next: the existing element to add the new element before.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 *
 * Statement order matters: @n is fully wired before being published
 * (kernel write barrier at the marked point).
 */
static inline void hlist_add_before_rcu (struct hlist_node *n,
                                         struct hlist_node *next)
{
  n->pprev = next->pprev;
  n->next = next;
  // smp_wmb();  /* publish barrier in the original kernel source */
  next->pprev = &n->next;
  *(n->pprev) = n;
}
/**
 * hlist_add_after_rcu - adds the specified element to the specified hlist
 * after the specified node while permitting racing traversals.
 * @prev: the existing element to add the new element after.
 * @n: the new element to add to the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 *
 * Statement order matters: @n is fully wired before prev->next
 * publishes it (kernel write barrier at the marked point).
 */
static inline void hlist_add_after_rcu (struct hlist_node *prev,
                                        struct hlist_node *n)
{
  n->next = prev->next;
  n->pprev = &prev->next;
  // smp_wmb();  /* publish barrier in the original kernel source */
  prev->next = n;
  if (n->next) {
    n->next->pprev = &n->next;
  }
}
/* hlist_entry - get the containing struct for an hlist_node pointer. */
#define hlist_entry(ptr, type, member) container_of (ptr, type, member)

/* hlist_for_each - iterate over the raw hlist_nodes of @head. */
#define hlist_for_each(pos, head) \
  for (pos = (head)->first; pos && ({ prefetch (pos->next); 1; }); \
       pos = pos->next)

/* hlist_for_each_safe - as above, but safe against removal of @pos
 * (successor latched in @n before the body runs). */
#define hlist_for_each_safe(pos, n, head) \
  for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
       pos = n)
/**
 * hlist_for_each_entry - iterate over list of given type
 * @tpos: the type * to use as a loop counter.
 * @pos: the &struct hlist_node to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry(tpos, pos, head, member) \
  for (pos = (head)->first; \
       pos && ({ prefetch (pos->next); 1; }) && \
       ({ tpos = hlist_entry (pos, typeof(*tpos), member); 1; }); \
       pos = pos->next)

/**
 * hlist_for_each_entry_continue - iterate over a hlist continuing after existing point
 * @tpos: the type * to use as a loop counter.
 * @pos: the &struct hlist_node to use as a loop counter.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue(tpos, pos, member) \
  for (pos = (pos)->next; \
       pos && ({ prefetch (pos->next); 1; }) && \
       ({ tpos = hlist_entry (pos, typeof(*tpos), member); 1; }); \
       pos = pos->next)

/**
 * hlist_for_each_entry_from - iterate over a hlist continuing from existing
 * point (@pos itself is visited first)
 * @tpos: the type * to use as a loop counter.
 * @pos: the &struct hlist_node to use as a loop counter.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_from(tpos, pos, member) \
  for (; pos && ({ prefetch (pos->next); 1; }) && \
       ({ tpos = hlist_entry (pos, typeof(*tpos), member); 1; }); \
       pos = pos->next)

/**
 * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @tpos: the type * to use as a loop counter.
 * @pos: the &struct hlist_node to use as a loop counter.
 * @n: another &struct hlist_node to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_safe(tpos, pos, n, head, member) \
  for (pos = (head)->first; \
       pos && ({ n = pos->next; 1; }) && \
       ({ tpos = hlist_entry (pos, typeof(*tpos), member); 1; }); \
       pos = n)

/**
 * hlist_for_each_entry_rcu - iterate over rcu list of given type
 * @tpos: the type * to use as a loop counter.
 * @pos: the &struct hlist_node to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 * NOTE(review): relies on rcu_dereference(), which this header does not
 * define — confirm it is supplied by the including code.
 */
#define hlist_for_each_entry_rcu(tpos, pos, head, member) \
  for (pos = (head)->first; \
       rcu_dereference (pos) && ({ prefetch (pos->next); 1; }) && \
       ({ tpos = hlist_entry (pos, typeof(*tpos), member); 1; }); \
       pos = pos->next)
  801. #endif
  802. /**
  803. * __list_sort - sort the list using given comparator with merge-sort algorithm
  804. * @head: is a head of the list to be sorted
  805. * @member_offset: is machine offset inside the list entry structure to the
  806. * field of type struct list_head which links that entry with
  807. * the list.
  808. */
  809. extern void __list_sort(struct list_head * head,
  810. int member_offset,
  811. int (*comparator)(void*, void*));
  812. /**
  813. * list_sort - wrapper for __list_sort
  814. * @head: is a head of the list to be sorted
  815. * @type: is the type of list entry
  816. * @member: is the name of the field inside entry that links that entry with
  817. * other entries in the list.
  818. * @comaprator: function comparing two entries, should return value lesser
  819. * than 0 when the first argument is lesser than the second one.
  820. */
  821. #define list_sort(head, type, member, comparator) \
  822. ({ \
  823. __list_sort (head, \
  824. offsetof (type, member), \
  825. (int (*)(void*, void*))comparator); \
  826. })
  827. void test_list_sort(void);