XBPS Library API 20240111
The X Binary Package System
queue.h
1/* $NetBSD: queue.h,v 1.52 2009/04/20 09:56:08 mschuett Exp $ */
2
3/*
4 * Copyright (c) 1991, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the University nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 * @(#)queue.h 8.5 (Berkeley) 8/20/94
32 */
33
34#ifndef _SYS_QUEUE_H_
35#define _SYS_QUEUE_H_
36
37/*
38 * This file defines five types of data structures: singly-linked lists,
39 * lists, simple queues, tail queues, and circular queues.
40 *
41 * A singly-linked list is headed by a single forward pointer. The
42 * elements are singly linked for minimum space and pointer manipulation
43 * overhead at the expense of O(n) removal for arbitrary elements. New
44 * elements can be added to the list after an existing element or at the
45 * head of the list. Elements being removed from the head of the list
46 * should use the explicit macro for this purpose for optimum
47 * efficiency. A singly-linked list may only be traversed in the forward
48 * direction. Singly-linked lists are ideal for applications with large
49 * datasets and few or no removals or for implementing a LIFO queue.
50 *
51 * A list is headed by a single forward pointer (or an array of forward
52 * pointers for a hash table header). The elements are doubly linked
53 * so that an arbitrary element can be removed without a need to
54 * traverse the list. New elements can be added to the list before
55 * or after an existing element or at the head of the list. A list
56 * may only be traversed in the forward direction.
57 *
58 * A simple queue is headed by a pair of pointers, one the head of the
59 * list and the other to the tail of the list. The elements are singly
60 * linked to save space, so elements can only be removed from the
61 * head of the list. New elements can be added to the list after
62 * an existing element, at the head of the list, or at the end of the
63 * list. A simple queue may only be traversed in the forward direction.
64 *
65 * A tail queue is headed by a pair of pointers, one to the head of the
66 * list and the other to the tail of the list. The elements are doubly
67 * linked so that an arbitrary element can be removed without a need to
68 * traverse the list. New elements can be added to the list before or
69 * after an existing element, at the head of the list, or at the end of
70 * the list. A tail queue may be traversed in either direction.
71 *
72 * A circle queue is headed by a pair of pointers, one to the head of the
73 * list and the other to the tail of the list. The elements are doubly
74 * linked so that an arbitrary element can be removed without a need to
75 * traverse the list. New elements can be added to the list before or after
76 * an existing element, at the head of the list, or at the end of the list.
77 * A circle queue may be traversed in either direction, but has a more
78 * complex end of list detection.
79 *
80 * For details on the use of these macros, see the queue(3) manual page.
81 */
82
/*
 * List definitions.
 *
 * A list element stores a forward pointer plus the *address* of the
 * previous element's forward pointer, which is what makes O(1)
 * LIST_REMOVE possible without a full back pointer.
 */
/* Declare a list head struct tag `name` for elements of `struct type`. */
#define LIST_HEAD(name, type) \
struct name { \
	struct type *lh_first;	/* first element */ \
}

/* Static initializer for an empty list head. */
#define LIST_HEAD_INITIALIZER(head) \
	{ NULL }

/* Linkage fields to embed inside an element of `struct type`. */
#define LIST_ENTRY(type) \
struct { \
	struct type *le_next;	/* next element */ \
	struct type **le_prev;	/* address of previous next element */ \
}

/*
 * List functions.
 */
/*
 * Kernel-with-QUEUEDEBUG builds check list invariants before each
 * operation and poison the links of removed elements (set to 1L) so a
 * stale reference faults quickly.  In all other builds these expand to
 * nothing.
 */
#if defined(_KERNEL) && defined(QUEUEDEBUG)
#define QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field) \
	if ((head)->lh_first && \
	    (head)->lh_first->field.le_prev != &(head)->lh_first) \
		panic("LIST_INSERT_HEAD %p %s:%d", (head), __FILE__, __LINE__);
#define QUEUEDEBUG_LIST_OP(elm, field) \
	if ((elm)->field.le_next && \
	    (elm)->field.le_next->field.le_prev != \
	    &(elm)->field.le_next) \
		panic("LIST_* forw %p %s:%d", (elm), __FILE__, __LINE__);\
	if (*(elm)->field.le_prev != (elm)) \
		panic("LIST_* back %p %s:%d", (elm), __FILE__, __LINE__);
#define QUEUEDEBUG_LIST_POSTREMOVE(elm, field) \
	(elm)->field.le_next = (void *)1L; \
	(elm)->field.le_prev = (void *)1L;
#else
#define QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field)
#define QUEUEDEBUG_LIST_OP(elm, field)
#define QUEUEDEBUG_LIST_POSTREMOVE(elm, field)
#endif

/* Run-time initialization of a list head to empty. */
#define LIST_INIT(head) do { \
	(head)->lh_first = NULL; \
} while (/*CONSTCOND*/0)

/* Insert `elm` immediately after `listelm` (which must be on a list). */
#define LIST_INSERT_AFTER(listelm, elm, field) do { \
	QUEUEDEBUG_LIST_OP((listelm), field) \
	if (((elm)->field.le_next = (listelm)->field.le_next) != NULL) \
		(listelm)->field.le_next->field.le_prev = \
		    &(elm)->field.le_next; \
	(listelm)->field.le_next = (elm); \
	(elm)->field.le_prev = &(listelm)->field.le_next; \
} while (/*CONSTCOND*/0)

/* Insert `elm` immediately before `listelm` (which must be on a list). */
#define LIST_INSERT_BEFORE(listelm, elm, field) do { \
	QUEUEDEBUG_LIST_OP((listelm), field) \
	(elm)->field.le_prev = (listelm)->field.le_prev; \
	(elm)->field.le_next = (listelm); \
	*(listelm)->field.le_prev = (elm); \
	(listelm)->field.le_prev = &(elm)->field.le_next; \
} while (/*CONSTCOND*/0)

/* Insert `elm` at the head of the list. */
#define LIST_INSERT_HEAD(head, elm, field) do { \
	QUEUEDEBUG_LIST_INSERT_HEAD((head), (elm), field) \
	if (((elm)->field.le_next = (head)->lh_first) != NULL) \
		(head)->lh_first->field.le_prev = &(elm)->field.le_next;\
	(head)->lh_first = (elm); \
	(elm)->field.le_prev = &(head)->lh_first; \
} while (/*CONSTCOND*/0)

/* Unlink `elm` from its list in O(1); does not free or clear it. */
#define LIST_REMOVE(elm, field) do { \
	QUEUEDEBUG_LIST_OP((elm), field) \
	if ((elm)->field.le_next != NULL) \
		(elm)->field.le_next->field.le_prev = \
		    (elm)->field.le_prev; \
	*(elm)->field.le_prev = (elm)->field.le_next; \
	QUEUEDEBUG_LIST_POSTREMOVE((elm), field) \
} while (/*CONSTCOND*/0)

/* Forward iteration; `var` must not be removed inside the loop body. */
#define LIST_FOREACH(var, head, field) \
	for ((var) = ((head)->lh_first); \
	    (var); \
	    (var) = ((var)->field.le_next))

/*
 * List access methods.
 */
#define LIST_EMPTY(head)		((head)->lh_first == NULL)
#define LIST_FIRST(head)		((head)->lh_first)
#define LIST_NEXT(elm, field)		((elm)->field.le_next)
173
174
/*
 * Singly-linked List definitions.
 *
 * Each element stores only a forward pointer, so storage is minimal
 * but removing an arbitrary element requires a linear scan
 * (see SLIST_REMOVE).
 */
/* Declare a singly-linked list head struct tag `name`. */
#define SLIST_HEAD(name, type) \
struct name { \
	struct type *slh_first;	/* first element */ \
}

/* Static initializer for an empty singly-linked list head. */
#define SLIST_HEAD_INITIALIZER(head) \
	{ NULL }

/* Linkage field to embed inside an element of `struct type`. */
#define SLIST_ENTRY(type) \
struct { \
	struct type *sle_next;	/* next element */ \
}

/*
 * Singly-linked List functions.
 */
/* Run-time initialization of a singly-linked list head to empty. */
#define SLIST_INIT(head) do { \
	(head)->slh_first = NULL; \
} while (/*CONSTCOND*/0)

/* Insert `elm` immediately after `slistelm` in O(1). */
#define SLIST_INSERT_AFTER(slistelm, elm, field) do { \
	(elm)->field.sle_next = (slistelm)->field.sle_next; \
	(slistelm)->field.sle_next = (elm); \
} while (/*CONSTCOND*/0)

/* Insert `elm` at the head of the list in O(1). */
#define SLIST_INSERT_HEAD(head, elm, field) do { \
	(elm)->field.sle_next = (head)->slh_first; \
	(head)->slh_first = (elm); \
} while (/*CONSTCOND*/0)

/* Remove the first element; the list must be non-empty. */
#define SLIST_REMOVE_HEAD(head, field) do { \
	(head)->slh_first = (head)->slh_first->field.sle_next; \
} while (/*CONSTCOND*/0)

/* Remove `elm` (which must be present) via an O(n) scan from the head. */
#define SLIST_REMOVE(head, elm, type, field) do { \
	if ((head)->slh_first == (elm)) { \
		SLIST_REMOVE_HEAD((head), field); \
	} \
	else { \
		struct type *curelm = (head)->slh_first; \
		while(curelm->field.sle_next != (elm)) \
			curelm = curelm->field.sle_next; \
		curelm->field.sle_next = \
		    curelm->field.sle_next->field.sle_next; \
	} \
} while (/*CONSTCOND*/0)

/* Remove the element that follows `slistelm`; both must exist. */
#define SLIST_REMOVE_AFTER(slistelm, field) do { \
	(slistelm)->field.sle_next = \
	    SLIST_NEXT(SLIST_NEXT((slistelm), field), field); \
} while (/*CONSTCOND*/0)

/* Forward iteration; `var` must not be removed inside the loop body. */
#define SLIST_FOREACH(var, head, field) \
	for((var) = (head)->slh_first; (var); (var) = (var)->field.sle_next)

/* Removal-safe forward iteration: `tvar` holds the successor. */
#define SLIST_FOREACH_SAFE(var, head, field, tvar) \
	for ((var) = SLIST_FIRST((head)); \
	    (var) && ((tvar) = SLIST_NEXT((var), field), 1); \
	    (var) = (tvar))

/*
 * Singly-linked List access methods.
 */
#define SLIST_EMPTY(head)	((head)->slh_first == NULL)
#define SLIST_FIRST(head)	((head)->slh_first)
#define SLIST_NEXT(elm, field)	((elm)->field.sle_next)
244
245
/*
 * Singly-linked Tail queue declarations.
 *
 * Like SLIST, but the head also tracks the address of the last
 * element's "next" pointer (`stqh_last`), giving O(1) insertion at
 * the tail.  On an empty queue `stqh_last` points back at
 * `stqh_first`.
 */
#define STAILQ_HEAD(name, type) \
struct name { \
	struct type *stqh_first;	/* first element */ \
	struct type **stqh_last;	/* addr of last next element */ \
}

/* Static initializer for an empty tail queue head. */
#define STAILQ_HEAD_INITIALIZER(head) \
	{ NULL, &(head).stqh_first }

/* Linkage field to embed inside an element of `struct type`. */
#define STAILQ_ENTRY(type) \
struct { \
	struct type *stqe_next;	/* next element */ \
}

/*
 * Singly-linked Tail queue functions.
 */
/* Run-time initialization to empty. */
#define STAILQ_INIT(head) do { \
	(head)->stqh_first = NULL; \
	(head)->stqh_last = &(head)->stqh_first; \
} while (/*CONSTCOND*/0)

/* Insert `elm` at the front; also fixes the tail pointer if queue was empty. */
#define STAILQ_INSERT_HEAD(head, elm, field) do { \
	if (((elm)->field.stqe_next = (head)->stqh_first) == NULL) \
		(head)->stqh_last = &(elm)->field.stqe_next; \
	(head)->stqh_first = (elm); \
} while (/*CONSTCOND*/0)

/* Append `elm` at the tail in O(1). */
#define STAILQ_INSERT_TAIL(head, elm, field) do { \
	(elm)->field.stqe_next = NULL; \
	*(head)->stqh_last = (elm); \
	(head)->stqh_last = &(elm)->field.stqe_next; \
} while (/*CONSTCOND*/0)

/* Insert `elm` after `listelm`; fixes the tail pointer if `listelm` was last. */
#define STAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
	if (((elm)->field.stqe_next = (listelm)->field.stqe_next) == NULL)\
		(head)->stqh_last = &(elm)->field.stqe_next; \
	(listelm)->field.stqe_next = (elm); \
} while (/*CONSTCOND*/0)

/* Remove the first element; the queue must be non-empty. */
#define STAILQ_REMOVE_HEAD(head, field) do { \
	if (((head)->stqh_first = (head)->stqh_first->field.stqe_next) == NULL) \
		(head)->stqh_last = &(head)->stqh_first; \
} while (/*CONSTCOND*/0)

/* Remove `elm` (which must be present) via an O(n) scan from the head. */
#define STAILQ_REMOVE(head, elm, type, field) do { \
	if ((head)->stqh_first == (elm)) { \
		STAILQ_REMOVE_HEAD((head), field); \
	} else { \
		struct type *curelm = (head)->stqh_first; \
		while (curelm->field.stqe_next != (elm)) \
			curelm = curelm->field.stqe_next; \
		if ((curelm->field.stqe_next = \
			curelm->field.stqe_next->field.stqe_next) == NULL) \
			    (head)->stqh_last = &(curelm)->field.stqe_next; \
	} \
} while (/*CONSTCOND*/0)

/* Forward iteration; `var` must not be removed inside the loop body. */
#define STAILQ_FOREACH(var, head, field) \
	for ((var) = ((head)->stqh_first); \
		(var); \
		(var) = ((var)->field.stqe_next))

/* Removal-safe forward iteration: `tvar` holds the successor. */
#define STAILQ_FOREACH_SAFE(var, head, field, tvar) \
	for ((var) = STAILQ_FIRST((head)); \
	    (var) && ((tvar) = STAILQ_NEXT((var), field), 1); \
	    (var) = (tvar))

/* Append all of `head2` onto `head1` and leave `head2` empty. */
#define STAILQ_CONCAT(head1, head2) do { \
	if (!STAILQ_EMPTY((head2))) { \
		*(head1)->stqh_last = (head2)->stqh_first; \
		(head1)->stqh_last = (head2)->stqh_last; \
		STAILQ_INIT((head2)); \
	} \
} while (/*CONSTCOND*/0)

/*
 * Last element, or NULL if empty: recovered from `stqh_last`, which
 * points at the last element's embedded entry, by subtracting the
 * entry's offset within `struct type`.
 */
#define STAILQ_LAST(head, type, field) \
	(STAILQ_EMPTY((head)) ? \
		NULL : \
	        ((struct type *)(void *) \
		((char *)((head)->stqh_last) - offsetof(struct type, field))))

/*
 * Singly-linked Tail queue access methods.
 */
#define STAILQ_EMPTY(head)	((head)->stqh_first == NULL)
#define STAILQ_FIRST(head)	((head)->stqh_first)
#define STAILQ_NEXT(elm, field)	((elm)->field.stqe_next)
337
338
/*
 * Simple queue definitions.
 *
 * Structurally identical to STAILQ (singly linked, head + tail-address
 * pointers); kept as a separate historical NetBSD API with `sq`-prefixed
 * field names.
 */
#define SIMPLEQ_HEAD(name, type) \
struct name { \
	struct type *sqh_first;	/* first element */ \
	struct type **sqh_last;	/* addr of last next element */ \
}

/* Static initializer for an empty simple queue head. */
#define SIMPLEQ_HEAD_INITIALIZER(head) \
	{ NULL, &(head).sqh_first }

/* Linkage field to embed inside an element of `struct type`. */
#define SIMPLEQ_ENTRY(type) \
struct { \
	struct type *sqe_next;	/* next element */ \
}

/*
 * Simple queue functions.
 */
/* Run-time initialization to empty. */
#define SIMPLEQ_INIT(head) do { \
	(head)->sqh_first = NULL; \
	(head)->sqh_last = &(head)->sqh_first; \
} while (/*CONSTCOND*/0)

/* Insert `elm` at the front; fixes the tail pointer if queue was empty. */
#define SIMPLEQ_INSERT_HEAD(head, elm, field) do { \
	if (((elm)->field.sqe_next = (head)->sqh_first) == NULL) \
		(head)->sqh_last = &(elm)->field.sqe_next; \
	(head)->sqh_first = (elm); \
} while (/*CONSTCOND*/0)

/* Append `elm` at the tail in O(1). */
#define SIMPLEQ_INSERT_TAIL(head, elm, field) do { \
	(elm)->field.sqe_next = NULL; \
	*(head)->sqh_last = (elm); \
	(head)->sqh_last = &(elm)->field.sqe_next; \
} while (/*CONSTCOND*/0)

/* Insert `elm` after `listelm`; fixes the tail pointer if `listelm` was last. */
#define SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
	if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL)\
		(head)->sqh_last = &(elm)->field.sqe_next; \
	(listelm)->field.sqe_next = (elm); \
} while (/*CONSTCOND*/0)

/* Remove the first element; the queue must be non-empty. */
#define SIMPLEQ_REMOVE_HEAD(head, field) do { \
	if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL) \
		(head)->sqh_last = &(head)->sqh_first; \
} while (/*CONSTCOND*/0)

/* Remove `elm` (which must be present) via an O(n) scan from the head. */
#define SIMPLEQ_REMOVE(head, elm, type, field) do { \
	if ((head)->sqh_first == (elm)) { \
		SIMPLEQ_REMOVE_HEAD((head), field); \
	} else { \
		struct type *curelm = (head)->sqh_first; \
		while (curelm->field.sqe_next != (elm)) \
			curelm = curelm->field.sqe_next; \
		if ((curelm->field.sqe_next = \
			curelm->field.sqe_next->field.sqe_next) == NULL) \
			    (head)->sqh_last = &(curelm)->field.sqe_next; \
	} \
} while (/*CONSTCOND*/0)

/* Forward iteration; `var` must not be removed inside the loop body. */
#define SIMPLEQ_FOREACH(var, head, field) \
	for ((var) = ((head)->sqh_first); \
	    (var); \
	    (var) = ((var)->field.sqe_next))

/* Removal-safe forward iteration: `next` holds the successor. */
#define SIMPLEQ_FOREACH_SAFE(var, head, field, next) \
	for ((var) = ((head)->sqh_first); \
	    (var) && ((next = ((var)->field.sqe_next)), 1); \
	    (var) = (next))

/* Append all of `head2` onto `head1` and leave `head2` empty. */
#define SIMPLEQ_CONCAT(head1, head2) do { \
	if (!SIMPLEQ_EMPTY((head2))) { \
		*(head1)->sqh_last = (head2)->sqh_first; \
		(head1)->sqh_last = (head2)->sqh_last; \
		SIMPLEQ_INIT((head2)); \
	} \
} while (/*CONSTCOND*/0)

/* Last element via offsetof arithmetic on `sqh_last`; NULL if empty. */
#define SIMPLEQ_LAST(head, type, field) \
	(SIMPLEQ_EMPTY((head)) ? \
		NULL : \
	        ((struct type *)(void *) \
		((char *)((head)->sqh_last) - offsetof(struct type, field))))

/*
 * Simple queue access methods.
 */
#define SIMPLEQ_EMPTY(head)		((head)->sqh_first == NULL)
#define SIMPLEQ_FIRST(head)		((head)->sqh_first)
#define SIMPLEQ_NEXT(elm, field)	((elm)->field.sqe_next)
430
431
/*
 * Tail queue definitions.
 *
 * Doubly linked with a tail-address pointer in the head: O(1) insert
 * at head/tail/before/after, O(1) remove, and reverse traversal.
 * The `qual` parameter of the internal `_TAILQ_*` macros lets callers
 * build const-qualified variants; the public macros pass it empty.
 */
#define _TAILQ_HEAD(name, type, qual) \
struct name { \
	qual type *tqh_first;		/* first element */ \
	qual type *qual *tqh_last;	/* addr of last next element */ \
}
#define TAILQ_HEAD(name, type)	_TAILQ_HEAD(name, struct type,)

/* Static initializer for an empty tail queue head. */
#define TAILQ_HEAD_INITIALIZER(head) \
	{ NULL, &(head).tqh_first }

/* Linkage fields to embed inside an element. */
#define _TAILQ_ENTRY(type, qual) \
struct { \
	qual type *tqe_next;		/* next element */ \
	qual type *qual *tqe_prev;	/* address of previous next element */\
}
#define TAILQ_ENTRY(type)	_TAILQ_ENTRY(struct type,)

/*
 * Tail queue functions.
 */
/*
 * Kernel-with-QUEUEDEBUG builds check queue invariants before each
 * operation and poison the links of removed elements (set to 1L) so a
 * stale reference faults quickly.  Otherwise these expand to nothing.
 */
#if defined(_KERNEL) && defined(QUEUEDEBUG)
#define QUEUEDEBUG_TAILQ_INSERT_HEAD(head, elm, field) \
	if ((head)->tqh_first && \
	    (head)->tqh_first->field.tqe_prev != &(head)->tqh_first) \
		panic("TAILQ_INSERT_HEAD %p %s:%d", (head), __FILE__, __LINE__);
#define QUEUEDEBUG_TAILQ_INSERT_TAIL(head, elm, field) \
	if (*(head)->tqh_last != NULL) \
		panic("TAILQ_INSERT_TAIL %p %s:%d", (head), __FILE__, __LINE__);
#define QUEUEDEBUG_TAILQ_OP(elm, field) \
	if ((elm)->field.tqe_next && \
	    (elm)->field.tqe_next->field.tqe_prev != \
	    &(elm)->field.tqe_next) \
		panic("TAILQ_* forw %p %s:%d", (elm), __FILE__, __LINE__);\
	if (*(elm)->field.tqe_prev != (elm)) \
		panic("TAILQ_* back %p %s:%d", (elm), __FILE__, __LINE__);
#define QUEUEDEBUG_TAILQ_PREREMOVE(head, elm, field) \
	if ((elm)->field.tqe_next == NULL && \
	    (head)->tqh_last != &(elm)->field.tqe_next) \
		panic("TAILQ_PREREMOVE head %p elm %p %s:%d", \
		    (head), (elm), __FILE__, __LINE__);
#define QUEUEDEBUG_TAILQ_POSTREMOVE(elm, field) \
	(elm)->field.tqe_next = (void *)1L; \
	(elm)->field.tqe_prev = (void *)1L;
#else
#define QUEUEDEBUG_TAILQ_INSERT_HEAD(head, elm, field)
#define QUEUEDEBUG_TAILQ_INSERT_TAIL(head, elm, field)
#define QUEUEDEBUG_TAILQ_OP(elm, field)
#define QUEUEDEBUG_TAILQ_PREREMOVE(head, elm, field)
#define QUEUEDEBUG_TAILQ_POSTREMOVE(elm, field)
#endif

/* Run-time initialization to empty. */
#define TAILQ_INIT(head) do { \
	(head)->tqh_first = NULL; \
	(head)->tqh_last = &(head)->tqh_first; \
} while (/*CONSTCOND*/0)

/* Insert `elm` at the head in O(1). */
#define TAILQ_INSERT_HEAD(head, elm, field) do { \
	QUEUEDEBUG_TAILQ_INSERT_HEAD((head), (elm), field) \
	if (((elm)->field.tqe_next = (head)->tqh_first) != NULL) \
		(head)->tqh_first->field.tqe_prev = \
		    &(elm)->field.tqe_next; \
	else \
		(head)->tqh_last = &(elm)->field.tqe_next; \
	(head)->tqh_first = (elm); \
	(elm)->field.tqe_prev = &(head)->tqh_first; \
} while (/*CONSTCOND*/0)

/* Append `elm` at the tail in O(1). */
#define TAILQ_INSERT_TAIL(head, elm, field) do { \
	QUEUEDEBUG_TAILQ_INSERT_TAIL((head), (elm), field) \
	(elm)->field.tqe_next = NULL; \
	(elm)->field.tqe_prev = (head)->tqh_last; \
	*(head)->tqh_last = (elm); \
	(head)->tqh_last = &(elm)->field.tqe_next; \
} while (/*CONSTCOND*/0)

/* Insert `elm` after `listelm` (which must be on the queue). */
#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
	QUEUEDEBUG_TAILQ_OP((listelm), field) \
	if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\
		(elm)->field.tqe_next->field.tqe_prev = \
		    &(elm)->field.tqe_next; \
	else \
		(head)->tqh_last = &(elm)->field.tqe_next; \
	(listelm)->field.tqe_next = (elm); \
	(elm)->field.tqe_prev = &(listelm)->field.tqe_next; \
} while (/*CONSTCOND*/0)

/* Insert `elm` before `listelm` (which must be on a queue). */
#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
	QUEUEDEBUG_TAILQ_OP((listelm), field) \
	(elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
	(elm)->field.tqe_next = (listelm); \
	*(listelm)->field.tqe_prev = (elm); \
	(listelm)->field.tqe_prev = &(elm)->field.tqe_next; \
} while (/*CONSTCOND*/0)

/* Unlink `elm` from the queue in O(1); does not free or clear it. */
#define TAILQ_REMOVE(head, elm, field) do { \
	QUEUEDEBUG_TAILQ_PREREMOVE((head), (elm), field) \
	QUEUEDEBUG_TAILQ_OP((elm), field) \
	if (((elm)->field.tqe_next) != NULL) \
		(elm)->field.tqe_next->field.tqe_prev = \
		    (elm)->field.tqe_prev; \
	else \
		(head)->tqh_last = (elm)->field.tqe_prev; \
	*(elm)->field.tqe_prev = (elm)->field.tqe_next; \
	QUEUEDEBUG_TAILQ_POSTREMOVE((elm), field); \
} while (/*CONSTCOND*/0)

/* Forward iteration; `var` must not be removed inside the loop body. */
#define TAILQ_FOREACH(var, head, field) \
	for ((var) = ((head)->tqh_first); \
	    (var); \
	    (var) = ((var)->field.tqe_next))

/* Removal-safe forward iteration: `next` holds the successor. */
#define TAILQ_FOREACH_SAFE(var, head, field, next) \
	for ((var) = ((head)->tqh_first); \
	    (var) != NULL && ((next) = TAILQ_NEXT(var, field), 1); \
	    (var) = (next))

/*
 * Reverse iteration.  `headname` is the struct tag passed to
 * TAILQ_HEAD(); it is needed to reinterpret the stored "address of
 * previous next pointer" as a head in order to walk backwards.
 */
#define TAILQ_FOREACH_REVERSE(var, head, headname, field) \
	for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last));	\
	    (var); \
	    (var) = (*(((struct headname *)((var)->field.tqe_prev))->tqh_last)))

/* Removal-safe reverse iteration: `prev` holds the predecessor. */
#define TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, prev) \
	for ((var) = TAILQ_LAST((head), headname); \
	    (var) && ((prev) = TAILQ_PREV((var), headname, field), 1);\
	    (var) = (prev))

/* Append all of `head2` onto `head1` and leave `head2` empty. */
#define TAILQ_CONCAT(head1, head2, field) do { \
	if (!TAILQ_EMPTY(head2)) { \
		*(head1)->tqh_last = (head2)->tqh_first; \
		(head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \
		(head1)->tqh_last = (head2)->tqh_last; \
		TAILQ_INIT((head2)); \
	} \
} while (/*CONSTCOND*/0)

/*
 * Tail queue access methods.
 */
#define TAILQ_EMPTY(head)		((head)->tqh_first == NULL)
#define TAILQ_FIRST(head)		((head)->tqh_first)
#define TAILQ_NEXT(elm, field)		((elm)->field.tqe_next)

/* Last element / predecessor, via the head-reinterpretation trick above. */
#define TAILQ_LAST(head, headname) \
	(*(((struct headname *)((head)->tqh_last))->tqh_last))
#define TAILQ_PREV(elm, headname, field) \
	(*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
581
582
/*
 * Circular queue definitions.
 *
 * Doubly linked ring in which the head itself acts as the sentinel:
 * the first element's `cqe_prev` and the last element's `cqe_next`
 * both point at the head (cast to the element type), so end-of-list
 * tests compare against `(void *)(head)` rather than NULL.
 */
/*
 * Kernel-with-QUEUEDEBUG builds check ring invariants before each
 * operation and poison the links of removed elements (set to 1L).
 * Otherwise these expand to nothing.
 */
#if defined(_KERNEL) && defined(QUEUEDEBUG)
#define QUEUEDEBUG_CIRCLEQ_HEAD(head, field) \
	if ((head)->cqh_first != (void *)(head) && \
	    (head)->cqh_first->field.cqe_prev != (void *)(head)) \
		panic("CIRCLEQ head forw %p %s:%d", (head), \
		      __FILE__, __LINE__); \
	if ((head)->cqh_last != (void *)(head) && \
	    (head)->cqh_last->field.cqe_next != (void *)(head)) \
		panic("CIRCLEQ head back %p %s:%d", (head), \
		      __FILE__, __LINE__);
#define QUEUEDEBUG_CIRCLEQ_ELM(head, elm, field) \
	if ((elm)->field.cqe_next == (void *)(head)) { \
		if ((head)->cqh_last != (elm)) \
			panic("CIRCLEQ elm last %p %s:%d", (elm), \
			      __FILE__, __LINE__); \
	} else { \
		if ((elm)->field.cqe_next->field.cqe_prev != (elm)) \
			panic("CIRCLEQ elm forw %p %s:%d", (elm), \
			      __FILE__, __LINE__); \
	} \
	if ((elm)->field.cqe_prev == (void *)(head)) { \
		if ((head)->cqh_first != (elm)) \
			panic("CIRCLEQ elm first %p %s:%d", (elm), \
			      __FILE__, __LINE__); \
	} else { \
		if ((elm)->field.cqe_prev->field.cqe_next != (elm)) \
			panic("CIRCLEQ elm prev %p %s:%d", (elm), \
			      __FILE__, __LINE__); \
	}
#define QUEUEDEBUG_CIRCLEQ_POSTREMOVE(elm, field) \
	(elm)->field.cqe_next = (void *)1L; \
	(elm)->field.cqe_prev = (void *)1L;
#else
#define QUEUEDEBUG_CIRCLEQ_HEAD(head, field)
#define QUEUEDEBUG_CIRCLEQ_ELM(head, elm, field)
#define QUEUEDEBUG_CIRCLEQ_POSTREMOVE(elm, field)
#endif

/* Declare a circular queue head struct tag `name`. */
#define CIRCLEQ_HEAD(name, type) \
struct name { \
	struct type *cqh_first;		/* first element */ \
	struct type *cqh_last;		/* last element */ \
}

/* Static initializer: both pointers refer to the head sentinel itself. */
#define CIRCLEQ_HEAD_INITIALIZER(head) \
	{ (void *)&head, (void *)&head }

/* Linkage fields to embed inside an element of `struct type`. */
#define CIRCLEQ_ENTRY(type) \
struct { \
	struct type *cqe_next;		/* next element */ \
	struct type *cqe_prev;		/* previous element */ \
}

/*
 * Circular queue functions.
 */
/* Run-time initialization: an empty ring points at its own head. */
#define CIRCLEQ_INIT(head) do { \
	(head)->cqh_first = (void *)(head); \
	(head)->cqh_last = (void *)(head); \
} while (/*CONSTCOND*/0)

/* Insert `elm` immediately after `listelm` (which must be on the ring). */
#define CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
	QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
	QUEUEDEBUG_CIRCLEQ_ELM((head), (listelm), field) \
	(elm)->field.cqe_next = (listelm)->field.cqe_next; \
	(elm)->field.cqe_prev = (listelm); \
	if ((listelm)->field.cqe_next == (void *)(head)) \
		(head)->cqh_last = (elm); \
	else \
		(listelm)->field.cqe_next->field.cqe_prev = (elm); \
	(listelm)->field.cqe_next = (elm); \
} while (/*CONSTCOND*/0)

/* Insert `elm` immediately before `listelm` (which must be on the ring). */
#define CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \
	QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
	QUEUEDEBUG_CIRCLEQ_ELM((head), (listelm), field) \
	(elm)->field.cqe_next = (listelm); \
	(elm)->field.cqe_prev = (listelm)->field.cqe_prev; \
	if ((listelm)->field.cqe_prev == (void *)(head)) \
		(head)->cqh_first = (elm); \
	else \
		(listelm)->field.cqe_prev->field.cqe_next = (elm); \
	(listelm)->field.cqe_prev = (elm); \
} while (/*CONSTCOND*/0)

/* Insert `elm` at the front of the ring. */
#define CIRCLEQ_INSERT_HEAD(head, elm, field) do { \
	QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
	(elm)->field.cqe_next = (head)->cqh_first; \
	(elm)->field.cqe_prev = (void *)(head); \
	if ((head)->cqh_last == (void *)(head)) \
		(head)->cqh_last = (elm); \
	else \
		(head)->cqh_first->field.cqe_prev = (elm); \
	(head)->cqh_first = (elm); \
} while (/*CONSTCOND*/0)

/* Append `elm` at the end of the ring. */
#define CIRCLEQ_INSERT_TAIL(head, elm, field) do { \
	QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
	(elm)->field.cqe_next = (void *)(head); \
	(elm)->field.cqe_prev = (head)->cqh_last; \
	if ((head)->cqh_first == (void *)(head)) \
		(head)->cqh_first = (elm); \
	else \
		(head)->cqh_last->field.cqe_next = (elm); \
	(head)->cqh_last = (elm); \
} while (/*CONSTCOND*/0)

/* Unlink `elm` from the ring in O(1); does not free or clear it. */
#define CIRCLEQ_REMOVE(head, elm, field) do { \
	QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
	QUEUEDEBUG_CIRCLEQ_ELM((head), (elm), field) \
	if ((elm)->field.cqe_next == (void *)(head)) \
		(head)->cqh_last = (elm)->field.cqe_prev; \
	else \
		(elm)->field.cqe_next->field.cqe_prev = \
		    (elm)->field.cqe_prev; \
	if ((elm)->field.cqe_prev == (void *)(head)) \
		(head)->cqh_first = (elm)->field.cqe_next; \
	else \
		(elm)->field.cqe_prev->field.cqe_next = \
		    (elm)->field.cqe_next; \
	QUEUEDEBUG_CIRCLEQ_POSTREMOVE((elm), field) \
} while (/*CONSTCOND*/0)

/* Forward iteration; ends when `var` wraps around to the head sentinel. */
#define CIRCLEQ_FOREACH(var, head, field) \
	for ((var) = ((head)->cqh_first); \
	    (var) != (const void *)(head); \
	    (var) = ((var)->field.cqe_next))

/* Reverse iteration; ends when `var` wraps around to the head sentinel. */
#define CIRCLEQ_FOREACH_REVERSE(var, head, field) \
	for ((var) = ((head)->cqh_last); \
	    (var) != (const void *)(head); \
	    (var) = ((var)->field.cqe_prev))

/*
 * Circular queue access methods.
 */
#define CIRCLEQ_EMPTY(head)		((head)->cqh_first == (void *)(head))
#define CIRCLEQ_FIRST(head)		((head)->cqh_first)
#define CIRCLEQ_LAST(head)		((head)->cqh_last)
#define CIRCLEQ_NEXT(elm, field)	((elm)->field.cqe_next)
#define CIRCLEQ_PREV(elm, field)	((elm)->field.cqe_prev)
727
/*
 * Wrap-around neighbour access: return the successor (resp.
 * predecessor) of `elm`, treating the queue as a ring — the last
 * element's successor is the first element and vice versa.  The queue
 * must be non-empty and `elm` must be on it.
 *
 * Fix: `elm` is now parenthesized in the else-branch of each
 * expansion (it already was everywhere else in this file), so
 * argument expressions such as `&node`, `p + 1`, or `c ? a : b`
 * expand correctly instead of mis-parsing.  Matches the upstream
 * BSD correction of these macros.
 */
#define CIRCLEQ_LOOP_NEXT(head, elm, field) \
	(((elm)->field.cqe_next == (void *)(head)) \
	    ? ((head)->cqh_first) \
	    : ((elm)->field.cqe_next))
#define CIRCLEQ_LOOP_PREV(head, elm, field) \
	(((elm)->field.cqe_prev == (void *)(head)) \
	    ? ((head)->cqh_last) \
	    : ((elm)->field.cqe_prev))
736
737#endif /* !_SYS_QUEUE_H_ */