6222 libuutil could provide a way to re-create an AVL tree
--- old/usr/src/lib/libuutil/common/uu_avl.c
+++ new/usr/src/lib/libuutil/common/uu_avl.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 + *
25 + * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
24 26 */
25 27
26 -#pragma ident "%Z%%M% %I% %E% SMI"
27 -
28 28 #include "libuutil_common.h"
29 29
30 30 #include <stdlib.h>
31 31 #include <string.h>
32 32 #include <unistd.h>
33 33 #include <sys/avl.h>
34 34
35 35 static uu_avl_pool_t uu_null_apool = { &uu_null_apool, &uu_null_apool };
36 36 static pthread_mutex_t uu_apool_list_lock = PTHREAD_MUTEX_INITIALIZER;
37 37
38 38 /*
39 39 * The index mark changes on every insert and delete, to catch stale
40 40 * references.
41 41 *
42 42 * We leave the low bit alone, since the avl code uses it.
43 43 */
44 44 #define INDEX_MAX (sizeof (uintptr_t) - 2)
45 45 #define INDEX_NEXT(m) (((m) == INDEX_MAX)? 2 : ((m) + 2) & INDEX_MAX)
46 46
47 47 #define INDEX_DECODE(i) ((i) & ~INDEX_MAX)
48 48 #define INDEX_ENCODE(p, n) (((n) & ~INDEX_MAX) | (p)->ua_index)
49 49 #define INDEX_VALID(p, i) (((i) & INDEX_MAX) == (p)->ua_index)
50 50 #define INDEX_CHECK(i) (((i) & INDEX_MAX) != 0)
51 51
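
A minimal stand-alone sketch of the generation scheme described above (illustrative only, not part of this change; it assumes an LP64 build, where sizeof (uintptr_t) is 8 and INDEX_MAX is 6):

    #include <stdio.h>
    #include <stdint.h>

    #define	SKETCH_INDEX_MAX	(sizeof (uintptr_t) - 2)
    #define	SKETCH_INDEX_NEXT(m) \
    	(((m) == SKETCH_INDEX_MAX)? 2 : ((m) + 2) & SKETCH_INDEX_MAX)

    int
    main(void)
    {
    	uintptr_t mark = 0;
    	int i;

    	/*
    	 * Prints "2 4 6 2 4 6": the mark cycles through bits 1-2,
    	 * leaving bit 0 for the avl code and the upper bits for the
    	 * encoded index.
    	 */
    	for (i = 0; i < 6; i++) {
    		mark = SKETCH_INDEX_NEXT(mark);
    		(void) printf("%lu ", (unsigned long)mark);
    	}
    	(void) printf("\n");
    	return (0);
    }
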
52 52 /*
53 53 * When an element is inactive (not in a tree), we keep a marked pointer to
54 54 * its containing pool in its first word, and a NULL pointer in its second.
55 55 *
56 56 * On insert, we use these to verify that it comes from the correct pool.
57 57 */
58 58 #define NODE_ARRAY(p, n) ((uintptr_t *)((uintptr_t)(n) + \
59 59 (p)->uap_nodeoffset))
60 60
61 61 #define POOL_TO_MARKER(pp) (((uintptr_t)(pp) | 1))
62 62
63 63 #define DEAD_MARKER 0xc4
64 64
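
The inactive-node convention above relies on pool structures being word-aligned, so ORing in the low bit produces a marker that can never be mistaken for a real avl pointer. A hedged illustration (node_is_inactive() is hypothetical, not a libuutil function):

    #include <stdint.h>

    #define	SKETCH_POOL_TO_MARKER(pp)	(((uintptr_t)(pp) | 1))

    /*
     * An inactive node carries the marked pool pointer in its first
     * word and 0 in its second -- exactly what uu_avl_insert() checks
     * in debug mode before accepting the element.
     */
    static int
    node_is_inactive(const uintptr_t *na, const void *pp)
    {
    	return (na[0] == SKETCH_POOL_TO_MARKER(pp) && na[1] == 0);
    }
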
65 65 uu_avl_pool_t *
66 66 uu_avl_pool_create(const char *name, size_t objsize, size_t nodeoffset,
67 67 uu_compare_fn_t *compare_func, uint32_t flags)
68 68 {
69 69 uu_avl_pool_t *pp, *next, *prev;
70 70
71 71 if (name == NULL ||
72 72 uu_check_name(name, UU_NAME_DOMAIN) == -1 ||
73 73 nodeoffset + sizeof (uu_avl_node_t) > objsize ||
74 74 compare_func == NULL) {
75 75 uu_set_error(UU_ERROR_INVALID_ARGUMENT);
76 76 return (NULL);
77 77 }
78 78
79 79 if (flags & ~UU_AVL_POOL_DEBUG) {
80 80 uu_set_error(UU_ERROR_UNKNOWN_FLAG);
81 81 return (NULL);
82 82 }
83 83
84 84 pp = uu_zalloc(sizeof (uu_avl_pool_t));
85 85 if (pp == NULL) {
86 86 uu_set_error(UU_ERROR_NO_MEMORY);
87 87 return (NULL);
88 88 }
89 89
90 90 (void) strlcpy(pp->uap_name, name, sizeof (pp->uap_name));
91 91 pp->uap_nodeoffset = nodeoffset;
92 92 pp->uap_objsize = objsize;
93 93 pp->uap_cmp = compare_func;
94 94 if (flags & UU_AVL_POOL_DEBUG)
95 95 pp->uap_debug = 1;
96 96 pp->uap_last_index = 0;
97 97
98 98 (void) pthread_mutex_init(&pp->uap_lock, NULL);
99 99
100 100 pp->uap_null_avl.ua_next_enc = UU_PTR_ENCODE(&pp->uap_null_avl);
101 101 pp->uap_null_avl.ua_prev_enc = UU_PTR_ENCODE(&pp->uap_null_avl);
102 102
103 103 (void) pthread_mutex_lock(&uu_apool_list_lock);
104 104 pp->uap_next = next = &uu_null_apool;
105 105 pp->uap_prev = prev = next->uap_prev;
106 106 next->uap_prev = pp;
107 107 prev->uap_next = pp;
108 108 (void) pthread_mutex_unlock(&uu_apool_list_lock);
109 109
110 110 return (pp);
111 111 }
112 112
113 113 void
114 114 uu_avl_pool_destroy(uu_avl_pool_t *pp)
115 115 {
116 116 if (pp->uap_debug) {
117 117 if (pp->uap_null_avl.ua_next_enc !=
118 118 UU_PTR_ENCODE(&pp->uap_null_avl) ||
119 119 pp->uap_null_avl.ua_prev_enc !=
120 120 UU_PTR_ENCODE(&pp->uap_null_avl)) {
121 121 uu_panic("uu_avl_pool_destroy: Pool \"%.*s\" (%p) has "
122 122 "outstanding avls, or is corrupt.\n",
123 123 (int)sizeof (pp->uap_name), pp->uap_name,
124 124 (void *)pp);
125 125 }
126 126 }
127 127 (void) pthread_mutex_lock(&uu_apool_list_lock);
128 128 pp->uap_next->uap_prev = pp->uap_prev;
129 129 pp->uap_prev->uap_next = pp->uap_next;
130 130 (void) pthread_mutex_unlock(&uu_apool_list_lock);
131 131 pp->uap_prev = NULL;
132 132 pp->uap_next = NULL;
133 133 uu_free(pp);
134 134 }
135 135
136 136 void
137 137 uu_avl_node_init(void *base, uu_avl_node_t *np, uu_avl_pool_t *pp)
138 138 {
139 139 uintptr_t *na = (uintptr_t *)np;
140 140
141 141 if (pp->uap_debug) {
142 142 uintptr_t offset = (uintptr_t)np - (uintptr_t)base;
143 143 if (offset + sizeof (*np) > pp->uap_objsize) {
144 144 uu_panic("uu_avl_node_init(%p, %p, %p (\"%s\")): "
145 145 "offset %ld doesn't fit in object (size %ld)\n",
146 146 base, (void *)np, (void *)pp, pp->uap_name,
147 147 (long)offset, (long)pp->uap_objsize);
148 148 }
149 149 if (offset != pp->uap_nodeoffset) {
150 150 uu_panic("uu_avl_node_init(%p, %p, %p (\"%s\")): "
151 151 "offset %ld doesn't match pool's offset (%ld)\n",
152 152 base, (void *)np, (void *)pp, pp->uap_name,
153 153 (long)offset, (long)pp->uap_nodeoffset);
154 154 }
155 155 }
156 156
157 157 na[0] = POOL_TO_MARKER(pp);
158 158 na[1] = 0;
159 159 }
160 160
161 161 void
162 162 uu_avl_node_fini(void *base, uu_avl_node_t *np, uu_avl_pool_t *pp)
163 163 {
164 164 uintptr_t *na = (uintptr_t *)np;
165 165
166 166 if (pp->uap_debug) {
167 167 if (na[0] == DEAD_MARKER && na[1] == DEAD_MARKER) {
168 168 uu_panic("uu_avl_node_fini(%p, %p, %p (\"%s\")): "
169 169 "node already finied\n",
170 170 base, (void *)np, (void *)pp, pp->uap_name);
171 171 }
172 172 if (na[0] != POOL_TO_MARKER(pp) || na[1] != 0) {
173 173 uu_panic("uu_avl_node_fini(%p, %p, %p (\"%s\")): "
174 174 "node corrupt, in tree, or in different pool\n",
175 175 base, (void *)np, (void *)pp, pp->uap_name);
176 176 }
177 177 }
178 178
179 179 na[0] = DEAD_MARKER;
180 180 na[1] = DEAD_MARKER;
181 181 na[2] = DEAD_MARKER;
182 182 }
183 183
184 184 struct uu_avl_node_compare_info {
185 185 uu_compare_fn_t *ac_compare;
186 186 void *ac_private;
187 187 void *ac_right;
188 188 void *ac_found;
189 189 };
190 190
191 191 static int
192 192 uu_avl_node_compare(const void *l, const void *r)
193 193 {
194 194 struct uu_avl_node_compare_info *info =
195 195 (struct uu_avl_node_compare_info *)l;
196 196
197 197 int res = info->ac_compare(r, info->ac_right, info->ac_private);
198 198
199 199 if (res == 0) {
200 200 if (info->ac_found == NULL)
201 201 info->ac_found = (void *)r;
202 202 return (-1);
203 203 }
204 204 if (res < 0)
205 205 return (1);
206 206 return (-1);
207 207 }
208 208
209 209 uu_avl_t *
210 210 uu_avl_create(uu_avl_pool_t *pp, void *parent, uint32_t flags)
211 211 {
212 212 uu_avl_t *ap, *next, *prev;
213 213
214 214 if (flags & ~UU_AVL_DEBUG) {
215 215 uu_set_error(UU_ERROR_UNKNOWN_FLAG);
216 216 return (NULL);
217 217 }
218 218
219 219 ap = uu_zalloc(sizeof (*ap));
220 220 if (ap == NULL) {
221 221 uu_set_error(UU_ERROR_NO_MEMORY);
222 222 return (NULL);
223 223 }
224 224
225 225 ap->ua_pool = pp;
226 226 ap->ua_parent_enc = UU_PTR_ENCODE(parent);
227 227 ap->ua_debug = pp->uap_debug || (flags & UU_AVL_DEBUG);
228 228 ap->ua_index = (pp->uap_last_index = INDEX_NEXT(pp->uap_last_index));
229 229
230 230 avl_create(&ap->ua_tree, &uu_avl_node_compare, pp->uap_objsize,
231 231 pp->uap_nodeoffset);
232 232
233 233 ap->ua_null_walk.uaw_next = &ap->ua_null_walk;
234 234 ap->ua_null_walk.uaw_prev = &ap->ua_null_walk;
235 235
236 236 (void) pthread_mutex_lock(&pp->uap_lock);
237 237 next = &pp->uap_null_avl;
238 238 prev = UU_PTR_DECODE(next->ua_prev_enc);
239 239 ap->ua_next_enc = UU_PTR_ENCODE(next);
240 240 ap->ua_prev_enc = UU_PTR_ENCODE(prev);
241 241 next->ua_prev_enc = UU_PTR_ENCODE(ap);
242 242 prev->ua_next_enc = UU_PTR_ENCODE(ap);
243 243 (void) pthread_mutex_unlock(&pp->uap_lock);
244 244
245 245 return (ap);
246 246 }
247 247
248 248 void
249 249 uu_avl_destroy(uu_avl_t *ap)
250 250 {
251 251 uu_avl_pool_t *pp = ap->ua_pool;
252 252
253 253 if (ap->ua_debug) {
254 254 if (avl_numnodes(&ap->ua_tree) != 0) {
255 255 uu_panic("uu_avl_destroy(%p): tree not empty\n",
256 256 (void *)ap);
257 257 }
258 258 if (ap->ua_null_walk.uaw_next != &ap->ua_null_walk ||
259 259 ap->ua_null_walk.uaw_prev != &ap->ua_null_walk) {
260 260 uu_panic("uu_avl_destroy(%p): outstanding walkers\n",
261 261 (void *)ap);
262 262 }
263 263 }
264 264 (void) pthread_mutex_lock(&pp->uap_lock);
265 265 UU_AVL_PTR(ap->ua_next_enc)->ua_prev_enc = ap->ua_prev_enc;
266 266 UU_AVL_PTR(ap->ua_prev_enc)->ua_next_enc = ap->ua_next_enc;
267 267 (void) pthread_mutex_unlock(&pp->uap_lock);
268 268 ap->ua_prev_enc = UU_PTR_ENCODE(NULL);
269 269 ap->ua_next_enc = UU_PTR_ENCODE(NULL);
270 270
271 271 ap->ua_pool = NULL;
272 272 avl_destroy(&ap->ua_tree);
273 273
274 274 uu_free(ap);
275 +}
276 +
277 +void
278 +uu_avl_recreate(uu_avl_t *ap)
279 +{
280 + uu_avl_pool_t *pp = ap->ua_pool;
281 +
282 + avl_destroy(&ap->ua_tree);
283 + avl_create(&ap->ua_tree, &uu_avl_node_compare, pp->uap_objsize,
284 + pp->uap_nodeoffset);
275 285 }
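
A sketch of how a caller might use the new uu_avl_recreate() to empty and rebuild a tree; foo_t, foo_node, and foo_pool are hypothetical application names, not part of this change. The uu_avl_node_init() call resets the node words left over from the old tree, so the debug-mode insert checks still pass:

    void
    foo_rebuild(uu_avl_t *avl, uu_avl_pool_t *foo_pool, foo_t **objs, int n)
    {
    	uu_avl_index_t idx;
    	int i;

    	uu_avl_recreate(avl);		/* drops every node at once */
    	for (i = 0; i < n; i++) {
    		uu_avl_node_init(objs[i], &objs[i]->foo_node, foo_pool);
    		if (uu_avl_find(avl, objs[i], NULL, &idx) == NULL)
    			uu_avl_insert(avl, objs[i], idx);
    	}
    }
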
276 286
277 287 size_t
278 288 uu_avl_numnodes(uu_avl_t *ap)
279 289 {
280 290 return (avl_numnodes(&ap->ua_tree));
281 291 }
282 292
283 293 void *
284 294 uu_avl_first(uu_avl_t *ap)
285 295 {
286 296 return (avl_first(&ap->ua_tree));
287 297 }
288 298
289 299 void *
290 300 uu_avl_last(uu_avl_t *ap)
291 301 {
292 302 return (avl_last(&ap->ua_tree));
293 303 }
294 304
295 305 void *
296 306 uu_avl_next(uu_avl_t *ap, void *node)
297 307 {
298 308 return (AVL_NEXT(&ap->ua_tree, node));
299 309 }
300 310
301 311 void *
302 312 uu_avl_prev(uu_avl_t *ap, void *node)
303 313 {
304 314 return (AVL_PREV(&ap->ua_tree, node));
305 315 }
306 316
307 317 static void
308 318 _avl_walk_init(uu_avl_walk_t *wp, uu_avl_t *ap, uint32_t flags)
309 319 {
310 320 uu_avl_walk_t *next, *prev;
311 321
312 322 int robust = (flags & UU_WALK_ROBUST);
313 323 int direction = (flags & UU_WALK_REVERSE)? -1 : 1;
314 324
315 325 (void) memset(wp, 0, sizeof (*wp));
316 326 wp->uaw_avl = ap;
317 327 wp->uaw_robust = robust;
318 328 wp->uaw_dir = direction;
319 329
320 330 if (direction > 0)
321 331 wp->uaw_next_result = avl_first(&ap->ua_tree);
322 332 else
323 333 wp->uaw_next_result = avl_last(&ap->ua_tree);
324 334
325 335 if (ap->ua_debug || robust) {
326 336 wp->uaw_next = next = &ap->ua_null_walk;
327 337 wp->uaw_prev = prev = next->uaw_prev;
328 338 next->uaw_prev = wp;
329 339 prev->uaw_next = wp;
330 340 }
331 341 }
332 342
333 343 static void *
334 344 _avl_walk_advance(uu_avl_walk_t *wp, uu_avl_t *ap)
335 345 {
336 346 void *np = wp->uaw_next_result;
337 347
338 348 avl_tree_t *t = &ap->ua_tree;
339 349
340 350 if (np == NULL)
341 351 return (NULL);
342 352
343 353 wp->uaw_next_result = (wp->uaw_dir > 0)? AVL_NEXT(t, np) :
344 354 AVL_PREV(t, np);
345 355
346 356 return (np);
347 357 }
348 358
349 359 static void
350 360 _avl_walk_fini(uu_avl_walk_t *wp)
351 361 {
352 362 if (wp->uaw_next != NULL) {
353 363 wp->uaw_next->uaw_prev = wp->uaw_prev;
354 364 wp->uaw_prev->uaw_next = wp->uaw_next;
355 365 wp->uaw_next = NULL;
356 366 wp->uaw_prev = NULL;
357 367 }
358 368 wp->uaw_avl = NULL;
359 369 wp->uaw_next_result = NULL;
360 370 }
361 371
362 372 uu_avl_walk_t *
363 373 uu_avl_walk_start(uu_avl_t *ap, uint32_t flags)
364 374 {
365 375 uu_avl_walk_t *wp;
366 376
367 377 if (flags & ~(UU_WALK_ROBUST | UU_WALK_REVERSE)) {
368 378 uu_set_error(UU_ERROR_UNKNOWN_FLAG);
369 379 return (NULL);
370 380 }
371 381
372 382 wp = uu_zalloc(sizeof (*wp));
373 383 if (wp == NULL) {
374 384 uu_set_error(UU_ERROR_NO_MEMORY);
375 385 return (NULL);
376 386 }
377 387
378 388 _avl_walk_init(wp, ap, flags);
379 389 return (wp);
380 390 }
381 391
382 392 void *
383 393 uu_avl_walk_next(uu_avl_walk_t *wp)
384 394 {
385 395 return (_avl_walk_advance(wp, wp->uaw_avl));
386 396 }
387 397
388 398 void
389 399 uu_avl_walk_end(uu_avl_walk_t *wp)
390 400 {
391 401 _avl_walk_fini(wp);
392 402 uu_free(wp);
393 403 }
394 404
395 405 int
396 406 uu_avl_walk(uu_avl_t *ap, uu_walk_fn_t *func, void *private, uint32_t flags)
397 407 {
398 408 void *e;
399 409 uu_avl_walk_t my_walk;
400 410
401 411 int status = UU_WALK_NEXT;
402 412
403 413 if (flags & ~(UU_WALK_ROBUST | UU_WALK_REVERSE)) {
404 414 uu_set_error(UU_ERROR_UNKNOWN_FLAG);
405 415 return (-1);
406 416 }
407 417
408 418 _avl_walk_init(&my_walk, ap, flags);
409 419 while (status == UU_WALK_NEXT &&
410 420 (e = _avl_walk_advance(&my_walk, ap)) != NULL)
411 421 status = (*func)(e, private);
412 422 _avl_walk_fini(&my_walk);
413 423
414 424 if (status >= 0)
415 425 return (0);
416 426 uu_set_error(UU_ERROR_CALLBACK_FAILED);
417 427 return (-1);
418 428 }
419 429
420 430 void
421 431 uu_avl_remove(uu_avl_t *ap, void *elem)
422 432 {
423 433 uu_avl_walk_t *wp;
424 434 uu_avl_pool_t *pp = ap->ua_pool;
425 435 uintptr_t *na = NODE_ARRAY(pp, elem);
426 436
427 437 if (ap->ua_debug) {
428 438 /*
429 439 * invalidate outstanding uu_avl_index_ts.
430 440 */
431 441 ap->ua_index = INDEX_NEXT(ap->ua_index);
432 442 }
433 443
434 444 /*
435 445 * Robust walkers must be advanced if we are removing the node
436 446 * they are currently using. In debug mode, non-robust walkers
437 447 * are also on the walker list.
438 448 */
439 449 for (wp = ap->ua_null_walk.uaw_next; wp != &ap->ua_null_walk;
440 450 wp = wp->uaw_next) {
441 451 if (wp->uaw_robust) {
442 452 if (elem == wp->uaw_next_result)
443 453 (void) _avl_walk_advance(wp, ap);
444 454 } else if (wp->uaw_next_result != NULL) {
445 455 uu_panic("uu_avl_remove(%p, %p): active non-robust "
446 456 "walker\n", (void *)ap, elem);
447 457 }
448 458 }
449 459
450 460 avl_remove(&ap->ua_tree, elem);
451 461
452 462 na[0] = POOL_TO_MARKER(pp);
453 463 na[1] = 0;
454 464 }
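
For reference, the robust-walker pattern that makes such removal safe, sketched with hypothetical foo_avl and should_drop() names:

    uu_avl_walk_t *wp;
    foo_t *f;

    if ((wp = uu_avl_walk_start(foo_avl, UU_WALK_ROBUST)) != NULL) {
    	while ((f = uu_avl_walk_next(wp)) != NULL) {
    		if (should_drop(f))
    			/* safe: uu_avl_remove() advances robust walkers */
    			uu_avl_remove(foo_avl, f);
    	}
    	uu_avl_walk_end(wp);
    }
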
455 465
456 466 void *
457 467 uu_avl_teardown(uu_avl_t *ap, void **cookie)
458 468 {
459 469 void *elem = avl_destroy_nodes(&ap->ua_tree, cookie);
460 470
461 471 if (elem != NULL) {
462 472 uu_avl_pool_t *pp = ap->ua_pool;
463 473 uintptr_t *na = NODE_ARRAY(pp, elem);
464 474
465 475 na[0] = POOL_TO_MARKER(pp);
466 476 na[1] = 0;
467 477 }
468 478 return (elem);
469 479 }
470 480
471 481 void *
472 482 uu_avl_find(uu_avl_t *ap, void *elem, void *private, uu_avl_index_t *out)
473 483 {
474 484 struct uu_avl_node_compare_info info;
475 485 void *result;
476 486
477 487 info.ac_compare = ap->ua_pool->uap_cmp;
478 488 info.ac_private = private;
479 489 info.ac_right = elem;
480 490 info.ac_found = NULL;
481 491
482 492 result = avl_find(&ap->ua_tree, &info, out);
483 493 if (out != NULL)
484 494 *out = INDEX_ENCODE(ap, *out);
485 495
486 496 if (ap->ua_debug && result != NULL)
487 497 uu_panic("uu_avl_find: internal error: avl_find succeeded\n");
488 498
489 499 return (info.ac_found);
490 500 }
491 501
492 502 void
493 503 uu_avl_insert(uu_avl_t *ap, void *elem, uu_avl_index_t idx)
494 504 {
495 505 if (ap->ua_debug) {
496 506 uu_avl_pool_t *pp = ap->ua_pool;
497 507 uintptr_t *na = NODE_ARRAY(pp, elem);
498 508
499 509 if (na[1] != 0)
500 510 uu_panic("uu_avl_insert(%p, %p, %p): node already "
501 511 "in tree, or corrupt\n",
502 512 (void *)ap, elem, (void *)idx);
503 513 if (na[0] == 0)
504 514 uu_panic("uu_avl_insert(%p, %p, %p): node not "
505 515 "initialized\n",
506 516 (void *)ap, elem, (void *)idx);
507 517 if (na[0] != POOL_TO_MARKER(pp))
508 518 uu_panic("uu_avl_insert(%p, %p, %p): node from "
509 519 "other pool, or corrupt\n",
510 520 (void *)ap, elem, (void *)idx);
511 521
512 522 if (!INDEX_VALID(ap, idx))
513 523 uu_panic("uu_avl_insert(%p, %p, %p): %s\n",
514 524 (void *)ap, elem, (void *)idx,
515 525 INDEX_CHECK(idx)? "outdated index" :
516 526 "invalid index");
517 527
518 528 /*
519 529 * invalidate outstanding uu_avl_index_ts.
520 530 */
521 531 ap->ua_index = INDEX_NEXT(ap->ua_index);
522 532 }
523 533 avl_insert(&ap->ua_tree, elem, INDEX_DECODE(idx));
524 534 }
525 535
526 536 void *
527 537 uu_avl_nearest_next(uu_avl_t *ap, uu_avl_index_t idx)
528 538 {
529 539 if (ap->ua_debug && !INDEX_VALID(ap, idx))
530 540 uu_panic("uu_avl_nearest_next(%p, %p): %s\n",
531 541 (void *)ap, (void *)idx, INDEX_CHECK(idx)?
532 542 "outdated index" : "invalid index");
533 543 return (avl_nearest(&ap->ua_tree, INDEX_DECODE(idx), AVL_AFTER));
534 544 }
535 545
536 546 void *
537 547 uu_avl_nearest_prev(uu_avl_t *ap, uu_avl_index_t idx)
538 548 {
539 549 if (ap->ua_debug && !INDEX_VALID(ap, idx))
540 550 uu_panic("uu_avl_nearest_prev(%p, %p): %s\n",
541 551 (void *)ap, (void *)idx, INDEX_CHECK(idx)?
542 552 "outdated index" : "invalid index");
543 553 return (avl_nearest(&ap->ua_tree, INDEX_DECODE(idx), AVL_BEFORE));
544 554 }
545 555
546 556 /*
547 557 * called from uu_lockup() and uu_release(), as part of our fork1()-safety.
548 558 */
549 559 void
550 560 uu_avl_lockup(void)
551 561 {
552 562 uu_avl_pool_t *pp;
553 563
554 564 (void) pthread_mutex_lock(&uu_apool_list_lock);
555 565 for (pp = uu_null_apool.uap_next; pp != &uu_null_apool;
556 566 pp = pp->uap_next)
557 567 (void) pthread_mutex_lock(&pp->uap_lock);
558 568 }
559 569
560 570 void
561 571 uu_avl_release(void)
562 572 {
563 573 uu_avl_pool_t *pp;
564 574
565 575 for (pp = uu_null_apool.uap_next; pp != &uu_null_apool;
566 576 pp = pp->uap_next)
567 577 (void) pthread_mutex_unlock(&pp->uap_lock);
568 578 (void) pthread_mutex_unlock(&uu_apool_list_lock);
569 579 }
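
The fork1()-safety referred to above is the standard pthread_atfork() discipline: take every pool lock before the fork, release in both parent and child. A hedged sketch of the registration (the real hookup lives elsewhere in libuutil; register_fork_handlers() is hypothetical):

    #include <pthread.h>

    extern void uu_avl_lockup(void);
    extern void uu_avl_release(void);

    void
    register_fork_handlers(void)
    {
    	/*
    	 * Locks held across fork1() would be permanently stuck in the
    	 * child, whose owning threads no longer exist; releasing them
    	 * in both processes avoids that.
    	 */
    	(void) pthread_atfork(uu_avl_lockup, uu_avl_release,
    	    uu_avl_release);
    }
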