XXXX introduce drv_sectohz
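
This change adds drv_sectohz(), a DDI convenience routine that converts a
number of seconds to system clock ticks, complementing the existing
drv_usectohz() (microseconds to ticks) and drv_hztousec() (ticks to
microseconds) conversions in usr/src/uts/common/os/ddi.c.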
--- old/usr/src/uts/common/os/ddi.c
+++ new/usr/src/uts/common/os/ddi.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
22 22 /* All Rights Reserved */
23 23
24 24
25 25 /*
26 26 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
27 27 * Use is subject to license terms.
28 28 */
29 +/*
30 + * Copyright 2015 Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
31 + */
29 32
30 33 /*
31 34 * UNIX Device Driver Interface functions
32 35 *
33 36 * This file contains functions that are to be added to the kernel
34 37 * to put the interface presented to drivers in conformance with
35 38 * the DDI standard. Of the functions added to the kernel, 17 are
36 39 * function equivalents of existing macros in sysmacros.h,
37 40 * stream.h, and param.h
38 41 *
39 42 * Additional functions -- drv_getparm(), drv_setparm(),
40 43 * getrbuf(), freerbuf(),
41 44 * getemajor(), geteminor(), etoimajor(), itoemajor(), drv_usectohz(),
42 45 * drv_hztousec(), drv_usecwait(), drv_priv(), and kvtoppid() --
43 46 * are specified by DDI to exist in the kernel and are implemented here.
44 47 *
45 48 * Note that putnext() and put() are not in this file. The C version of
46 49 * these routines are in uts/common/os/putnext.c and assembly versions
47 50 * might exist for some architectures.
48 51 */
49 52
50 53 #include <sys/types.h>
51 54 #include <sys/param.h>
52 55 #include <sys/t_lock.h>
53 56 #include <sys/time.h>
54 57 #include <sys/systm.h>
55 58 #include <sys/cpuvar.h>
56 59 #include <sys/signal.h>
57 60 #include <sys/pcb.h>
58 61 #include <sys/user.h>
59 62 #include <sys/errno.h>
60 63 #include <sys/buf.h>
61 64 #include <sys/proc.h>
62 65 #include <sys/cmn_err.h>
63 66 #include <sys/stream.h>
64 67 #include <sys/strsubr.h>
65 68 #include <sys/uio.h>
66 69 #include <sys/kmem.h>
67 70 #include <sys/conf.h>
68 71 #include <sys/cred.h>
69 72 #include <sys/vnode.h>
70 73 #include <sys/file.h>
71 74 #include <sys/poll.h>
72 75 #include <sys/session.h>
73 76 #include <sys/ddi.h>
74 77 #include <sys/sunddi.h>
75 78 #include <sys/esunddi.h>
76 79 #include <sys/mkdev.h>
77 80 #include <sys/debug.h>
78 81 #include <sys/vtrace.h>
79 82
80 83 /*
81 84 * return internal major number corresponding to device
82 85 * number (new format) argument
83 86 */
84 87 major_t
85 88 getmajor(dev_t dev)
86 89 {
87 90 #ifdef _LP64
88 91 return ((major_t)((dev >> NBITSMINOR64) & MAXMAJ64));
89 92 #else
90 93 return ((major_t)((dev >> NBITSMINOR) & MAXMAJ));
91 94 #endif
92 95 }
93 96
94 97 /*
95 98 * return external major number corresponding to device
96 99 * number (new format) argument
97 100 */
98 101 major_t
99 102 getemajor(dev_t dev)
100 103 {
101 104 #ifdef _LP64
102 105 return ((major_t)((dev >> NBITSMINOR64) & MAXMAJ64));
103 106 #else
104 107 return ((major_t)((dev >> NBITSMINOR) & MAXMAJ));
105 108 #endif
106 109 }
107 110
108 111 /*
109 112 * return internal minor number corresponding to device
110 113 * number (new format) argument
111 114 */
112 115 minor_t
113 116 getminor(dev_t dev)
114 117 {
115 118 #ifdef _LP64
116 119 return ((minor_t)(dev & MAXMIN64));
117 120 #else
118 121 return ((minor_t)(dev & MAXMIN));
119 122 #endif
120 123 }
121 124
122 125 /*
123 126 * return external minor number corresponding to device
124 127 * number (new format) argument
125 128 */
126 129 minor_t
127 130 geteminor(dev_t dev)
128 131 {
129 132 #ifdef _LP64
130 133 return ((minor_t)(dev & MAXMIN64));
131 134 #else
132 135 return ((minor_t)(dev & MAXMIN));
133 136 #endif
134 137 }
135 138
136 139 /*
137 140 * return internal major number corresponding to external
138 141 * major number.
139 142 */
140 143 int
141 144 etoimajor(major_t emajnum)
142 145 {
143 146 #ifdef _LP64
144 147 if (emajnum >= devcnt)
145 148 return (-1); /* invalid external major */
146 149 #else
147 150 if (emajnum > MAXMAJ || emajnum >= devcnt)
148 151 return (-1); /* invalid external major */
149 152 #endif
150 153 return ((int)emajnum);
151 154 }
152 155
153 156 /*
154 157 * return external major number corresponding to internal
155 158 * major number argument or -1 if no external major number
156 159 * can be found after lastemaj that maps to the internal
157 160 * major number. Pass a lastemaj val of -1 to start
158 161 * the search initially. (Typical use of this function is
159 162 * of the form:
160 163 *
161 164 * lastemaj = -1;
162 165 * while ((lastemaj = itoemajor(imaj, lastemaj)) != -1)
163 166 * { process major number } )
164 167 */
165 168 int
166 169 itoemajor(major_t imajnum, int lastemaj)
167 170 {
168 171 if (imajnum >= devcnt)
169 172 return (-1);
170 173
171 174 /*
172 175 * if lastemaj == -1 then start from beginning of
173 176 * the (imaginary) MAJOR table
174 177 */
175 178 if (lastemaj < -1)
176 179 return (-1);
177 180
178 181 /*
179 182 * given that there's a 1-1 mapping of internal to external
180 183 * major numbers, searching is somewhat pointless ... let's
181 184 * just go there directly.
182 185 */
183 186 if (++lastemaj < devcnt && imajnum < devcnt)
184 187 return (imajnum);
185 188 return (-1);
186 189 }
187 190
188 191 /*
189 192 * encode external major and minor number arguments into a
190 193 * new format device number
191 194 */
192 195 dev_t
193 196 makedevice(major_t maj, minor_t minor)
194 197 {
195 198 #ifdef _LP64
196 199 return (((dev_t)maj << NBITSMINOR64) | (minor & MAXMIN64));
197 200 #else
198 201 return (((dev_t)maj << NBITSMINOR) | (minor & MAXMIN));
199 202 #endif
200 203 }
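
As a quick illustration of how these accessors compose (a sketch only; maj
stands in for a hypothetical driver major number):

	dev_t dev = makedevice(maj, 42);	/* encode major/minor */
	ASSERT(getmajor(dev) == maj);		/* recover the major */
	ASSERT(getminor(dev) == 42);		/* recover the minor */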
201 204
202 205 /*
203 206 * cmpdev - compress new device format to old device format
204 207 */
205 208 o_dev_t
206 209 cmpdev(dev_t dev)
207 210 {
208 211 major_t major_d;
209 212 minor_t minor_d;
210 213
211 214 #ifdef _LP64
212 215 major_d = dev >> NBITSMINOR64;
213 216 minor_d = dev & MAXMIN64;
214 217 #else
215 218 major_d = dev >> NBITSMINOR;
216 219 minor_d = dev & MAXMIN;
217 220 #endif
218 221 if (major_d > OMAXMAJ || minor_d > OMAXMIN)
219 222 return ((o_dev_t)NODEV);
220 223 return ((o_dev_t)((major_d << ONBITSMINOR) | minor_d));
221 224 }
222 225
223 226 dev_t
224 227 expdev(dev_t dev)
225 228 {
226 229 major_t major_d;
227 230 minor_t minor_d;
228 231
229 232 major_d = ((dev >> ONBITSMINOR) & OMAXMAJ);
230 233 minor_d = (dev & OMAXMIN);
231 234 #ifdef _LP64
232 235 return ((((dev_t)major_d << NBITSMINOR64) | minor_d));
233 236 #else
234 237 return ((((dev_t)major_d << NBITSMINOR) | minor_d));
235 238 #endif
236 239 }
237 240
238 241 /*
239 242 * return true (1) if the message type input is a data
240 243 * message type, 0 otherwise
241 244 */
242 245 #undef datamsg
243 246 int
244 247 datamsg(unsigned char db_type)
245 248 {
246 249 return (db_type == M_DATA || db_type == M_PROTO ||
247 250 db_type == M_PCPROTO || db_type == M_DELAY);
248 251 }
249 252
250 253 /*
251 254 * return a pointer to the other queue in the queue pair of qp
252 255 */
253 256 queue_t *
254 257 OTHERQ(queue_t *q)
255 258 {
256 259 return (_OTHERQ(q));
257 260 }
258 261
259 262 /*
260 263 * return a pointer to the read queue in the queue pair of qp.
261 264 */
262 265 queue_t *
263 266 RD(queue_t *q)
264 267 {
265 268 return (_RD(q));
266 269
267 270 }
268 271
269 272 /*
270 273 * return 1 if the next queue is in the same stream as q, 0 otherwise.
271 274 */
272 275 int
273 276 SAMESTR(queue_t *q)
274 277 {
275 278 return (_SAMESTR(q));
276 279 }
277 280
278 281 /*
279 282 * return a pointer to the write queue in the queue pair of qp.
280 283 */
281 284 queue_t *
282 285 WR(queue_t *q)
283 286 {
284 287 return (_WR(q));
285 288 }
286 289
287 290 /*
288 291 * store value of kernel parameter associated with parm
289 292 */
290 293 int
291 294 drv_getparm(unsigned int parm, void *valuep)
292 295 {
293 296 proc_t *p = curproc;
294 297 time_t now;
295 298
296 299 switch (parm) {
297 300 case UPROCP:
298 301 *(proc_t **)valuep = p;
299 302 break;
300 303 case PPGRP:
301 304 mutex_enter(&p->p_lock);
302 305 *(pid_t *)valuep = p->p_pgrp;
303 306 mutex_exit(&p->p_lock);
304 307 break;
305 308 case LBOLT:
306 309 *(clock_t *)valuep = ddi_get_lbolt();
307 310 break;
308 311 case TIME:
309 312 if ((now = gethrestime_sec()) == 0) {
310 313 timestruc_t ts;
311 314 mutex_enter(&tod_lock);
312 315 ts = tod_get();
313 316 mutex_exit(&tod_lock);
314 317 *(time_t *)valuep = ts.tv_sec;
315 318 } else {
316 319 *(time_t *)valuep = now;
317 320 }
318 321 break;
319 322 case PPID:
320 323 *(pid_t *)valuep = p->p_pid;
321 324 break;
322 325 case PSID:
323 326 mutex_enter(&p->p_splock);
324 327 *(pid_t *)valuep = p->p_sessp->s_sid;
325 328 mutex_exit(&p->p_splock);
326 329 break;
327 330 case UCRED:
328 331 *(cred_t **)valuep = CRED();
329 332 break;
330 333 default:
331 334 return (-1);
332 335 }
333 336
334 337 return (0);
335 338 }
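
A hedged sketch of typical drv_getparm() usage: the caller supplies storage
matching the parameter's type and must check for the -1 failure return.

	time_t now;

	if (drv_getparm(TIME, &now) == -1)
		return (EINVAL);	/* unrecognized parameter */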
336 339
337 340 /*
338 341 * set value of kernel parameter associated with parm
339 342 */
340 343 int
341 344 drv_setparm(unsigned int parm, unsigned long value)
342 345 {
343 346 switch (parm) {
344 347 case SYSRINT:
345 348 CPU_STATS_ADDQ(CPU, sys, rcvint, value);
346 349 break;
347 350 case SYSXINT:
348 351 CPU_STATS_ADDQ(CPU, sys, xmtint, value);
349 352 break;
350 353 case SYSMINT:
351 354 CPU_STATS_ADDQ(CPU, sys, mdmint, value);
352 355 break;
353 356 case SYSRAWC:
354 357 CPU_STATS_ADDQ(CPU, sys, rawch, value);
355 358 break;
356 359 case SYSCANC:
357 360 CPU_STATS_ADDQ(CPU, sys, canch, value);
358 361 break;
359 362 case SYSOUTC:
360 363 CPU_STATS_ADDQ(CPU, sys, outch, value);
361 364 break;
362 365 default:
363 366 return (-1);
364 367 }
365 368
366 369 return (0);
367 370 }
368 371
369 372 /*
370 373 * allocate space for buffer header and return pointer to it.
371 374 * preferred means of obtaining space for a local buf header.
372 375 * returns pointer to buf upon success, NULL for failure
373 376 */
374 377 struct buf *
375 378 getrbuf(int sleep)
376 379 {
377 380 struct buf *bp;
378 381
379 382 bp = kmem_alloc(sizeof (struct buf), sleep);
380 383 if (bp == NULL)
381 384 return (NULL);
382 385 bioinit(bp);
383 386
384 387 return (bp);
385 388 }
386 389
387 390 /*
388 391 * free up space allocated by getrbuf()
389 392 */
390 393 void
391 394 freerbuf(struct buf *bp)
392 395 {
393 396 biofini(bp);
394 397 kmem_free(bp, sizeof (struct buf));
395 398 }
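
For illustration, the usual getrbuf()/freerbuf() pairing might look like this
sketch (KM_NOSLEEP chosen so the allocation can fail rather than block):

	struct buf *bp;

	if ((bp = getrbuf(KM_NOSLEEP)) == NULL)
		return (ENOMEM);	/* no memory, and we must not sleep */
	/* ... initialize bp further and perform the I/O ... */
	freerbuf(bp);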
396 399
397 400 /*
398 401 * convert byte count input to logical page units
399 402 * (byte counts that are not a page-size multiple
400 403 * are rounded down)
401 404 */
402 405 pgcnt_t
403 406 btop(size_t numbytes)
404 407 {
405 408 return (numbytes >> PAGESHIFT);
406 409 }
407 410
408 411 /*
409 412 * convert byte count input to logical page units
410 413 * (byte counts that are not a page-size multiple
411 414 * are rounded up)
412 415 */
413 416 pgcnt_t
414 417 btopr(size_t numbytes)
415 418 {
416 419 return ((numbytes + PAGEOFFSET) >> PAGESHIFT);
417 420 }
418 421
419 422 /*
420 423 * convert size in pages to bytes.
421 424 */
422 425 size_t
423 426 ptob(pgcnt_t numpages)
424 427 {
425 428 return (numpages << PAGESHIFT);
426 429 }
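
The three page conversions differ only in direction and rounding; for
example, assuming a 4K page size:

	btop(4096);	/* == 1 */
	btop(4097);	/* == 1, partial pages round down */
	btopr(4097);	/* == 2, partial pages round up */
	ptob(2);	/* == 8192 bytes */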
427 430
428 431 #define MAXCLOCK_T LONG_MAX
429 432
430 433 /*
431 434 * Convert from system time units (hz) to microseconds.
432 435 *
433 436 * If ticks <= 0, return 0.
434 437 * If converting ticks to usecs would overflow, return MAXCLOCK_T.
435 438 * Otherwise, convert ticks to microseconds.
436 439 */
437 440 clock_t
438 441 drv_hztousec(clock_t ticks)
439 442 {
440 443 if (ticks <= 0)
441 444 return (0);
442 445
443 446 if (ticks > MAXCLOCK_T / usec_per_tick)
444 447 return (MAXCLOCK_T);
445 448
446 449 return (TICK_TO_USEC(ticks));
447 450 }
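
A sketch of a common use: measuring an elapsed interval in microseconds from
lbolt deltas, since ddi_get_lbolt() returns ticks.

	clock_t start, elapsed_us;

	start = ddi_get_lbolt();
	/* ... do the timed work ... */
	elapsed_us = drv_hztousec(ddi_get_lbolt() - start);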
448 451
449 452
450 453 /*
451 454 * Convert from microseconds to system time units (hz), rounded up.
452 455 *
453 456 * If microsecs <= 0, return 0.
454 457 * Otherwise, convert microseconds to ticks, rounding up.
455 458 */
456 459 clock_t
457 460 drv_usectohz(clock_t microsecs)
458 461 {
459 462 if (microsecs <= 0)
460 463 return (0);
461 464
462 465 return (USEC_TO_TICK_ROUNDUP(microsecs));
466 +}
467 +
468 +/*
469 + * Convert from seconds to system time units (hz).
470 + *
471 + * If secs <= 0, return 0.
472 + * Otherwise, convert seconds to ticks, rounding up.
473 + */
474 +clock_t
475 +drv_sectohz(clock_t secs)
476 +{
477 + if (secs <= 0)
478 + return (0);
479 +
480 + return (SEC_TO_TICK(secs));
463 481 }
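
The point of the new routine is readability at call sites that think in whole
seconds. A before/after sketch (my_timeout_fn is a hypothetical callback;
MICROSEC is the usual 1000000 constant):

	timeout_id_t tid;

	/* before: seconds had to be spelled out as microseconds */
	tid = timeout(my_timeout_fn, arg, drv_usectohz(5 * MICROSEC));

	/* after: say seconds directly */
	tid = timeout(my_timeout_fn, arg, drv_sectohz(5));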
464 482
465 483 #ifdef sun
466 484 /*
467 485 * drv_usecwait implemented in each architecture's machine
468 486 * specific code somewhere. For sparc, it is the alternate entry
469 487 * to usec_delay (eventually usec_delay goes away). See
470 488 * sparc/os/ml/sparc_subr.s
471 489 */
472 490 #endif
473 491
474 492 /*
475 493 * bcanputnext, canputnext assume called from timeout, bufcall,
476 494 * or esballoc free routines. since these are driven by
477 495 * clock interrupts, instead of system calls the appropriate plumbing
478 496 * locks have not been acquired.
479 497 */
480 498 int
481 499 bcanputnext(queue_t *q, unsigned char band)
482 500 {
483 501 int ret;
484 502
485 503 claimstr(q);
486 504 ret = bcanput(q->q_next, band);
487 505 releasestr(q);
488 506 return (ret);
489 507 }
490 508
491 509 int
492 510 canputnext(queue_t *q)
493 511 {
494 512 queue_t *qofsq = q;
495 513 struct stdata *stp = STREAM(q);
496 514 kmutex_t *sdlock;
497 515
498 516 TRACE_1(TR_FAC_STREAMS_FR, TR_CANPUTNEXT_IN,
499 517 "canputnext?:%p\n", q);
500 518
501 519 if (stp->sd_ciputctrl != NULL) {
502 520 int ix = CPU->cpu_seqid & stp->sd_nciputctrl;
503 521 sdlock = &stp->sd_ciputctrl[ix].ciputctrl_lock;
504 522 mutex_enter(sdlock);
505 523 } else
506 524 mutex_enter(sdlock = &stp->sd_reflock);
507 525
508 526 /* get next module forward with a service queue */
509 527 q = q->q_next->q_nfsrv;
510 528 ASSERT(q != NULL);
511 529
512 530 /* this is for loopback transports, they should not do a canputnext */
513 531 ASSERT(STRMATED(q->q_stream) || STREAM(q) == STREAM(qofsq));
514 532
515 533 if (!(q->q_flag & QFULL)) {
516 534 mutex_exit(sdlock);
517 535 TRACE_2(TR_FAC_STREAMS_FR, TR_CANPUTNEXT_OUT,
518 536 "canputnext:%p %d", q, 1);
519 537 return (1);
520 538 }
521 539
522 540 if (sdlock != &stp->sd_reflock) {
523 541 mutex_exit(sdlock);
524 542 mutex_enter(&stp->sd_reflock);
525 543 }
526 544
527 545 /* the above is the most frequently used path */
528 546 stp->sd_refcnt++;
529 547 ASSERT(stp->sd_refcnt != 0); /* Wraparound */
530 548 mutex_exit(&stp->sd_reflock);
531 549
532 550 mutex_enter(QLOCK(q));
533 551 if (q->q_flag & QFULL) {
534 552 q->q_flag |= QWANTW;
535 553 mutex_exit(QLOCK(q));
536 554 TRACE_2(TR_FAC_STREAMS_FR, TR_CANPUTNEXT_OUT,
537 555 "canputnext:%p %d", q, 0);
538 556 releasestr(qofsq);
539 557
540 558 return (0);
541 559 }
542 560 mutex_exit(QLOCK(q));
543 561 TRACE_2(TR_FAC_STREAMS_FR, TR_CANPUTNEXT_OUT, "canputnext:%p %d", q, 1);
544 562 releasestr(qofsq);
545 563
546 564 return (1);
547 565 }
548 566
549 567
550 568 /*
551 569 * Open has progressed to the point where it is safe to send/receive messages.
552 570 *
553 571 * "qprocson enables the put and service routines of the driver
554 572 * or module... Prior to the call to qprocson, the put and service
555 573 * routines of a newly pushed module or newly opened driver are
556 574 * disabled. For the module, messages flow around it as if it
557 575 * were not present in the stream... qprocson must be called by
558 576 * the first open of a module or driver after allocation and
559 577 * initialization of any resource on which the put and service
560 578 * routines depend."
561 579 *
562 580 * Note that before calling qprocson a module/driver could itself cause its
563 581 * put or service procedures to be run by using put() or qenable().
564 582 */
565 583 void
566 584 qprocson(queue_t *q)
567 585 {
568 586 ASSERT(q->q_flag & QREADR);
569 587 /*
570 588 * Do not call insertq() if it is a re-open. But if _QINSERTING
571 589 * is set, q_next will not be NULL and we need to call insertq().
572 590 */
573 591 if ((q->q_next == NULL && WR(q)->q_next == NULL) ||
574 592 (q->q_flag & _QINSERTING))
575 593 insertq(STREAM(q), q);
576 594 }
577 595
578 596 /*
579 597 * Close has reached a point where it can no longer allow put/service
580 598 * into the queue.
581 599 *
582 600 * "qprocsoff disables the put and service routines of the driver
583 601 * or module... When the routines are disabled in a module, messages
584 602 * flow around the module as if it were not present in the stream.
585 603 * qprocsoff must be called by the close routine of a driver or module
586 604 * before deallocating any resources on which the driver/module's
587 605 * put and service routines depend. qprocsoff will remove the
588 606 * queue's service routines from the list of service routines to be
589 607 * run and waits until any concurrent put or service routines are
590 608 * finished."
591 609 *
592 610 * Note that after calling qprocsoff a module/driver could itself cause its
593 611 * put procedures to be run by using put().
594 612 */
595 613 void
596 614 qprocsoff(queue_t *q)
597 615 {
598 616 ASSERT(q->q_flag & QREADR);
599 617 if (q->q_flag & QWCLOSE) {
600 618 /* Called more than once */
601 619 return;
602 620 }
603 621 disable_svc(q);
604 622 removeq(q);
605 623 }
606 624
607 625 /*
608 626 * "freezestr() freezes the state of the entire STREAM containing
609 627 * the queue pair q. A frozen STREAM blocks any thread
610 628 * attempting to enter any open, close, put or service routine
611 629 * belonging to any queue instance in the STREAM, and blocks
612 630 * any thread currently within the STREAM if it attempts to put
613 631 * messages onto or take messages off of any queue within the
614 632 * STREAM (with the sole exception of the caller). Threads
615 633 * blocked by this mechanism remain so until the STREAM is
616 634 * thawed by a call to unfreezestr().
617 635 *
618 636 * Use strblock to set SQ_FROZEN in all syncqs in the stream (prevents
619 637 * further entry into put, service, open, and close procedures) and
620 638 * grab (and hold) all the QLOCKs in the stream (to block putq, getq etc.)
621 639 *
622 640 * Note: this has to be the only code that acquires one QLOCK while holding
623 641 * another QLOCK (otherwise we would have locking hierarchy/ordering violations.)
624 642 */
625 643 void
626 644 freezestr(queue_t *q)
627 645 {
628 646 struct stdata *stp = STREAM(q);
629 647
630 648 /*
631 649 * Increment refcnt to prevent q_next from changing during the strblock
632 650 * as well as while the stream is frozen.
633 651 */
634 652 claimstr(RD(q));
635 653
636 654 strblock(q);
637 655 ASSERT(stp->sd_freezer == NULL);
638 656 stp->sd_freezer = curthread;
639 657 for (q = stp->sd_wrq; q != NULL; q = SAMESTR(q) ? q->q_next : NULL) {
640 658 mutex_enter(QLOCK(q));
641 659 mutex_enter(QLOCK(RD(q)));
642 660 }
643 661 }
644 662
645 663 /*
646 664 * Undo what freezestr did.
647 665 * Have to drop the QLOCKs before the strunblock since strunblock will
648 666 * potentially call other put procedures.
649 667 */
650 668 void
651 669 unfreezestr(queue_t *q)
652 670 {
653 671 struct stdata *stp = STREAM(q);
654 672 queue_t *q1;
655 673
656 674 for (q1 = stp->sd_wrq; q1 != NULL;
657 675 q1 = SAMESTR(q1) ? q1->q_next : NULL) {
658 676 mutex_exit(QLOCK(q1));
659 677 mutex_exit(QLOCK(RD(q1)));
660 678 }
661 679 ASSERT(stp->sd_freezer == curthread);
662 680 stp->sd_freezer = NULL;
663 681 strunblock(q);
664 682 releasestr(RD(q));
665 683 }
666 684
667 685 /*
668 686 * Used by open and close procedures to "sleep" waiting for messages to
669 687 * arrive. Note: can only be used in open and close procedures.
670 688 *
671 689 * Lower the gate and let in either messages on the syncq (if there are
672 690 * any) or put/service procedures.
673 691 *
674 692 * If the queue has an outer perimeter this will not prevent entry into this
675 693 * syncq (since outer_enter does not set SQ_WRITER on the syncq that gets the
676 694 * exclusive access to the outer perimeter.)
677 695 *
678 696 * Return 0 if the cv_wait_sig was interrupted; otherwise 1.
679 697 *
680 698 * It only makes sense to grab sq_putlocks for !SQ_CIOC sync queues because
681 699 * otherwise put entry points were not blocked in the first place. If this is
682 700 * SQ_CIOC then qwait is used to wait for the service procedure to run since
683 701 * syncq is always SQ_CIPUT if it is SQ_CIOC.
684 702 *
685 703 * Note that SQ_EXCL is dropped and SQ_WANTEXWAKEUP set in sq_flags
686 704 * atomically under sq_putlocks to make sure putnext will not miss a pending
687 705 * wakeup.
688 706 */
689 707 int
690 708 qwait_sig(queue_t *q)
691 709 {
692 710 syncq_t *sq, *outer;
693 711 uint_t flags;
694 712 int ret = 1;
695 713 int is_sq_cioc;
696 714
697 715 /*
698 716 * Perform the same operations as a leavesq(sq, SQ_OPENCLOSE)
699 717 * while detecting all cases where the perimeter is entered
700 718 * so that qwait_sig can return to the caller.
701 719 *
702 720 * Drain the syncq if possible. Otherwise reset SQ_EXCL and
703 721 * wait for a thread to leave the syncq.
704 722 */
705 723 sq = q->q_syncq;
706 724 ASSERT(sq);
707 725 is_sq_cioc = (sq->sq_type & SQ_CIOC) ? 1 : 0;
708 726 ASSERT(sq->sq_outer == NULL || sq->sq_outer->sq_flags & SQ_WRITER);
709 727 outer = sq->sq_outer;
710 728 /*
711 729 * XXX this does not work if there is only an outer perimeter.
712 730 * The semantics of qwait/qwait_sig are undefined in this case.
713 731 */
714 732 if (outer)
715 733 outer_exit(outer);
716 734
717 735 mutex_enter(SQLOCK(sq));
718 736 if (is_sq_cioc == 0) {
719 737 SQ_PUTLOCKS_ENTER(sq);
720 738 }
721 739 flags = sq->sq_flags;
722 740 /*
723 741 * Drop SQ_EXCL and sq_count but hold the SQLOCK
724 742 * to prevent any undetected entry and exit into the perimeter.
725 743 */
726 744 ASSERT(sq->sq_count > 0);
727 745 sq->sq_count--;
728 746
729 747 if (is_sq_cioc == 0) {
730 748 ASSERT(flags & SQ_EXCL);
731 749 flags &= ~SQ_EXCL;
732 750 }
733 751 /*
734 752 * Unblock any thread blocked in an entersq or outer_enter.
735 753 * Note: we do not unblock a thread waiting in qwait/qwait_sig,
736 754 * since that could lead to livelock with two threads in
737 755 * qwait for the same (per module) inner perimeter.
738 756 */
739 757 if (flags & SQ_WANTWAKEUP) {
740 758 cv_broadcast(&sq->sq_wait);
741 759 flags &= ~SQ_WANTWAKEUP;
742 760 }
743 761 sq->sq_flags = flags;
744 762 if ((flags & SQ_QUEUED) && !(flags & SQ_STAYAWAY)) {
745 763 if (is_sq_cioc == 0) {
746 764 SQ_PUTLOCKS_EXIT(sq);
747 765 }
748 766 /* drain_syncq() drops SQLOCK */
749 767 drain_syncq(sq);
750 768 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
751 769 entersq(sq, SQ_OPENCLOSE);
752 770 return (1);
753 771 }
754 772 /*
755 773 * Sleep on sq_exitwait to only be woken up when threads leave the
756 774 * put or service procedures. We can not sleep on sq_wait since an
757 775 * outer_exit in a qwait running in the same outer perimeter would
758 776 * cause a livelock "ping-pong" between two or more qwait'ers.
759 777 */
760 778 do {
761 779 sq->sq_flags |= SQ_WANTEXWAKEUP;
762 780 if (is_sq_cioc == 0) {
763 781 SQ_PUTLOCKS_EXIT(sq);
764 782 }
765 783 ret = cv_wait_sig(&sq->sq_exitwait, SQLOCK(sq));
766 784 if (is_sq_cioc == 0) {
767 785 SQ_PUTLOCKS_ENTER(sq);
768 786 }
769 787 } while (ret && (sq->sq_flags & SQ_WANTEXWAKEUP));
770 788 if (is_sq_cioc == 0) {
771 789 SQ_PUTLOCKS_EXIT(sq);
772 790 }
773 791 mutex_exit(SQLOCK(sq));
774 792
775 793 /*
776 794 * Re-enter the perimeters again
777 795 */
778 796 entersq(sq, SQ_OPENCLOSE);
779 797 return (ret);
780 798 }
781 799
782 800 /*
783 801 * Used by open and close procedures to "sleep" waiting for messages to
784 802 * arrive. Note: can only be used in open and close procedures.
785 803 *
786 804 * Lower the gate and let in either messages on the syncq (if there are
787 805 * any) or put/service procedures.
788 806 *
789 807 * If the queue has an outer perimeter this will not prevent entry into this
790 808 * syncq (since outer_enter does not set SQ_WRITER on the syncq that gets the
791 809 * exclusive access to the outer perimeter.)
792 810 *
793 811 * It only makes sense to grab sq_putlocks for !SQ_CIOC sync queues because
794 812 * otherwise put entry points were not blocked in the first place. If this is
795 813 * SQ_CIOC then qwait is used to wait for the service procedure to run since
796 814 * syncq is always SQ_CIPUT if it is SQ_CIOC.
797 815 *
798 816 * Note that SQ_EXCL is dropped and SQ_WANTEXWAKEUP set in sq_flags
799 817 * atomically under sq_putlocks to make sure putnext will not miss a pending
800 818 * wakeup.
801 819 */
802 820 void
803 821 qwait(queue_t *q)
804 822 {
805 823 syncq_t *sq, *outer;
806 824 uint_t flags;
807 825 int is_sq_cioc;
808 826
809 827 /*
810 828 * Perform the same operations as a leavesq(sq, SQ_OPENCLOSE)
811 829 * while detecting all cases where the perimeter is entered
812 830 * so that qwait can return to the caller.
813 831 *
814 832 * Drain the syncq if possible. Otherwise reset SQ_EXCL and
815 833 * wait for a thread to leave the syncq.
816 834 */
817 835 sq = q->q_syncq;
818 836 ASSERT(sq);
819 837 is_sq_cioc = (sq->sq_type & SQ_CIOC) ? 1 : 0;
820 838 ASSERT(sq->sq_outer == NULL || sq->sq_outer->sq_flags & SQ_WRITER);
821 839 outer = sq->sq_outer;
822 840 /*
823 841 * XXX this does not work if there is only an outer perimeter.
824 842 * The semantics of qwait/qwait_sig are undefined in this case.
825 843 */
826 844 if (outer)
827 845 outer_exit(outer);
828 846
829 847 mutex_enter(SQLOCK(sq));
830 848 if (is_sq_cioc == 0) {
831 849 SQ_PUTLOCKS_ENTER(sq);
832 850 }
833 851 flags = sq->sq_flags;
834 852 /*
835 853 * Drop SQ_EXCL and sq_count but hold the SQLOCK
836 854 * to prevent any undetected entry and exit into the perimeter.
837 855 */
838 856 ASSERT(sq->sq_count > 0);
839 857 sq->sq_count--;
840 858
841 859 if (is_sq_cioc == 0) {
842 860 ASSERT(flags & SQ_EXCL);
843 861 flags &= ~SQ_EXCL;
844 862 }
845 863 /*
846 864 * Unblock any thread blocked in an entersq or outer_enter.
847 865 * Note: we do not unblock a thread waiting in qwait/qwait_sig,
848 866 * since that could lead to livelock with two threads in
849 867 * qwait for the same (per module) inner perimeter.
850 868 */
851 869 if (flags & SQ_WANTWAKEUP) {
852 870 cv_broadcast(&sq->sq_wait);
853 871 flags &= ~SQ_WANTWAKEUP;
854 872 }
855 873 sq->sq_flags = flags;
856 874 if ((flags & SQ_QUEUED) && !(flags & SQ_STAYAWAY)) {
857 875 if (is_sq_cioc == 0) {
858 876 SQ_PUTLOCKS_EXIT(sq);
859 877 }
860 878 /* drain_syncq() drops SQLOCK */
861 879 drain_syncq(sq);
862 880 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
863 881 entersq(sq, SQ_OPENCLOSE);
864 882 return;
865 883 }
866 884 /*
867 885 * Sleep on sq_exitwait to only be woken up when threads leave the
868 886 * put or service procedures. We can not sleep on sq_wait since an
869 887 * outer_exit in a qwait running in the same outer perimeter would
870 888 * cause a livelock "ping-pong" between two or more qwait'ers.
871 889 */
872 890 do {
873 891 sq->sq_flags |= SQ_WANTEXWAKEUP;
874 892 if (is_sq_cioc == 0) {
875 893 SQ_PUTLOCKS_EXIT(sq);
876 894 }
877 895 cv_wait(&sq->sq_exitwait, SQLOCK(sq));
878 896 if (is_sq_cioc == 0) {
879 897 SQ_PUTLOCKS_ENTER(sq);
880 898 }
881 899 } while (sq->sq_flags & SQ_WANTEXWAKEUP);
882 900 if (is_sq_cioc == 0) {
883 901 SQ_PUTLOCKS_EXIT(sq);
884 902 }
885 903 mutex_exit(SQLOCK(sq));
886 904
887 905 /*
888 906 * Re-enter the perimeters again
889 907 */
890 908 entersq(sq, SQ_OPENCLOSE);
891 909 }
892 910
893 911 /*
894 912 * Used for the synchronous streams entrypoints when sleeping outside
895 913 * the perimeters. Must never be called from regular put entrypoint.
896 914 *
897 915 * There's no need to grab sq_putlocks here (which only exist for CIPUT sync
898 916 * queues). If it is a CIPUT sync queue, put entry points were not blocked in
899 917 * the first place by rwnext/infonext, which are treated as put entrypoints
900 918 * for perimeter synchronization purposes.
901 919 *
902 920 * Consolidation private.
903 921 */
904 922 boolean_t
905 923 qwait_rw(queue_t *q)
906 924 {
907 925 syncq_t *sq;
908 926 ulong_t flags;
909 927 boolean_t gotsignal = B_FALSE;
910 928
911 929 /*
912 930 * Perform the same operations as a leavesq(sq, SQ_PUT)
913 931 * while detecting all cases where the perimeter is entered
914 932 * so that qwait_rw can return to the caller.
915 933 *
916 934 * Drain the syncq if possible. Otherwise reset SQ_EXCL and
917 935 * wait for a thread to leave the syncq.
918 936 */
919 937 sq = q->q_syncq;
920 938 ASSERT(sq);
921 939
922 940 mutex_enter(SQLOCK(sq));
923 941 flags = sq->sq_flags;
924 942 /*
925 943 * Drop SQ_EXCL and sq_count but hold the SQLOCK to prevent any
926 944 * undetected entry and exit into the perimeter.
927 945 */
928 946 ASSERT(sq->sq_count > 0);
929 947 sq->sq_count--;
930 948 if (!(sq->sq_type & SQ_CIPUT)) {
931 949 ASSERT(flags & SQ_EXCL);
932 950 flags &= ~SQ_EXCL;
933 951 }
934 952 /*
935 953 * Unblock any thread blocked in an entersq or outer_enter.
936 954 * Note: we do not unblock a thread waiting in qwait/qwait_sig,
937 955 * since that could lead to livelock with two threads in
938 956 * qwait for the same (per module) inner perimeter.
939 957 */
940 958 if (flags & SQ_WANTWAKEUP) {
941 959 cv_broadcast(&sq->sq_wait);
942 960 flags &= ~SQ_WANTWAKEUP;
943 961 }
944 962 sq->sq_flags = flags;
945 963 if ((flags & SQ_QUEUED) && !(flags & SQ_STAYAWAY)) {
946 964 /* drain_syncq() drops SQLOCK */
947 965 drain_syncq(sq);
948 966 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
949 967 entersq(sq, SQ_PUT);
950 968 return (B_FALSE);
951 969 }
952 970 /*
953 971 * Sleep on sq_exitwait to only be woken up when threads leave the
954 972 * put or service procedures. We can not sleep on sq_wait since an
955 973 * outer_exit in a qwait running in the same outer perimeter would
956 974 * cause a livelock "ping-pong" between two or more qwait'ers.
957 975 */
958 976 do {
959 977 sq->sq_flags |= SQ_WANTEXWAKEUP;
960 978 if (cv_wait_sig(&sq->sq_exitwait, SQLOCK(sq)) <= 0) {
961 979 sq->sq_flags &= ~SQ_WANTEXWAKEUP;
962 980 gotsignal = B_TRUE;
963 981 break;
964 982 }
965 983 } while (sq->sq_flags & SQ_WANTEXWAKEUP);
966 984 mutex_exit(SQLOCK(sq));
967 985
968 986 /*
969 987 * Re-enter the perimeters again
970 988 */
971 989 entersq(sq, SQ_PUT);
972 990 return (gotsignal);
973 991 }
974 992
975 993 /*
976 994 * Asynchronously upgrade to exclusive access at either the inner or
977 995 * outer perimeter.
978 996 */
979 997 void
980 998 qwriter(queue_t *q, mblk_t *mp, void (*func)(), int perim)
981 999 {
982 1000 if (perim == PERIM_INNER)
983 1001 qwriter_inner(q, mp, func);
984 1002 else if (perim == PERIM_OUTER)
985 1003 qwriter_outer(q, mp, func);
986 1004 else
987 1005 panic("qwriter: wrong \"perimeter\" parameter");
988 1006 }
989 1007
990 1008 /*
991 1009 * Schedule a synchronous streams timeout
992 1010 */
993 1011 timeout_id_t
994 1012 qtimeout(queue_t *q, void (*func)(void *), void *arg, clock_t tim)
995 1013 {
996 1014 syncq_t *sq;
997 1015 callbparams_t *cbp;
998 1016 timeout_id_t tid;
999 1017
1000 1018 sq = q->q_syncq;
1001 1019 /*
1002 1020 * you don't want the timeout firing before its params are set up;
1003 1021 * callbparams_alloc() acquires SQLOCK(sq)
1004 1022 * qtimeout() can't fail and can't sleep, so panic if memory is not
1005 1023 * available.
1006 1024 */
1007 1025 cbp = callbparams_alloc(sq, func, arg, KM_NOSLEEP | KM_PANIC);
1008 1026 /*
1009 1027 * the callbflags in the sq use the same flags. They get anded
1010 1028 * in the callbwrapper to determine if a qun* of this callback type
1011 1029 * is required. This is not a request to cancel.
1012 1030 */
1013 1031 cbp->cbp_flags = SQ_CANCEL_TOUT;
1014 1032 /* check new timeout version return codes */
1015 1033 tid = timeout(qcallbwrapper, cbp, tim);
1016 1034 cbp->cbp_id = (callbparams_id_t)tid;
1017 1035 mutex_exit(SQLOCK(sq));
1018 1036 /* use local id because the cbp memory could be freed by now */
1019 1037 return (tid);
1020 1038 }
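
A sketch of the qtimeout()/quntimeout() pairing (my_func and arg are
hypothetical); as the quntimeout() comment below notes, callbacks must be
cancelled before close completes:

	timeout_id_t id;

	id = qtimeout(q, my_func, arg, drv_sectohz(1));
	/* ... */
	(void) quntimeout(q, id);	/* cancel before close completes */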
1021 1039
1022 1040 bufcall_id_t
1023 1041 qbufcall(queue_t *q, size_t size, uint_t pri, void (*func)(void *), void *arg)
1024 1042 {
1025 1043 syncq_t *sq;
1026 1044 callbparams_t *cbp;
1027 1045 bufcall_id_t bid;
1028 1046
1029 1047 sq = q->q_syncq;
1030 1048 /*
1031 1049 * you don't want the timeout firing before its params are set up;
1032 1050 * callbparams_alloc() acquires SQLOCK(sq) if successful.
1033 1051 */
1034 1052 cbp = callbparams_alloc(sq, func, arg, KM_NOSLEEP);
1035 1053 if (cbp == NULL)
1036 1054 return ((bufcall_id_t)0);
1037 1055
1038 1056 /*
1039 1057 * the callbflags in the sq use the same flags. They get anded
1040 1058 * in the callbwrapper to determine if a qun* of this callback type
1041 1059 * is required. This is not a request to cancel.
1042 1060 */
1043 1061 cbp->cbp_flags = SQ_CANCEL_BUFCALL;
1044 1062 /* check new timeout version return codes */
1045 1063 bid = bufcall(size, pri, qcallbwrapper, cbp);
1046 1064 cbp->cbp_id = (callbparams_id_t)bid;
1047 1065 if (bid == 0) {
1048 1066 callbparams_free(sq, cbp);
1049 1067 }
1050 1068 mutex_exit(SQLOCK(sq));
1051 1069 /* use local id because the params memory could be freed by now */
1052 1070 return (bid);
1053 1071 }
1054 1072
1055 1073 /*
1056 1074 * cancel a timeout callback which enters the inner perimeter.
1057 1075 * cancelling of all callback types on a given syncq is serialized.
1058 1076 * the SQ_CALLB_BYPASSED flag indicates that the callback fn did
1059 1077 * not execute. The quntimeout return value needs to reflect this.
1060 1078 * As with our existing callback programming model - callbacks must
1061 1079 * be cancelled before a close completes - thus ensuring that the sq
1062 1080 * is valid when the callback wrapper is executed.
1063 1081 */
1064 1082 clock_t
1065 1083 quntimeout(queue_t *q, timeout_id_t id)
1066 1084 {
1067 1085 syncq_t *sq = q->q_syncq;
1068 1086 clock_t ret;
1069 1087
1070 1088 mutex_enter(SQLOCK(sq));
1071 1089 /* callbacks are processed serially on each syncq */
1072 1090 while (sq->sq_callbflags & SQ_CALLB_CANCEL_MASK) {
1073 1091 sq->sq_flags |= SQ_WANTWAKEUP;
1074 1092 cv_wait(&sq->sq_wait, SQLOCK(sq));
1075 1093 }
1076 1094 sq->sq_cancelid = (callbparams_id_t)id;
1077 1095 sq->sq_callbflags = SQ_CANCEL_TOUT;
1078 1096 if (sq->sq_flags & SQ_WANTWAKEUP) {
1079 1097 cv_broadcast(&sq->sq_wait);
1080 1098 sq->sq_flags &= ~SQ_WANTWAKEUP;
1081 1099 }
1082 1100 mutex_exit(SQLOCK(sq));
1083 1101 ret = untimeout(id);
1084 1102 mutex_enter(SQLOCK(sq));
1085 1103 if (ret != -1) {
1086 1104 /* The wrapper was never called - need to free based on id */
1087 1105 callbparams_free_id(sq, (callbparams_id_t)id, SQ_CANCEL_TOUT);
1088 1106 }
1089 1107 if (sq->sq_callbflags & SQ_CALLB_BYPASSED) {
1090 1108 ret = 0; /* this was how much time left */
1091 1109 }
1092 1110 sq->sq_callbflags = 0;
1093 1111 if (sq->sq_flags & SQ_WANTWAKEUP) {
1094 1112 cv_broadcast(&sq->sq_wait);
1095 1113 sq->sq_flags &= ~SQ_WANTWAKEUP;
1096 1114 }
1097 1115 mutex_exit(SQLOCK(sq));
1098 1116 return (ret);
1099 1117 }
1100 1118
1101 1119
1102 1120 void
1103 1121 qunbufcall(queue_t *q, bufcall_id_t id)
1104 1122 {
1105 1123 syncq_t *sq = q->q_syncq;
1106 1124
1107 1125 mutex_enter(SQLOCK(sq));
1108 1126 /* callbacks are processed serially on each syncq */
1109 1127 while (sq->sq_callbflags & SQ_CALLB_CANCEL_MASK) {
1110 1128 sq->sq_flags |= SQ_WANTWAKEUP;
1111 1129 cv_wait(&sq->sq_wait, SQLOCK(sq));
1112 1130 }
1113 1131 sq->sq_cancelid = (callbparams_id_t)id;
1114 1132 sq->sq_callbflags = SQ_CANCEL_BUFCALL;
1115 1133 if (sq->sq_flags & SQ_WANTWAKEUP) {
1116 1134 cv_broadcast(&sq->sq_wait);
1117 1135 sq->sq_flags &= ~SQ_WANTWAKEUP;
1118 1136 }
1119 1137 mutex_exit(SQLOCK(sq));
1120 1138 unbufcall(id);
1121 1139 mutex_enter(SQLOCK(sq));
1122 1140 /*
1123 1141 * No indication from unbufcall if the callback has already run.
1124 1142 * Always attempt to free it.
1125 1143 */
1126 1144 callbparams_free_id(sq, (callbparams_id_t)id, SQ_CANCEL_BUFCALL);
1127 1145 sq->sq_callbflags = 0;
1128 1146 if (sq->sq_flags & SQ_WANTWAKEUP) {
1129 1147 cv_broadcast(&sq->sq_wait);
1130 1148 sq->sq_flags &= ~SQ_WANTWAKEUP;
1131 1149 }
1132 1150 mutex_exit(SQLOCK(sq));
1133 1151 }
1134 1152
1135 1153 /*
1136 1154 * Associate the stream with an instance of the bottom driver. This
1137 1155 * function is called by APIs that establish or modify the hardware
1138 1156 * association (ppa) of an open stream. Two examples of such
1139 1157 * post-open(9E) APIs are the dlpi(7p) DL_ATTACH_REQ message, and the
1140 1158 * ndd(1M) "instance=" ioctl(2). This interface may be called from a
1141 1159 * stream driver's wput procedure and from within syncq perimeters,
1142 1160 * so it can't block.
1143 1161 *
1144 1162 * The qassociate() "model" is that it should drive attach(9E), yet it
1145 1163 * can't really do that because driving attach(9E) is a blocking
1146 1164 * operation. Instead, the qassociate() implementation has complex
1147 1165 * dependencies on the implementation behavior of other parts of the
1148 1166 * kernel to ensure all appropriate instances (ones that have not been
1149 1167 * made inaccessible by DR) are attached at stream open() time, and
1150 1168 * that they will not autodetach. The code relies on the fact that an
1151 1169 * open() of a stream that ends up using qassociate() always occurs on
1152 1170 * a minor node created with CLONE_DEV. The open() comes through
1153 1171 * clnopen() and since clnopen() calls ddi_hold_installed_driver() we
1154 1172 * attach all instances and mark them DN_NO_AUTODETACH (given
1155 1173 * DN_DRIVER_HELD is maintained correctly).
1156 1174 *
1157 1175 * Since qassociate() can't really drive attach(9E), there are corner
1158 1176 * cases where the compromise described above leads to qassociate()
1159 1177 * returning failure. This can happen when administrative functions
1160 1178 * that cause detach(9E), such as "update_drv" or "modunload -i", are
1161 1179 * performed on the driver between the time the stream was opened and
1162 1180 * the time its hardware association was established. Although this can
1163 1181 * theoretically be an arbitrary amount of time, in practice the window
1164 1182 * is usually quite small, since applications almost always issue their
1165 1183 * hardware association request immediately after opening the stream,
1166 1184 * and do not typically switch association while open. When these
1167 1185 * corner cases occur, and qassociate() finds the requested instance
1168 1186 * detached, it will return failure. This failure should be propagated
1169 1187 * to the requesting administrative application using the appropriate
1170 1188 * post-open(9E) API error mechanism.
1171 1189 *
1172 1190 * All qassociate() callers are expected to check for and gracefully handle
1173 1191 * failure return, propagating errors back to the requesting administrative
1174 1192 * application.
1175 1193 */
1176 1194 int
1177 1195 qassociate(queue_t *q, int instance)
1178 1196 {
1179 1197 vnode_t *vp;
1180 1198 major_t major;
1181 1199 dev_info_t *dip;
1182 1200
1183 1201 if (instance == -1) {
1184 1202 ddi_assoc_queue_with_devi(q, NULL);
1185 1203 return (0);
1186 1204 }
1187 1205
1188 1206 vp = STREAM(q)->sd_vnode;
1189 1207 major = getmajor(vp->v_rdev);
1190 1208 dip = ddi_hold_devi_by_instance(major, instance,
1191 1209 E_DDI_HOLD_DEVI_NOATTACH);
1192 1210 if (dip == NULL)
1193 1211 return (-1);
1194 1212
1195 1213 ddi_assoc_queue_with_devi(q, dip);
1196 1214 ddi_release_devi(dip);
1197 1215 return (0);
1198 1216 }
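
Per the comment above, callers must handle failure; a minimal sketch from a
hypothetical DL_ATTACH_REQ-style handler, with ppa standing in for the
requested instance:

	if (qassociate(q, ppa) != 0)
		return (EINVAL);	/* instance detached since open */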
1199 1217
1200 1218 /*
1201 1219 * This routine is the SVR4MP 'replacement' for
1202 1220 * hat_getkpfnum. The only major difference is
1203 1221 * the return value for illegal addresses - since
1204 1222 * sunm_getkpfnum() and srmmu_getkpfnum() both
1205 1223 * return '-1' for bogus mappings, we can (more or
1206 1224 * less) return the value directly.
1207 1225 */
1208 1226 ppid_t
1209 1227 kvtoppid(caddr_t addr)
1210 1228 {
1211 1229 return ((ppid_t)hat_getpfnum(kas.a_hat, addr));
1212 1230 }
1213 1231
1214 1232 /*
1215 1233 * This is used to set the timeout value for cv_timed_wait() or
1216 1234 * cv_timedwait_sig().
1217 1235 */
1218 1236 void
1219 1237 time_to_wait(clock_t *now, clock_t time)
1220 1238 {
1221 1239 *now = ddi_get_lbolt() + time;
1222 1240 }