XXXX pass in cpu_pause_func via pause_cpus
--- old/usr/src/uts/sun4/os/prom_subr.c
+++ new/usr/src/uts/sun4/os/prom_subr.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25
26 26 #pragma ident "%Z%%M% %I% %E% SMI"
27 27
28 28 #include <sys/types.h>
29 29 #include <sys/param.h>
30 30 #include <sys/cmn_err.h>
31 31 #include <sys/mutex.h>
32 32 #include <sys/systm.h>
33 33 #include <sys/sysmacros.h>
34 34 #include <sys/machsystm.h>
35 35 #include <sys/archsystm.h>
36 36 #include <sys/x_call.h>
37 37 #include <sys/promif.h>
38 38 #include <sys/prom_isa.h>
39 39 #include <sys/privregs.h>
40 40 #include <sys/vmem.h>
41 41 #include <sys/atomic.h>
42 42 #include <sys/panic.h>
43 43 #include <sys/rwlock.h>
44 44 #include <sys/reboot.h>
45 45 #include <sys/kdi.h>
46 46 #include <sys/kdi_machimpl.h>
47 47
48 48 /*
49 49 * We are called with a pointer to a cell-sized argument array.
50 50 * The service name (the first element of the argument array) is
51 51 * the name of the callback being invoked. When called, we are
52 52 * running on the firmware's trap table as a trusted subroutine
53 53 * of the firmware.
54 54 *
55 55 * We define entry points to allow callback handlers to be dynamically
56 56 * added and removed, to support obpsym, which is a separate module
57 57 * and can be dynamically loaded and unloaded and registers its
58 58 * callback handlers dynamically.
59 59 *
60 60 * Note: the actual callback handler we register is the assembly-
61 61 * language glue, callback_handler, which takes care of switching from
62 62 * a 64-bit stack and environment to a 32-bit stack and environment,
63 63 * and back again if the callback handler returns. callback_handler
64 64 * calls vx_handler to process the callback.
65 65 */
66 66
67 67 static kmutex_t vx_cmd_lock; /* protect vx_cmd table */
68 68
69 69 #define VX_CMD_MAX 10
70 70 #define ENDADDR(a) &a[sizeof (a) / sizeof (a[0])]
71 71 #define vx_cmd_end ((struct vx_cmd *)(ENDADDR(vx_cmd)))
72 72
73 73 static struct vx_cmd {
74 74 char *service; /* Service name */
75 75 int take_tba; /* If Non-zero we take over the tba */
76 76 void (*func)(cell_t *argument_array);
77 77 } vx_cmd[VX_CMD_MAX+1];
78 78
79 79 void
80 80 init_vx_handler(void)
81 81 {
82 82 extern int callback_handler(cell_t *arg_array);
83 83
84 84 /*
85 85 * initialize the lock protecting additions and deletions from
86 86 * the vx_cmd table. At callback time we don't need to grab
87 87 * this lock. Callback handlers do not need to modify the
88 88 * callback handler table.
89 89 */
90 90 mutex_init(&vx_cmd_lock, NULL, MUTEX_DEFAULT, NULL);
91 91
92 92 /*
93 93 * Tell OBP about our callback handler.
94 94 */
95 95 (void) prom_set_callback((void *)callback_handler);
96 96 }
97 97
98 98 /*
99 99 * Add a kernel callback handler to the kernel's list.
100 100 * The table is static, so if you add a callback handler, increase
101 101 * the value of VX_CMD_MAX. Find the first empty slot and use it.
102 102 */
103 103 void
104 104 add_vx_handler(char *name, int flag, void (*func)(cell_t *))
105 105 {
106 106 struct vx_cmd *vp;
107 107
108 108 mutex_enter(&vx_cmd_lock);
109 109 for (vp = vx_cmd; vp < vx_cmd_end; vp++) {
110 110 if (vp->service == NULL) {
111 111 vp->service = name;
112 112 vp->take_tba = flag;
113 113 vp->func = func;
114 114 mutex_exit(&vx_cmd_lock);
115 115 return;
116 116 }
117 117 }
118 118 mutex_exit(&vx_cmd_lock);
119 119
120 120 #ifdef DEBUG
121 121
122 122 /*
123 123 * The table must have enough entries to hold all callback handlers.
124 124 * Increase VX_CMD_MAX if this happens; it shouldn't.
125 125 */
126 126 cmn_err(CE_PANIC, "add_vx_handler <%s>", name);
127 127 /* NOTREACHED */
128 128
129 129 #else /* DEBUG */
130 130
131 131 cmn_err(CE_WARN, "add_vx_handler: Can't add callback handler <%s>",
132 132 name);
133 133
134 134 #endif /* DEBUG */
135 135
136 136 }
137 137
138 138 /*
139 139 * Remove a vx_handler function -- find the name string in the table,
140 140 * and clear it.
141 141 */
142 142 void
143 143 remove_vx_handler(char *name)
144 144 {
145 145 struct vx_cmd *vp;
146 146
147 147 mutex_enter(&vx_cmd_lock);
148 148 for (vp = vx_cmd; vp < vx_cmd_end; vp++) {
149 149 if (vp->service == NULL)
150 150 continue;
151 151 if (strcmp(vp->service, name) != 0)
152 152 continue;
153 153 vp->service = 0;
154 154 vp->take_tba = 0;
155 155 vp->func = 0;
156 156 mutex_exit(&vx_cmd_lock);
157 157 return;
158 158 }
159 159 mutex_exit(&vx_cmd_lock);
160 160 cmn_err(CE_WARN, "remove_vx_handler: <%s> not found", name);
161 161 }
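
The add_vx_handler()/remove_vx_handler() pair above is what a dynamically loaded module such as obpsym uses to hook OBP callbacks. As an illustration only (the service name, flag value, and handler below are assumptions, not code from this webrev), a client module might register and later remove a handler like this:

static void
mymod_callback(cell_t *argument_array)
{
        /* decode cells with p1275_cell2*() and write results back */
}

void
mymod_init(void)
{
        /* a non-zero flag would ask vx_handler() to take over the tba */
        add_vx_handler("my-service", 0, mymod_callback);
}

void
mymod_fini(void)
{
        remove_vx_handler("my-service");
}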
162 162
163 163 int
164 164 vx_handler(cell_t *argument_array)
165 165 {
166 166 char *name;
167 167 struct vx_cmd *vp;
168 168 void *old_tba;
169 169
170 170 name = p1275_cell2ptr(*argument_array);
171 171
172 172 for (vp = vx_cmd; vp < vx_cmd_end; vp++) {
173 173 if (vp->service == (char *)0)
174 174 continue;
175 175 if (strcmp(vp->service, name) != 0)
176 176 continue;
177 177 if (vp->take_tba != 0) {
178 178 reestablish_curthread();
179 179 if (tba_taken_over != 0)
180 180 old_tba = set_tba((void *)&trap_table);
181 181 }
182 182 vp->func(argument_array);
183 183 if ((vp->take_tba != 0) && (tba_taken_over != 0))
184 184 (void) set_tba(old_tba);
185 185 return (0); /* Service name was known */
186 186 }
187 187
188 188 return (-1); /* Service name unknown */
189 189 }
190 190
191 191 /*
192 192 * PROM Locking Primitives
193 193 *
194 194 * These routines are called immediately before and immediately after calling
195 195 * into the firmware. The firmware is single-threaded and assumes that the
196 196 * kernel will implement locking to prevent simultaneous service calls. In
197 197 * addition, some service calls (particularly character rendering) can be
198 198 * slow, so we would like to sleep if we cannot acquire the lock to allow the
199 199 * caller's CPU to continue to perform useful work in the interim. Service
200 200 * routines may also be called early in boot as part of slave CPU startup
201 201 * when mutexes and cvs are not yet available (i.e. they are still running on
202 202 * the prom's TLB handlers and cannot touch curthread). Therefore, these
203 203 * routines must reduce to a simple compare-and-swap spin lock when necessary.
204 204 * Finally, kernel code may wish to acquire the firmware lock before executing
205 205 * a block of code that includes service calls, so we also allow the firmware
206 206 * lock to be acquired recursively by the owning CPU after disabling preemption.
207 207 *
208 208 * To meet these constraints, the lock itself is implemented as a compare-and-
209 209 * swap spin lock on the global prom_cpu pointer. We implement recursion by
210 210 * atomically incrementing the integer prom_holdcnt after acquiring the lock.
211 211 * If the current CPU is an "adult" (determined by testing cpu_m.mutex_ready),
212 212 * we disable preemption before acquiring the lock and leave it disabled once
213 213 * the lock is held. The kern_postprom() routine then enables preemption if
214 214 * we drop the lock and prom_holdcnt returns to zero. If the current CPU is
215 215 * an adult and the lock is held by another adult CPU, we can safely sleep
216 216 * until the lock is released. To do so, we acquire the adaptive prom_mutex
217 217 * and then sleep on prom_cv. Therefore, service routines must not be called
218 218 * from above LOCK_LEVEL on any adult CPU. Finally, if recursive entry is
219 219 * attempted on an adult CPU, we must also verify that curthread matches the
220 220 * saved prom_thread (the original owner) to ensure that low-level interrupt
221 221 * threads do not step on other threads running on the same CPU.
222 222 */
223 223
224 224 static cpu_t *volatile prom_cpu;
225 225 static kthread_t *volatile prom_thread;
226 226 static uint32_t prom_holdcnt;
227 227 static kmutex_t prom_mutex;
228 228 static kcondvar_t prom_cv;
229 229
230 230 /*
231 231 * The debugger uses PROM services, and is thus unable to run if any of the
232 232 * CPUs on the system are executing in the PROM at the time of debugger entry.
233 233 * If a CPU is determined to be in the PROM when the debugger is entered,
234 234 * prom_exit_enter_debugger will be set, thus triggering a programmed debugger
235 235 * entry when the given CPU returns from the PROM. That CPU is then released by
236 236 * the debugger, and is allowed to complete PROM-related work.
237 237 */
238 238 int prom_exit_enter_debugger;
239 239
240 240 void
241 241 kern_preprom(void)
242 242 {
243 243 for (;;) {
244 244 /*
245 245 * Load the current CPU pointer and examine the mutex_ready bit.
246 246 * It doesn't matter if we are preempted here because we are
247 247 * only trying to determine if we are in the *set* of mutex
248 248 * ready CPUs. We cannot disable preemption until we confirm
249 249 * that we are running on a CPU in this set, since a call to
250 250 * kpreempt_disable() requires access to curthread.
251 251 */
252 252 processorid_t cpuid = getprocessorid();
253 253 cpu_t *cp = cpu[cpuid];
254 254 cpu_t *prcp;
255 255
256 256 if (panicstr)
257 257 return; /* just return if we are currently panicking */
258 258
259 259 if (CPU_IN_SET(cpu_ready_set, cpuid) && cp->cpu_m.mutex_ready) {
260 260 /*
261 261 * Disable preemption, and reload the current CPU. We
262 262 * can't move from a mutex_ready cpu to a non-ready cpu
263 263 * so we don't need to re-check cp->cpu_m.mutex_ready.
264 264 */
265 265 kpreempt_disable();
266 266 cp = CPU;
267 267 ASSERT(cp->cpu_m.mutex_ready);
268 268
269 269 /*
270 270 * Try the lock. If we don't get the lock, re-enable
271 271 * preemption and see if we should sleep. If we are
272 272 * already the lock holder, remove the effect of the
273 273 * previous kpreempt_disable() before returning since
274 274 * preemption was disabled by an earlier kern_preprom.
275 275 */
276 276 prcp = casptr((void *)&prom_cpu, NULL, cp);
277 277 if (prcp == NULL ||
278 278 (prcp == cp && prom_thread == curthread)) {
279 279 if (prcp == cp)
280 280 kpreempt_enable();
281 281 break;
282 282 }
283 283
284 284 kpreempt_enable();
285 285
286 286 /*
287 287 * We have to be very careful here since both prom_cpu
288 288 * and prcp->cpu_m.mutex_ready can be changed at any
289 289 * time by a non mutex_ready cpu holding the lock.
290 290 * If the owner is mutex_ready, holding prom_mutex
291 291 * prevents kern_postprom() from completing. If the
292 292 * owner isn't mutex_ready, we only know it will clear
293 293 * prom_cpu before changing cpu_m.mutex_ready, so we
294 294 * issue a membar after checking mutex_ready and then
295 295 * re-verify that prom_cpu is still held by the same
296 296 * cpu before actually proceeding to cv_wait().
297 297 */
298 298 mutex_enter(&prom_mutex);
299 299 prcp = prom_cpu;
300 300 if (prcp != NULL && prcp->cpu_m.mutex_ready != 0) {
301 301 membar_consumer();
302 302 if (prcp == prom_cpu)
303 303 cv_wait(&prom_cv, &prom_mutex);
304 304 }
305 305 mutex_exit(&prom_mutex);
306 306
307 307 } else {
308 308 /*
309 309 * If we are not yet mutex_ready, just attempt to grab
310 310 * the lock. If we get it or already hold it, break.
311 311 */
312 312 ASSERT(getpil() == PIL_MAX);
313 313 prcp = casptr((void *)&prom_cpu, NULL, cp);
314 314 if (prcp == NULL || prcp == cp)
315 315 break;
316 316 }
317 317 }
318 318
319 319 /*
320 320 * We now hold the prom_cpu lock. Increment the hold count by one
321 321 * and assert our current state before returning to the caller.
322 322 */
323 323 atomic_add_32(&prom_holdcnt, 1);
324 324 ASSERT(prom_holdcnt >= 1);
325 325 prom_thread = curthread;
326 326 }
327 327
328 328 /*
329 329 * Drop the prom lock if it is held by the current CPU. If the lock is held
330 330 * recursively, return without clearing prom_cpu. If the hold count is now
331 331 * zero, clear prom_cpu and cv_signal any waiting CPU.
332 332 */
333 333 void
334 334 kern_postprom(void)
335 335 {
336 336 processorid_t cpuid = getprocessorid();
337 337 cpu_t *cp = cpu[cpuid];
338 338
339 339 if (panicstr)
340 340 return; /* do not modify lock further if we have panicked */
341 341
342 342 if (prom_cpu != cp)
343 343 panic("kern_postprom: not owner, cp=%p owner=%p",
344 344 (void *)cp, (void *)prom_cpu);
345 345
346 346 if (prom_holdcnt == 0)
347 347 panic("kern_postprom: prom_holdcnt == 0, owner=%p",
348 348 (void *)prom_cpu);
349 349
350 350 if (atomic_add_32_nv(&prom_holdcnt, -1) != 0)
351 351 return; /* prom lock is held recursively by this CPU */
352 352
353 353 if ((boothowto & RB_DEBUG) && prom_exit_enter_debugger)
354 354 kmdb_enter();
355 355
356 356 prom_thread = NULL;
357 357 membar_producer();
358 358
359 359 prom_cpu = NULL;
360 360 membar_producer();
361 361
362 362 if (CPU_IN_SET(cpu_ready_set, cpuid) && cp->cpu_m.mutex_ready) {
363 363 mutex_enter(&prom_mutex);
364 364 cv_signal(&prom_cv);
365 365 mutex_exit(&prom_mutex);
366 366 kpreempt_enable();
367 367 }
368 368 }
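
As a minimal usage sketch (not part of this file), kernel code running below LOCK_LEVEL that wants to issue several PROM services atomically can bracket them with the two routines above, the same way console_enter() below takes the lock before idling other CPUs; the prom_printf() calls are just placeholders for any promif service:

void
example_prom_sequence(void)
{
        kern_preprom();                 /* may spin or sleep for the lock */
        prom_printf("line one\n");
        prom_printf("line two\n");
        kern_postprom();                /* releases when hold count hits zero */
}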
369 369
370 370 /*
371 371 * If the frame buffer device is busy, briefly capture the other CPUs so that
372 372 * another CPU executing code to manipulate the device does not execute at the
373 373 * same time we are rendering characters. Refer to the comments and code in
374 374 * common/os/console.c for more information on these callbacks.
375 375 *
376 376 * Notice that we explicitly acquire the PROM lock using kern_preprom() prior
377 377 * to idling other CPUs. The idling mechanism will cross-trap the other CPUs
378 378 * and have them spin at MAX(%pil, XCALL_PIL), so we must be sure that none of
379 379 * them are holding the PROM lock before we idle them and then call into the
380 380 * PROM routines that render characters to the frame buffer.
381 381 */
382 382 int
383 383 console_enter(int busy)
384 384 {
385 385 int s = 0;
386 386
387 387 if (busy && panicstr == NULL) {
388 388 kern_preprom();
389 389 s = splhi();
390 390 idle_other_cpus();
391 391 }
392 392
393 393 return (s);
394 394 }
395 395
396 396 void
397 397 console_exit(int busy, int spl)
398 398 {
399 399 if (busy && panicstr == NULL) {
400 400 resume_other_cpus();
401 401 splx(spl);
402 402 kern_postprom();
403 403 }
404 404 }
405 405
406 406 /*
407 407 * This routine is a special form of pause_cpus(). It ensures that
408 408 * prom functions are callable while the cpus are paused.
409 409 */
410 410 void
411 411 promsafe_pause_cpus(void)
412 412 {
413 - pause_cpus(NULL);
413 + pause_cpus(NULL, NULL);
414 414
415 415 /* If some other cpu is entering or is in the prom, spin */
416 416 while (prom_cpu || mutex_owner(&prom_mutex)) {
417 417
418 418 start_cpus();
419 419 mutex_enter(&prom_mutex);
420 420
421 421 /* Wait for other cpu to exit prom */
422 422 while (prom_cpu)
423 423 cv_wait(&prom_cv, &prom_mutex);
424 424
425 425 mutex_exit(&prom_mutex);
426 - pause_cpus(NULL);
426 + pause_cpus(NULL, NULL);
427 427 }
428 428
429 429 /* At this point all cpus are paused and none are in the prom */
430 430 }
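
The pause_cpus(NULL, NULL) calls above track an interface change in which pause_cpus() grows a second, optional callback argument (the cpu_pause_func of the synopsis). The prototype and callback type below are assumptions inferred from this diff, not copied from the changed header; they only illustrate how a caller with per-pause work might use the new argument, while callers such as promsafe_pause_cpus() simply pass NULL:

/* Assumed shape of the new interface; the real declaration lives elsewhere. */
extern void pause_cpus(cpu_t *off_cp, void *(*func)(void *));

static void *
my_pause_func(void *arg)                /* hypothetical per-pause callback */
{
        /* bookkeeping to run as part of the pause operation */
        return (arg);
}

static void
example_pause(void)
{
        pause_cpus(NULL, my_pause_func);
        /* ... all other CPUs are paused here ... */
        start_cpus();
}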
431 431
432 432 /*
433 433 * This routine is a special form of xc_attention(). It ensures that
434 434 * prom functions are callable while the cpus are at attention.
435 435 */
436 436 void
437 437 promsafe_xc_attention(cpuset_t cpuset)
438 438 {
439 439 xc_attention(cpuset);
440 440
441 441 /* If some other cpu is entering or is in the prom, spin */
442 442 while (prom_cpu || mutex_owner(&prom_mutex)) {
443 443
444 444 xc_dismissed(cpuset);
445 445 mutex_enter(&prom_mutex);
446 446
447 447 /* Wait for other cpu to exit prom */
448 448 while (prom_cpu)
449 449 cv_wait(&prom_cv, &prom_mutex);
450 450
451 451 mutex_exit(&prom_mutex);
452 452 xc_attention(cpuset);
453 453 }
454 454
455 455 /* At this point all cpus are paused and none are in the prom */
456 456 }
457 457
458 458
459 459 #if defined(PROM_32BIT_ADDRS)
460 460
461 461 #include <sys/promimpl.h>
462 462 #include <vm/seg_kmem.h>
463 463 #include <sys/kmem.h>
464 464 #include <sys/bootconf.h>
465 465
466 466 /*
467 467 * These routines are only used to work around "poor feature interaction"
468 468 * in OBP. See bug 4115680 for details.
469 469 *
470 470 * Many of the promif routines need to allocate temporary buffers
471 471 * with 32-bit addresses to pass in/out of the CIF. The lifetime
472 472 * of the buffers is extremely short, they are allocated and freed
473 473 * around the CIF call. We use vmem_alloc() to cache 32-bit memory.
474 474 *
475 475 * Note the code in promplat_free() to prevent exhausting the 32 bit
476 476 * heap during boot.
477 477 */
478 478 static void *promplat_last_free = NULL;
479 479 static size_t promplat_last_size;
480 480 static vmem_t *promplat_arena;
481 481 static kmutex_t promplat_lock; /* protect arena, last_free, and last_size */
482 482
483 483 void *
484 484 promplat_alloc(size_t size)
485 485 {
486 486
487 487 mutex_enter(&promplat_lock);
488 488 if (promplat_arena == NULL) {
489 489 promplat_arena = vmem_create("promplat", NULL, 0, 8,
490 490 segkmem_alloc, segkmem_free, heap32_arena, 0, VM_SLEEP);
491 491 }
492 492 mutex_exit(&promplat_lock);
493 493
494 494 return (vmem_alloc(promplat_arena, size, VM_NOSLEEP));
495 495 }
496 496
497 497 /*
498 498 * Delaying the free() of small allocations gets more mileage
499 499 * from pages during boot; otherwise a cycle of allocate/free
500 500 * calls could burn through available heap32 space too quickly.
501 501 */
502 502 void
503 503 promplat_free(void *p, size_t size)
504 504 {
505 505 void *p2 = NULL;
506 506 size_t s2;
507 507
508 508 /*
509 509 * If VM is initialized, clean up any delayed free().
510 510 */
511 511 if (kvseg.s_base != 0 && promplat_last_free != NULL) {
512 512 mutex_enter(&promplat_lock);
513 513 p2 = promplat_last_free;
514 514 s2 = promplat_last_size;
515 515 promplat_last_free = NULL;
516 516 promplat_last_size = 0;
517 517 mutex_exit(&promplat_lock);
518 518 if (p2 != NULL) {
519 519 vmem_free(promplat_arena, p2, s2);
520 520 p2 = NULL;
521 521 }
522 522 }
523 523
524 524 /*
525 525 * Do the free if VM is initialized or it's a large allocation.
526 526 */
527 527 if (kvseg.s_base != 0 || size >= PAGESIZE) {
528 528 vmem_free(promplat_arena, p, size);
529 529 return;
530 530 }
531 531
532 532 /*
533 533 * Otherwise, do the last free request and delay this one.
534 534 */
535 535 mutex_enter(&promplat_lock);
536 536 if (promplat_last_free != NULL) {
537 537 p2 = promplat_last_free;
538 538 s2 = promplat_last_size;
539 539 }
540 540 promplat_last_free = p;
541 541 promplat_last_size = size;
542 542 mutex_exit(&promplat_lock);
543 543
544 544 if (p2 != NULL)
545 545 vmem_free(promplat_arena, p2, s2);
546 546 }
547 547
548 548 void
549 549 promplat_bcopy(const void *src, void *dst, size_t count)
550 550 {
551 551 bcopy(src, dst, count);
552 552 }
553 553
554 554 #endif /* PROM_32BIT_ADDRS */
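
To make the workaround above concrete, here is a hypothetical sketch (not from this file) of the pattern the promif routines follow: stage a buffer from the 32-bit arena for the CIF call, then copy the result back out. prom_getprop() stands in for any CIF service; the function name and error handling are assumptions:

static int
example_getprop_via_32bit_buf(pnode_t node, char *name, void *buf, size_t len)
{
        void *tmp = promplat_alloc(len);
        int ret;

        if (tmp == NULL)
                return (-1);
        ret = prom_getprop(node, name, (caddr_t)tmp);
        if (ret > 0)
                promplat_bcopy(tmp, buf, (size_t)ret);
        promplat_free(tmp, len);
        return (ret);
}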
555 555
556 556 static prom_generation_cookie_t prom_tree_gen;
557 557 static krwlock_t prom_tree_lock;
558 558
559 559 int
560 560 prom_tree_access(int (*callback)(void *arg, int has_changed), void *arg,
561 561 prom_generation_cookie_t *ckp)
562 562 {
563 563 int chg, rv;
564 564
565 565 rw_enter(&prom_tree_lock, RW_READER);
566 566 /*
567 567 * If the tree has changed since the caller last accessed it,
568 568 * pass 1 as the second argument to the callback function;
569 569 * otherwise pass 0.
570 570 */
571 571 if (ckp != NULL && *ckp != prom_tree_gen) {
572 572 *ckp = prom_tree_gen;
573 573 chg = 1;
574 574 } else
575 575 chg = 0;
576 576 rv = callback(arg, chg);
577 577 rw_exit(&prom_tree_lock);
578 578 return (rv);
579 579 }
580 580
581 581 int
582 582 prom_tree_update(int (*callback)(void *arg), void *arg)
583 583 {
584 584 int rv;
585 585
586 586 rw_enter(&prom_tree_lock, RW_WRITER);
587 587 prom_tree_gen++;
588 588 rv = callback(arg);
589 589 rw_exit(&prom_tree_lock);
590 590 return (rv);
591 591 }
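
A hypothetical caller of prom_tree_access() (not part of this change) shows how the generation cookie is meant to be used: the callback only rebuilds its cached view of the device tree when the cookie indicates the tree changed since the last access. The names below are illustrative:

static int
example_tree_walk(void *arg, int tree_changed)
{
        if (tree_changed) {
                /* invalidate and rebuild any cached node information */
        }
        /* device tree is stable here; prom_childnode() etc. are safe */
        return (0);
}

static void
example_tree_user(void)
{
        static prom_generation_cookie_t gen;

        (void) prom_tree_access(example_tree_walk, NULL, &gen);
}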