Print this page
5045 use atomic_{inc,dec}_* instead of atomic_add_*
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/i86xpv/io/psm/xpv_uppc.c
+++ new/usr/src/uts/i86xpv/io/psm/xpv_uppc.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 #define PSMI_1_7
28 28
29 29 #include <sys/mutex.h>
30 30 #include <sys/types.h>
31 31 #include <sys/time.h>
32 32 #include <sys/clock.h>
33 33 #include <sys/machlock.h>
34 34 #include <sys/smp_impldefs.h>
35 35 #include <sys/uadmin.h>
36 36 #include <sys/promif.h>
37 37 #include <sys/psm.h>
38 38 #include <sys/psm_common.h>
39 39 #include <sys/atomic.h>
40 40 #include <sys/archsystm.h>
41 41 #include <sys/mach_intr.h>
42 42 #include <sys/hypervisor.h>
43 43 #include <sys/evtchn_impl.h>
44 44 #include <sys/modctl.h>
45 45 #include <sys/trap.h>
46 46 #include <sys/panic.h>
47 47
48 48 #include <xen/public/vcpu.h>
49 49 #include <xen/public/physdev.h>
50 50
51 51
52 52 /*
53 53 * Global Data
54 54 */
55 55 int xen_uppc_use_acpi = 1; /* Use ACPI by default */
56 56 int xen_uppc_enable_acpi = 0;
57 57
58 58 static int xen_clock_irq = -1;
59 59
60 60 /*
61 61 * For interrupt link devices, if xen_uppc_unconditional_srs is set, an irq
62 62 * resource will be assigned (via _SRS). If it is not set, use the current
63 63 * irq setting (via _CRS), but only if that irq is in the set of possible
64 64 * irqs (returned by _PRS) for the device.
65 65 */
66 66 int xen_uppc_unconditional_srs = 1;
67 67
68 68 /*
69 69 * For interrupt link devices, if xen_uppc_prefer_crs is set when we are
70 70 * assigning an IRQ resource to a device, prefer the current IRQ setting
71 71 * over other possible irq settings under same conditions.
72 72 */
73 73 int xen_uppc_prefer_crs = 1;
74 74
75 75 int xen_uppc_verbose = 0;
76 76
77 77 /* flag definitions for xen_uppc_verbose */
78 78 #define XEN_UPPC_VERBOSE_IRQ_FLAG 0x00000001
79 79 #define XEN_UPPC_VERBOSE_POWEROFF_FLAG 0x00000002
80 80 #define XEN_UPPC_VERBOSE_POWEROFF_PAUSE_FLAG 0x00000004
81 81
82 82 #define XEN_UPPC_VERBOSE_IRQ(fmt) \
83 83 if (xen_uppc_verbose & XEN_UPPC_VERBOSE_IRQ_FLAG) \
84 84 cmn_err fmt;
85 85
86 86 #define XEN_UPPC_VERBOSE_POWEROFF(fmt) \
87 87 if (xen_uppc_verbose & XEN_UPPC_VERBOSE_POWEROFF_FLAG) \
88 88 prom_printf fmt;
89 89
90 90 uchar_t xen_uppc_reserved_irqlist[MAX_ISA_IRQ + 1];
91 91
92 92 static uint16_t xen_uppc_irq_shared_table[MAX_ISA_IRQ + 1];
93 93
94 94 /*
95 95 * Contains SCI irqno from FADT after initialization
96 96 */
97 97 static int xen_uppc_sci = -1;
98 98
99 99 static struct psm_info xen_uppc_info;
100 100
101 101 /*
102 102 * Local support routines
103 103 */
104 104
105 105 static int
106 106 xen_uppc_init_acpi(void)
107 107 {
108 108 int verboseflags = 0;
109 109 int sci;
110 110 iflag_t sci_flags;
111 111
112 112 /*
113 113 * Process SCI configuration here; this may return
114 114 * an error if acpi-user-options has specified
115 115 * legacy mode (use ACPI without ACPI mode or SCI)
116 116 */
117 117 if (acpica_get_sci(&sci, &sci_flags) != AE_OK)
118 118 sci = -1;
119 119
120 120 /*
121 121 * Initialize sub-system - if error is returns, ACPI is not
122 122 * used.
123 123 */
124 124 if (acpica_init() != AE_OK)
125 125 return (0);
126 126
127 127 /*
128 128 * uppc implies system is in PIC mode; set edge/level
129 129 * via ELCR based on return value from get_sci; this
130 130 * will default to level/low if no override present,
131 131 * as recommended by Intel ACPI CA team.
132 132 */
133 133 if (sci >= 0) {
134 134 ASSERT((sci_flags.intr_el == INTR_EL_LEVEL) ||
135 135 (sci_flags.intr_el == INTR_EL_EDGE));
136 136
137 137 psm_set_elcr(sci, sci_flags.intr_el == INTR_EL_LEVEL);
138 138 }
139 139
140 140 /*
141 141 * Remember SCI for later use
142 142 */
143 143 xen_uppc_sci = sci;
144 144
145 145 if (xen_uppc_verbose & XEN_UPPC_VERBOSE_IRQ_FLAG)
146 146 verboseflags |= PSM_VERBOSE_IRQ_FLAG;
147 147
148 148 if (xen_uppc_verbose & XEN_UPPC_VERBOSE_POWEROFF_FLAG)
149 149 verboseflags |= PSM_VERBOSE_POWEROFF_FLAG;
150 150
151 151 if (xen_uppc_verbose & XEN_UPPC_VERBOSE_POWEROFF_PAUSE_FLAG)
152 152 verboseflags |= PSM_VERBOSE_POWEROFF_PAUSE_FLAG;
153 153
154 154 if (acpi_psm_init(xen_uppc_info.p_mach_idstring, verboseflags) ==
155 155 ACPI_PSM_FAILURE) {
156 156 return (0);
157 157 }
158 158
159 159 return (1);
160 160 }
161 161
162 162 /*
163 163 * Autoconfiguration Routines
164 164 */
165 165
/*
 * PSM probe entry point.  The xVM uniprocessor PSM is always usable when
 * this module is loaded, so the probe unconditionally succeeds.
 */
static int
xen_uppc_probe(void)
{

	return (PSM_SUCCESS);
}
172 172
173 173 static void
174 174 xen_uppc_softinit(void)
175 175 {
176 176 int i;
177 177
178 178 /* LINTED logical expression always true: op "||" */
179 179 ASSERT((1 << EVTCHN_SHIFT) == NBBY * sizeof (ulong_t));
180 180 if (DOMAIN_IS_INITDOMAIN(xen_info)) {
181 181 if (xen_uppc_use_acpi && xen_uppc_init_acpi()) {
182 182 build_reserved_irqlist((uchar_t *)
183 183 xen_uppc_reserved_irqlist);
184 184 for (i = 0; i <= MAX_ISA_IRQ; i++)
185 185 xen_uppc_irq_shared_table[i] = 0;
186 186 xen_uppc_enable_acpi = 1;
187 187 }
188 188 }
189 189 }
190 190
191 191
192 192 #define XEN_NSEC_PER_TICK 10 /* XXX - assume we have a 100 Mhz clock */
193 193
194 194 /*ARGSUSED*/
195 195 static int
196 196 xen_uppc_clkinit(int hertz)
197 197 {
198 198 extern enum tod_fault_type tod_fault(enum tod_fault_type, int);
199 199 extern int dosynctodr;
200 200
201 201 /*
202 202 * domU cannot set the TOD hardware, fault the TOD clock now to
203 203 * indicate that and turn off attempts to sync TOD hardware
204 204 * with the hires timer.
205 205 */
206 206 if (!DOMAIN_IS_INITDOMAIN(xen_info)) {
207 207 mutex_enter(&tod_lock);
208 208 (void) tod_fault(TOD_RDONLY, 0);
209 209 dosynctodr = 0;
210 210 mutex_exit(&tod_lock);
211 211 }
212 212 /*
213 213 * The hypervisor provides a timer based on the local APIC timer.
214 214 * The interface supports requests of nanosecond resolution.
215 215 * A common frequency of the apic clock is 100 Mhz which
216 216 * gives a resolution of 10 nsec per tick. What we would really like
217 217 * is a way to get the ns per tick value from xen.
218 218 * XXPV - This is an assumption that needs checking and may change
219 219 */
220 220 return (XEN_NSEC_PER_TICK);
221 221 }
222 222
223 223 static void
224 224 xen_uppc_picinit()
225 225 {
226 226 int irqno;
227 227
228 228 if (DOMAIN_IS_INITDOMAIN(xen_info)) {
229 229 #if 0
230 230 /* hypervisor initializes the 8259, don't mess with it */
231 231 picsetup(); /* initialise the 8259 */
232 232 #endif
233 233 /*
234 234 * We never called xen_uppc_addspl() when the SCI
235 235 * interrupt was added because that happened before the
236 236 * PSM module was loaded. Fix that up here by doing
237 237 * any missed operations (e.g. bind to CPU)
238 238 */
239 239 if ((irqno = xen_uppc_sci) >= 0) {
240 240 ec_enable_irq(irqno);
241 241 }
242 242 }
243 243 }
↓ open down ↓ |
243 lines elided |
↑ open up ↑ |
244 244
245 245
246 246 /*ARGSUSED*/
247 247 static int
248 248 xen_uppc_addspl(int irqno, int ipl, int min_ipl, int max_ipl)
249 249 {
250 250 int ret = PSM_SUCCESS;
251 251 cpuset_t cpus;
252 252
253 253 if (irqno >= 0 && irqno <= MAX_ISA_IRQ)
254 - atomic_add_16(&xen_uppc_irq_shared_table[irqno], 1);
254 + atomic_inc_16(&xen_uppc_irq_shared_table[irqno]);
255 255
256 256 /*
257 257 * We are called at splhi() so we can't call anything that might end
258 258 * up trying to context switch.
259 259 */
260 260 if (irqno >= PIRQ_BASE && irqno < NR_PIRQS &&
261 261 DOMAIN_IS_INITDOMAIN(xen_info)) {
262 262 CPUSET_ZERO(cpus);
263 263 CPUSET_ADD(cpus, 0);
264 264 ec_setup_pirq(irqno, ipl, &cpus);
265 265 } else {
266 266 /*
267 267 * Set priority/affinity/enable for non PIRQs
268 268 */
269 269 ret = ec_set_irq_priority(irqno, ipl);
270 270 ASSERT(ret == 0);
271 271 CPUSET_ZERO(cpus);
272 272 CPUSET_ADD(cpus, 0);
273 273 ec_set_irq_affinity(irqno, cpus);
274 274 ec_enable_irq(irqno);
275 275 }
276 276
↓ open down ↓ |
12 lines elided |
↑ open up ↑ |
277 277 return (ret);
278 278 }
279 279
280 280 /*ARGSUSED*/
281 281 static int
282 282 xen_uppc_delspl(int irqno, int ipl, int min_ipl, int max_ipl)
283 283 {
284 284 int err = PSM_SUCCESS;
285 285
286 286 if (irqno >= 0 && irqno <= MAX_ISA_IRQ)
287 - atomic_add_16(&xen_uppc_irq_shared_table[irqno], -1);
287 + atomic_dec_16(&xen_uppc_irq_shared_table[irqno]);
288 288
289 289 if (irqno >= PIRQ_BASE && irqno < NR_PIRQS &&
290 290 DOMAIN_IS_INITDOMAIN(xen_info)) {
291 291 if (max_ipl == PSM_INVALID_IPL) {
292 292 /*
293 293 * unbind if no more sharers of this irq/evtchn
294 294 */
295 295 (void) ec_block_irq(irqno);
296 296 ec_unbind_irq(irqno);
297 297 } else {
298 298 /*
299 299 * If still in use reset priority
300 300 */
301 301 err = ec_set_irq_priority(irqno, max_ipl);
302 302 }
303 303 } else {
304 304 (void) ec_block_irq(irqno);
305 305 ec_unbind_irq(irqno);
306 306 }
307 307 return (err);
308 308 }
309 309
310 310 static processorid_t
311 311 xen_uppc_get_next_processorid(processorid_t id)
312 312 {
313 313 if (id == -1)
314 314 return (0);
315 315 return (-1);
316 316 }
317 317
318 318 /*ARGSUSED*/
319 319 static int
320 320 xen_uppc_get_clockirq(int ipl)
321 321 {
322 322 if (xen_clock_irq != -1)
323 323 return (xen_clock_irq);
324 324
325 325 xen_clock_irq = ec_bind_virq_to_irq(VIRQ_TIMER, 0);
326 326 return (xen_clock_irq);
327 327 }
328 328
/*
 * PSM shutdown entry point: map uadmin(2) cmd/fcn codes onto hypervisor
 * shutdown operations.  Reboots go straight to SHUTDOWN_reboot; poweroff
 * tries ACPI first in dom0 (when enabled) and falls back to the
 * hypervisor's SHUTDOWN_poweroff.  Unknown cmds are ignored.
 */
/*ARGSUSED*/
static void
xen_uppc_shutdown(int cmd, int fcn)
{
	XEN_UPPC_VERBOSE_POWEROFF(("xen_uppc_shutdown(%d,%d);\n", cmd, fcn));

	switch (cmd) {
	case A_SHUTDOWN:
		switch (fcn) {
		case AD_BOOT:
		case AD_IBOOT:
			(void) HYPERVISOR_shutdown(SHUTDOWN_reboot);
			break;
		case AD_POWEROFF:
			/* fall through if domU or if poweroff fails */
			if (DOMAIN_IS_INITDOMAIN(xen_info))
				if (xen_uppc_enable_acpi)
					(void) acpi_poweroff();
			/* FALLTHRU */
		case AD_HALT:
		default:
			(void) HYPERVISOR_shutdown(SHUTDOWN_poweroff);
			break;
		}
		break;
	case A_REBOOT:
		(void) HYPERVISOR_shutdown(SHUTDOWN_reboot);
		break;
	default:
		/* Unrecognized command: do nothing. */
		return;
	}
}
361 361
362 362
363 363 /*
364 364 * This function will reprogram the timer.
365 365 *
366 366 * When in oneshot mode the argument is the absolute time in future at which to
367 367 * generate the interrupt.
368 368 *
369 369 * When in periodic mode, the argument is the interval at which the
370 370 * interrupts should be generated. There is no need to support the periodic
371 371 * mode timer change at this time.
372 372 *
373 373 * Note that we must be careful to convert from hrtime to Xen system time (see
374 374 * xpv_timestamp.c).
375 375 */
376 376 static void
377 377 xen_uppc_timer_reprogram(hrtime_t timer_req)
378 378 {
379 379 hrtime_t now, timer_new, time_delta, xen_time;
380 380 ulong_t flags;
381 381
382 382 flags = intr_clear();
383 383 /*
384 384 * We should be called from high PIL context (CBE_HIGH_PIL),
385 385 * so kpreempt is disabled.
386 386 */
387 387
388 388 now = xpv_gethrtime();
389 389 xen_time = xpv_getsystime();
390 390 if (timer_req <= now) {
391 391 /*
392 392 * requested to generate an interrupt in the past
393 393 * generate an interrupt as soon as possible
394 394 */
395 395 time_delta = XEN_NSEC_PER_TICK;
396 396 } else
397 397 time_delta = timer_req - now;
398 398
399 399 timer_new = xen_time + time_delta;
400 400 if (HYPERVISOR_set_timer_op(timer_new) != 0)
401 401 panic("can't set hypervisor timer?");
402 402 intr_restore(flags);
403 403 }
404 404
/*
 * This function will enable timer interrupts by unmasking the clock's
 * event channel.
 */
static void
xen_uppc_timer_enable(void)
{
	ec_unmask_irq(xen_clock_irq);
}
413 413
/*
 * This function will disable timer interrupts on the current cpu by
 * blocking the clock's event channel and clearing any already-pending
 * delivery.
 */
static void
xen_uppc_timer_disable(void)
{
	(void) ec_block_irq(xen_clock_irq);
	/*
	 * If the clock irq is pending on this cpu then we need to
	 * clear the pending interrupt.
	 */
	ec_unpend_irq(xen_clock_irq);
}
427 427
428 428
429 429 /*
430 430 * Configures the irq for the interrupt link device identified by
431 431 * acpipsmlnkp.
432 432 *
433 433 * Gets the current and the list of possible irq settings for the
434 434 * device. If xen_uppc_unconditional_srs is not set, and the current
435 435 * resource setting is in the list of possible irq settings,
436 436 * current irq resource setting is passed to the caller.
437 437 *
438 438 * Otherwise, picks an irq number from the list of possible irq
439 439 * settings, and sets the irq of the device to this value.
440 440 * If prefer_crs is set, among a set of irq numbers in the list that have
441 441 * the least number of devices sharing the interrupt, we pick current irq
442 442 * resource setting if it is a member of this set.
443 443 *
444 444 * Passes the irq number in the value pointed to by pci_irqp, and
445 445 * polarity and sensitivity in the structure pointed to by dipintrflagp
446 446 * to the caller.
447 447 *
448 448 * Note that if setting the irq resource failed, but successfuly obtained
449 449 * the current irq resource settings, passes the current irq resources
450 450 * and considers it a success.
451 451 *
452 452 * Returns:
453 453 * ACPI_PSM_SUCCESS on success.
454 454 *
455 455 * ACPI_PSM_FAILURE if an error occured during the configuration or
456 456 * if a suitable irq was not found for this device, or if setting the
457 457 * irq resource and obtaining the current resource fails.
458 458 *
459 459 */
460 460 static int
461 461 xen_uppc_acpi_irq_configure(acpi_psm_lnk_t *acpipsmlnkp, dev_info_t *dip,
462 462 int *pci_irqp, iflag_t *dipintr_flagp)
463 463 {
464 464 int i, min_share, foundnow, done = 0;
465 465 int32_t irq;
466 466 int32_t share_irq = -1;
467 467 int32_t chosen_irq = -1;
468 468 int cur_irq = -1;
469 469 acpi_irqlist_t *irqlistp;
470 470 acpi_irqlist_t *irqlistent;
471 471
472 472 if ((acpi_get_possible_irq_resources(acpipsmlnkp, &irqlistp))
473 473 == ACPI_PSM_FAILURE) {
474 474 XEN_UPPC_VERBOSE_IRQ((CE_WARN, "!xVM_uppc: Unable to determine "
475 475 "or assign IRQ for device %s, instance #%d: The system was "
476 476 "unable to get the list of potential IRQs from ACPI.",
477 477 ddi_get_name(dip), ddi_get_instance(dip)));
478 478
479 479 return (ACPI_PSM_FAILURE);
480 480 }
481 481
482 482 if ((acpi_get_current_irq_resource(acpipsmlnkp, &cur_irq,
483 483 dipintr_flagp) == ACPI_PSM_SUCCESS) &&
484 484 (!xen_uppc_unconditional_srs) &&
485 485 (cur_irq > 0)) {
486 486
487 487 if (acpi_irqlist_find_irq(irqlistp, cur_irq, NULL)
488 488 == ACPI_PSM_SUCCESS) {
489 489
490 490 acpi_free_irqlist(irqlistp);
491 491 ASSERT(pci_irqp != NULL);
492 492 *pci_irqp = cur_irq;
493 493 return (ACPI_PSM_SUCCESS);
494 494 }
495 495 XEN_UPPC_VERBOSE_IRQ((CE_WARN, "!xVM_uppc: Could not find the "
496 496 "current irq %d for device %s, instance #%d in ACPI's "
497 497 "list of possible irqs for this device. Picking one from "
498 498 " the latter list.", cur_irq, ddi_get_name(dip),
499 499 ddi_get_instance(dip)));
500 500
501 501 }
502 502
503 503 irqlistent = irqlistp;
504 504 min_share = 255;
505 505
506 506 while (irqlistent != NULL) {
507 507
508 508 for (foundnow = 0, i = 0; i < irqlistent->num_irqs; i++) {
509 509
510 510 irq = irqlistp->irqs[i];
511 511
512 512 if ((irq > MAX_ISA_IRQ) ||
513 513 (irqlistent->intr_flags.intr_el == INTR_EL_EDGE) ||
514 514 (irq == 0))
515 515 continue;
516 516
517 517 if (xen_uppc_reserved_irqlist[irq])
518 518 continue;
519 519
520 520 if (xen_uppc_irq_shared_table[irq] == 0) {
521 521 chosen_irq = irq;
522 522 foundnow = 1;
523 523 if (!(xen_uppc_prefer_crs) ||
524 524 (irq == cur_irq)) {
525 525 done = 1;
526 526 break;
527 527 }
528 528 }
529 529
530 530 if ((xen_uppc_irq_shared_table[irq] < min_share) ||
531 531 ((xen_uppc_irq_shared_table[irq] == min_share) &&
532 532 (cur_irq == irq) && (xen_uppc_prefer_crs))) {
533 533 min_share = xen_uppc_irq_shared_table[irq];
534 534 share_irq = irq;
535 535 foundnow = 1;
536 536 }
537 537 }
538 538
539 539 /* If we found an IRQ in the inner loop, save the details */
540 540 if (foundnow && ((chosen_irq != -1) || (share_irq != -1))) {
541 541 /*
542 542 * Copy the acpi_prs_private_t and flags from this
543 543 * irq list entry, since we found an irq from this
544 544 * entry.
545 545 */
546 546 acpipsmlnkp->acpi_prs_prv = irqlistent->acpi_prs_prv;
547 547 *dipintr_flagp = irqlistent->intr_flags;
548 548 }
549 549
550 550 if (done)
551 551 break;
552 552
553 553 /* Load the next entry in the irqlist */
554 554 irqlistent = irqlistent->next;
555 555 }
556 556
557 557 acpi_free_irqlist(irqlistp);
558 558
559 559 if (chosen_irq != -1)
560 560 irq = chosen_irq;
561 561 else if (share_irq != -1)
562 562 irq = share_irq;
563 563 else {
564 564 XEN_UPPC_VERBOSE_IRQ((CE_CONT, "!xVM_uppc: Could not find a "
565 565 "suitable irq from the list of possible irqs for device "
566 566 "%s, instance #%d in ACPI's list of possible\n",
567 567 ddi_get_name(dip), ddi_get_instance(dip)));
568 568
569 569 return (ACPI_PSM_FAILURE);
570 570 }
571 571
572 572
573 573 XEN_UPPC_VERBOSE_IRQ((CE_CONT, "!xVM_uppc: Setting irq %d "
574 574 "for device %s instance #%d\n", irq, ddi_get_name(dip),
575 575 ddi_get_instance(dip)));
576 576
577 577 if ((acpi_set_irq_resource(acpipsmlnkp, irq)) == ACPI_PSM_SUCCESS) {
578 578 /*
579 579 * setting irq was successful, check to make sure CRS
580 580 * reflects that. If CRS does not agree with what we
581 581 * set, return the irq that was set.
582 582 */
583 583
584 584 if (acpi_get_current_irq_resource(acpipsmlnkp, &cur_irq,
585 585 dipintr_flagp) == ACPI_PSM_SUCCESS) {
586 586
587 587 if (cur_irq != irq)
588 588 XEN_UPPC_VERBOSE_IRQ((CE_WARN, "!xVM_uppc: "
589 589 "IRQ resource set (irqno %d) for device %s "
590 590 "instance #%d, differs from current "
591 591 "setting irqno %d",
592 592 irq, ddi_get_name(dip),
593 593 ddi_get_instance(dip), cur_irq));
594 594 }
595 595 /*
596 596 * return the irq that was set, and not what CRS reports,
597 597 * since CRS has been seen to be bogus on some systems
598 598 */
599 599 cur_irq = irq;
600 600 } else {
601 601 XEN_UPPC_VERBOSE_IRQ((CE_WARN, "!xVM_uppc: set resource irq %d "
602 602 "failed for device %s instance #%d",
603 603 irq, ddi_get_name(dip), ddi_get_instance(dip)));
604 604 if (cur_irq == -1)
605 605 return (ACPI_PSM_FAILURE);
606 606 }
607 607
608 608 ASSERT(pci_irqp != NULL);
609 609 *pci_irqp = cur_irq;
610 610 return (ACPI_PSM_SUCCESS);
611 611 }
612 612
613 613
614 614 static int
615 615 xen_uppc_acpi_translate_pci_irq(dev_info_t *dip, int busid, int devid,
616 616 int ipin, int *pci_irqp, iflag_t *intr_flagp)
617 617 {
618 618 int status;
619 619 acpi_psm_lnk_t acpipsmlnk;
620 620
621 621 if ((status = acpi_get_irq_cache_ent(busid, devid, ipin, pci_irqp,
622 622 intr_flagp)) == ACPI_PSM_SUCCESS) {
623 623 XEN_UPPC_VERBOSE_IRQ((CE_CONT, "!xVM_uppc: Found irqno %d "
624 624 "from cache for device %s, instance #%d\n", *pci_irqp,
625 625 ddi_get_name(dip), ddi_get_instance(dip)));
626 626 return (status);
627 627 }
628 628
629 629 bzero(&acpipsmlnk, sizeof (acpi_psm_lnk_t));
630 630
631 631 if ((status = acpi_translate_pci_irq(dip, ipin, pci_irqp,
632 632 intr_flagp, &acpipsmlnk)) == ACPI_PSM_FAILURE) {
633 633 XEN_UPPC_VERBOSE_IRQ((CE_CONT, "!xVM_uppc: "
634 634 " acpi_translate_pci_irq failed for device %s, instance"
635 635 " #%d\n", ddi_get_name(dip), ddi_get_instance(dip)));
636 636
637 637 return (status);
638 638 }
639 639
640 640 if (status == ACPI_PSM_PARTIAL && acpipsmlnk.lnkobj != NULL) {
641 641 status = xen_uppc_acpi_irq_configure(&acpipsmlnk, dip, pci_irqp,
642 642 intr_flagp);
643 643 if (status != ACPI_PSM_SUCCESS) {
644 644 status = acpi_get_current_irq_resource(&acpipsmlnk,
645 645 pci_irqp, intr_flagp);
646 646 }
647 647 }
648 648
649 649 if (status == ACPI_PSM_SUCCESS) {
650 650 acpi_new_irq_cache_ent(busid, devid, ipin, *pci_irqp,
651 651 intr_flagp, &acpipsmlnk);
652 652 psm_set_elcr(*pci_irqp, 1); /* set IRQ to PCI mode */
653 653
654 654 XEN_UPPC_VERBOSE_IRQ((CE_CONT, "!xVM_uppc: [ACPI] "
655 655 "new irq %d for device %s, instance #%d\n",
656 656 *pci_irqp, ddi_get_name(dip), ddi_get_instance(dip)));
657 657 }
658 658
659 659 return (status);
660 660 }
661 661
662 662
/*
 * PSM irq-translation entry point.  For PCI/PCIe devices (when ACPI is
 * enabled) translate the device's interrupt pin to an irq via ACPI and
 * update the device's ILINE register if it disagrees.  For non-PCI
 * devices, just set the irq to ISA (edge) mode in the ELCR.  Returns the
 * translated irq, or the original irqno on any failure.
 */
/*ARGSUSED*/
static int
xen_uppc_translate_irq(dev_info_t *dip, int irqno)
{
	char dev_type[16];
	int dev_len, pci_irq, devid, busid;
	ddi_acc_handle_t cfg_handle;
	uchar_t ipin, iline;
	iflag_t intr_flag;

	if (dip == NULL) {
		/* No device: nothing to translate. */
		XEN_UPPC_VERBOSE_IRQ((CE_CONT, "!xVM_uppc: irqno = %d"
		    " dip = NULL\n", irqno));
		return (irqno);
	}

	if (!xen_uppc_enable_acpi) {
		return (irqno);
	}

	/* The parent's device_type property tells us the bus flavor. */
	dev_len = sizeof (dev_type);
	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, ddi_get_parent(dip),
	    DDI_PROP_DONTPASS, "device_type", (caddr_t)dev_type,
	    &dev_len) != DDI_PROP_SUCCESS) {
		XEN_UPPC_VERBOSE_IRQ((CE_CONT, "!xVM_uppc: irqno %d"
		    " device %s instance %d no device_type\n", irqno,
		    ddi_get_name(dip), ddi_get_instance(dip)));
		return (irqno);
	}

	if ((strcmp(dev_type, "pci") == 0) ||
	    (strcmp(dev_type, "pciex") == 0)) {

		/* pci device */
		if (acpica_get_bdf(dip, &busid, &devid, NULL) != 0)
			return (irqno);

		if (pci_config_setup(dip, &cfg_handle) != DDI_SUCCESS)
			return (irqno);

		/* Read the interrupt pin (0-based) and line registers. */
		ipin = pci_config_get8(cfg_handle, PCI_CONF_IPIN) - PCI_INTA;
		iline = pci_config_get8(cfg_handle, PCI_CONF_ILINE);
		if (xen_uppc_acpi_translate_pci_irq(dip, busid, devid,
		    ipin, &pci_irq, &intr_flag) == ACPI_PSM_SUCCESS) {

			XEN_UPPC_VERBOSE_IRQ((CE_CONT, "!xVM_uppc: [ACPI] "
			    "new irq %d old irq %d device %s, instance %d\n",
			    pci_irq, irqno, ddi_get_name(dip),
			    ddi_get_instance(dip)));

			/*
			 * Make sure pci_irq is within range.
			 * Otherwise, fall through and return irqno.
			 */
			if (pci_irq <= MAX_ISA_IRQ) {
				if (iline != pci_irq) {
					/*
					 * Update the device's ILINE byte,
					 * in case uppc_acpi_translate_pci_irq
					 * has choosen a different pci_irq
					 * than the BIOS has configured.
					 * Some chipsets use the value in
					 * ILINE to control interrupt routing,
					 * in conflict with the PCI spec.
					 */
					pci_config_put8(cfg_handle,
					    PCI_CONF_ILINE, pci_irq);
				}
				pci_config_teardown(&cfg_handle);
				return (pci_irq);
			}
		}
		pci_config_teardown(&cfg_handle);

		/* FALLTHRU to common case - returning irqno */
	} else {
		/* non-PCI; assumes ISA-style edge-triggered */
		psm_set_elcr(irqno, 0);		/* set IRQ to ISA mode */

		XEN_UPPC_VERBOSE_IRQ((CE_CONT, "!xVM_uppc: non-pci,"
		    "irqno %d device %s instance %d\n", irqno,
		    ddi_get_name(dip), ddi_get_instance(dip)));
	}

	return (irqno);
}
749 749
/*
 * xen_uppc_intr_enter() acks the event that triggered the interrupt and
 * returns the new priority level.  Returns -1 to flag a spurious
 * interrupt (no handler for the vector).
 */
/*ARGSUSED*/
static int
xen_uppc_intr_enter(int ipl, int *vector)
{
	int newipl;
	uint_t intno;
	cpu_t *cpu = CPU;

	intno = (*vector);

	ASSERT(intno < NR_IRQS);
	/* Upcalls must be masked while we are handling one. */
	ASSERT(cpu->cpu_m.mcpu_vcpu_info->evtchn_upcall_mask != 0);

	/* Ack: clear the pending bit for this event channel. */
	ec_clear_irq(intno);

	/* Highest priority among the handlers registered on this vector. */
	newipl = autovect[intno].avh_hi_pri;
	if (newipl == 0) {
		/*
		 * (newipl == 0) means we have no service routines for this
		 * vector. We will treat this as a spurious interrupt.
		 * We have cleared the pending bit already, clear the event
		 * mask and return a spurious interrupt. This case can happen
		 * when an interrupt delivery is racing with the removal of
		 * of the service routine for that interrupt.
		 */
		ec_unmask_irq(intno);
		newipl = -1;	/* flag spurious interrupt */
	} else if (newipl <= cpu->cpu_pri) {
		/*
		 * (newipl <= cpu->cpu_pri) means that we must be trying to
		 * service a vector that was shared with a higher priority
		 * isr. The higher priority handler has been removed and
		 * we need to service this int. We can't return a lower
		 * priority than current cpu priority. Just synthesize a
		 * priority to return that should be acceptable.
		 */
		newipl = cpu->cpu_pri + 1;	/* synthetic priority */
	}
	return (newipl);
}
794 794
795 795
796 796 static void xen_uppc_setspl(int);
797 797
/*
 * xen_uppc_intr_exit() restores the old interrupt
 * priority level after processing an interrupt.
 * It is called with interrupts disabled, and does not enable interrupts.
 */
/* ARGSUSED */
static void
xen_uppc_intr_exit(int ipl, int vector)
{
	/* Re-arm the event channel, then drop back to the previous ipl. */
	ec_try_unmask_irq(vector);
	xen_uppc_setspl(ipl);
}
810 810
/*
 * Export this PSM's interrupt-exit routine to the platform layer.
 */
intr_exit_fn_t
psm_intr_exit_fn(void)
{
	return (xen_uppc_intr_exit);
}
816 816
/*
 * Check if new ipl level allows delivery of previously unserviced events.
 * If so, propagate the per-ipl pending selector bits into the vcpu's
 * evtchn_pending_sel and set evtchn_upcall_pending so the upcoming sti
 * triggers an upcall.
 */
static void
xen_uppc_setspl(int ipl)
{
	struct cpu *cpu = CPU;
	volatile vcpu_info_t *vci = cpu->cpu_m.mcpu_vcpu_info;
	uint16_t pending;

	/* Must be called with upcalls masked. */
	ASSERT(vci->evtchn_upcall_mask != 0);

	/*
	 * If new ipl level will enable any pending interrupts, setup so the
	 * upcoming sti will cause us to get an upcall.
	 */
	/* Mask off all pending bits at or below the new ipl. */
	pending = cpu->cpu_m.mcpu_intr_pending & ~((1 << (ipl + 1)) - 1);
	if (pending) {
		int i;
		ulong_t pending_sels = 0;
		volatile ulong_t *selp;
		struct xen_evt_data *cpe = cpu->cpu_m.mcpu_evt_pend;

		/* Gather the selector words for every now-deliverable ipl. */
		for (i = bsrw_insn(pending); i > ipl; i--)
			pending_sels |= cpe->pending_sel[i];
		ASSERT(pending_sels);
		selp = (volatile ulong_t *)&vci->evtchn_pending_sel;
		atomic_or_ulong(selp, pending_sels);
		vci->evtchn_upcall_pending = 1;
	}
}
848 848
849 849 /*
850 850 * The rest of the file is just generic psm module boilerplate
851 851 */
852 852
/*
 * PSM operations vector: the entry points the platform layer calls into
 * this module through.  NULL slots are operations this uniprocessor PSM
 * does not implement (e.g. IPIs, cpu start).
 */
static struct psm_ops xen_uppc_ops = {
	xen_uppc_probe,				/* psm_probe */

	xen_uppc_softinit,			/* psm_init */
	xen_uppc_picinit,			/* psm_picinit */
	xen_uppc_intr_enter,			/* psm_intr_enter */
	xen_uppc_intr_exit,			/* psm_intr_exit */
	xen_uppc_setspl,			/* psm_setspl */
	xen_uppc_addspl,			/* psm_addspl */
	xen_uppc_delspl,			/* psm_delspl */
	(int (*)(processorid_t))NULL,		/* psm_disable_intr */
	(void (*)(processorid_t))NULL,		/* psm_enable_intr */
	(int (*)(int))NULL,			/* psm_softlvl_to_irq */
	(void (*)(int))NULL,			/* psm_set_softintr */
	(void (*)(processorid_t))NULL,		/* psm_set_idlecpu */
	(void (*)(processorid_t))NULL,		/* psm_unset_idlecpu */

	xen_uppc_clkinit,			/* psm_clkinit */
	xen_uppc_get_clockirq,			/* psm_get_clockirq */
	(void (*)(void))NULL,			/* psm_hrtimeinit */
	xpv_gethrtime,				/* psm_gethrtime */

	xen_uppc_get_next_processorid,		/* psm_get_next_processorid */
	(int (*)(processorid_t, caddr_t))NULL,	/* psm_cpu_start */
	(int (*)(void))NULL,			/* psm_post_cpu_start */
	xen_uppc_shutdown,			/* psm_shutdown */
	(int (*)(int, int))NULL,		/* psm_get_ipivect */
	(void (*)(processorid_t, int))NULL,	/* psm_send_ipi */

	xen_uppc_translate_irq,			/* psm_translate_irq */

	(void (*)(int, char *))NULL,		/* psm_notify_error */
	(void (*)(int msg))NULL,		/* psm_notify_func */
	xen_uppc_timer_reprogram,		/* psm_timer_reprogram */
	xen_uppc_timer_enable,			/* psm_timer_enable */
	xen_uppc_timer_disable,			/* psm_timer_disable */
	(void (*)(void *arg))NULL,		/* psm_post_cyclic_setup */
	(void (*)(int, int))NULL,		/* psm_preshutdown */

	(int (*)(dev_info_t *, ddi_intr_handle_impl_t *,
	    psm_intr_op_t, int *))NULL,		/* psm_intr_ops */
	(int (*)(psm_state_request_t *))NULL,	/* psm_state */
	(int (*)(psm_cpu_request_t *))NULL	/* psm_cpu_ops */
};
897 897
/*
 * PSM registration record: identifies this module to the PSM framework
 * and points at its operations vector.
 */
static struct psm_info xen_uppc_info = {
	PSM_INFO_VER01_5,	/* version */
	PSM_OWN_SYS_DEFAULT,	/* ownership */
	&xen_uppc_ops,		/* operation */
	"xVM_uppc",		/* machine name */
	"UniProcessor PC"	/* machine descriptions */
};
905 905
906 906 static void *xen_uppc_hdlp;
907 907
/*
 * Loadable-module entry point: register this PSM with the framework.
 */
int
_init(void)
{
	return (psm_mod_init(&xen_uppc_hdlp, &xen_uppc_info));
}
913 913
/*
 * Loadable-module exit point: unregister this PSM from the framework.
 */
int
_fini(void)
{
	return (psm_mod_fini(&xen_uppc_hdlp, &xen_uppc_info));
}
919 919
/*
 * Loadable-module info entry point: report module information.
 */
int
_info(struct modinfo *modinfop)
{
	return (psm_mod_info(&xen_uppc_hdlp, &xen_uppc_info, modinfop));
}
↓ open down ↓ |
627 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX