/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * Copyright (c) 2010, Intel Corporation.
 * All rights reserved.
 */
/*
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 */

/*
 * To understand how the pcplusmp module interacts with the interrupt
 * subsystem, read the theory statement in uts/i86pc/os/intr.c.
 */

/*
 * PSMI 1.1 extensions are supported only in 2.6 and later versions.
 * PSMI 1.2 extensions are supported only in 2.7 and later versions.
 * PSMI 1.3 and 1.4 extensions are supported in Solaris 10.
 * PSMI 1.5 extensions are supported in Solaris Nevada.
 * PSMI 1.6 extensions are supported in Solaris Nevada.
 * PSMI 1.7 extensions are supported in Solaris Nevada.
 */
#define	PSMI_1_7

#include <sys/processor.h>
#include <sys/time.h>
#include <sys/psm.h>
#include <sys/smp_impldefs.h>
#include <sys/cram.h>
#include <sys/acpi/acpi.h>
#include <sys/acpica.h>
#include <sys/psm_common.h>
#include <sys/apic.h>
#include <sys/pit.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/ddi_impldefs.h>
#include <sys/pci.h>
#include <sys/promif.h>
#include <sys/x86_archext.h>
#include <sys/cpc_impl.h>
#include <sys/uadmin.h>
#include <sys/panic.h>
#include <sys/debug.h>
#include <sys/archsystm.h>
#include <sys/trap.h>
#include <sys/machsystm.h>
#include <sys/sysmacros.h>
#include <sys/cpuvar.h>
#include <sys/rm_platter.h>
#include <sys/privregs.h>
#include <sys/note.h>
#include <sys/pci_intr_lib.h>
#include <sys/spl.h>
#include <sys/clock.h>
#include <sys/cyclic.h>
#include <sys/dditypes.h>
#include <sys/sunddi.h>
#include <sys/x_call.h>
#include <sys/reboot.h>
#include <sys/hpet.h>
#include <sys/apic_common.h>
#include <sys/apic_timer.h>

/*
 * Local Function Prototypes
 */
static void apic_init_intr(void);

/*
 * standard MP entries
 */
static int	apic_probe(void);
static int	apic_getclkirq(int ipl);
static void	apic_init(void);
static void	apic_picinit(void);
static int	apic_post_cpu_start(void);
static int	apic_intr_enter(int ipl, int *vect);
static void	apic_setspl(int ipl);
static void	x2apic_setspl(int ipl);
static int	apic_addspl(int irqno, int ipl, int min_ipl, int max_ipl);
static int	apic_delspl(int irqno, int ipl, int min_ipl, int max_ipl);
static int	apic_disable_intr(processorid_t cpun);
static void	apic_enable_intr(processorid_t cpun);
static int	apic_get_ipivect(int ipl, int type);
static void	apic_post_cyclic_setup(void *arg);

/*
 * The following vector assignments influence the value of ipltopri and
 * vectortoipl. Note that vectors 0 - 0x1f are not used. We can program
 * idle to 0 and IPL 0 to 0xf to differentiate idle in case
 * we care to do so in the future. Note that some rarely used IPLs
 * share vector ranges, while the heavily used IPLs (5 and 6) each have
 * a wide range.
 *
 * This array is used to initialize apic_ipls[] (in apic_init()).
 *
 *	IPL	Vector range		As passed to intr_enter
 *	0	none
 *	1,2,3	0x20-0x2f		0x0-0xf
 *	4	0x30-0x3f		0x10-0x1f
 *	5	0x40-0x5f		0x20-0x3f
 *	6	0x60-0x7f		0x40-0x5f
 *	7,8,9	0x80-0x8f		0x60-0x6f
 *	10	0x90-0x9f		0x70-0x7f
 *	11	0xa0-0xaf		0x80-0x8f
 *	...	...
 *	15	0xe0-0xef		0xc0-0xcf
 *	15	0xf0-0xff		0xd0-0xdf
 */
uchar_t apic_vectortoipl[APIC_AVAIL_VECTOR / APIC_VECTOR_PER_IPL] = {
	3, 4, 5, 5, 6, 6, 9, 10, 11, 12, 13, 14, 15, 15
};
/*
 * The IPL of an ISR at vector X is apic_vectortoipl[X >> 4].
 * Note that X is the vector as passed into intr_enter, which is the
 * programmed vector - 0x20 (APIC_BASE_VECT).
 */
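
/*
 * For illustration (assuming APIC_BASE_VECT is 0x20): hardware vector 0x6a
 * arrives at intr_enter as 0x4a, so its IPL is apic_vectortoipl[0x4a >> 4],
 * i.e. apic_vectortoipl[4] == 6, matching the 0x60-0x7f row of the table
 * above.
 */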

uchar_t	apic_ipltopri[MAXIPL + 1];	/* unix ipl to apic pri	*/
	/* The taskpri to be programmed into apic to mask given ipl */

/*
 * Correlation of the hardware vector to the IPL in use, initialized
 * from apic_vectortoipl[] in apic_init(). The final IPLs may not correlate
 * to the IPLs in apic_vectortoipl on some systems that share interrupt lines
 * connected to errata-stricken IOAPICs.
 */
uchar_t apic_ipls[APIC_AVAIL_VECTOR];

/*
 * Patchable global variables.
 */
int	apic_enable_hwsoftint = 0;	/* 0 - disable, 1 - enable */
int	apic_enable_bind_log = 1;	/* 1 - display interrupt binding log */

/*
 * Local static data
 */
static struct	psm_ops apic_ops = {
	apic_probe,

	apic_init,
	apic_picinit,
	apic_intr_enter,
	apic_intr_exit,
	apic_setspl,
	apic_addspl,
	apic_delspl,
	apic_disable_intr,
	apic_enable_intr,
	(int (*)(int))NULL,		/* psm_softlvl_to_irq */
	(void (*)(int))NULL,		/* psm_set_softintr */

	apic_set_idlecpu,
	apic_unset_idlecpu,

	apic_clkinit,
	apic_getclkirq,
	(void (*)(void))NULL,		/* psm_hrtimeinit */
	apic_gethrtime,

	apic_get_next_processorid,
	apic_cpu_start,
	apic_post_cpu_start,
	apic_shutdown,
	apic_get_ipivect,
	apic_send_ipi,

	(int (*)(dev_info_t *, int))NULL,	/* psm_translate_irq */
	(void (*)(int, char *))NULL,	/* psm_notify_error */
	(void (*)(int))NULL,		/* psm_notify_func */
	apic_timer_reprogram,
	apic_timer_enable,
	apic_timer_disable,
	apic_post_cyclic_setup,
	apic_preshutdown,
	apic_intr_ops,			/* Advanced DDI Interrupt framework */
	apic_state,			/* save, restore apic state for S3 */
	apic_cpu_ops,			/* CPU control interface. */
};

struct psm_ops *psmops = &apic_ops;

static struct	psm_info apic_psm_info = {
	PSM_INFO_VER01_7,		/* version */
	PSM_OWN_EXCLUSIVE,		/* ownership */
	(struct psm_ops *)&apic_ops,	/* operation */
	APIC_PCPLUSMP_NAME,		/* machine name */
	"pcplusmp v1.4 compatible",
};

static void *apic_hdlp;

/*
 * apic_let_idle_redistribute can have the following values:
 * 0 - if the clock decremented it from 1 to 0, the clock has to call
 * redistribute itself. apic_redistribute_lock prevents multiple idle
 * CPUs from redistributing at once.
 */
int	apic_num_idle_redistributions = 0;
static	int apic_let_idle_redistribute = 0;

/* to gather intr data and redistribute */
static void apic_redistribute_compute(void);

/*
 * This is the loadable module wrapper
 */

int
_init(void)
{
	if (apic_coarse_hrtime)
		apic_ops.psm_gethrtime = &apic_gettime;
	return (psm_mod_init(&apic_hdlp, &apic_psm_info));
}

int
_fini(void)
{
	return (psm_mod_fini(&apic_hdlp, &apic_psm_info));
}

int
_info(struct modinfo *modinfop)
{
	return (psm_mod_info(&apic_hdlp, &apic_psm_info, modinfop));
}

static int
apic_probe(void)
{
	/* check if apix is initialized */
	if (apix_enable && apix_loaded())
		return (PSM_FAILURE);
	else
		apix_enable = 0; /* continue using pcplusmp PSM */

	return (apic_probe_common(apic_psm_info.p_mach_idstring));
}

static uchar_t
apic_xlate_vector_by_irq(uchar_t irq)
{
	if (apic_irq_table[irq] == NULL)
		return (0);

	return (apic_irq_table[irq]->airq_vector);
}

void
apic_init(void)
{
	int i;
	int j = 1;

	psm_get_ioapicid = apic_get_ioapicid;
	psm_get_localapicid = apic_get_localapicid;
	psm_xlate_vector_by_irq = apic_xlate_vector_by_irq;

	apic_ipltopri[0] = APIC_VECTOR_PER_IPL; /* leave 0 for idle */
	for (i = 0; i < (APIC_AVAIL_VECTOR / APIC_VECTOR_PER_IPL); i++) {
		if ((i < ((APIC_AVAIL_VECTOR / APIC_VECTOR_PER_IPL) - 1)) &&
		    (apic_vectortoipl[i + 1] == apic_vectortoipl[i]))
			/* get to highest vector at the same ipl */
			continue;
		for (; j <= apic_vectortoipl[i]; j++) {
			apic_ipltopri[j] = (i << APIC_IPL_SHIFT) +
			    APIC_BASE_VECT;
		}
	}
	for (; j < MAXIPL + 1; j++)
		/* fill up any empty ipltopri slots */
		apic_ipltopri[j] = (i << APIC_IPL_SHIFT) + APIC_BASE_VECT;
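
	/*
	 * A sketch of the result, assuming APIC_BASE_VECT == 0x20,
	 * APIC_IPL_SHIFT == 4 and APIC_VECTOR_PER_IPL == 0x10: the loops
	 * above leave apic_ipltopri[] as { 0x10, 0x20, 0x20, 0x20, 0x30,
	 * 0x50, 0x70, 0x80, 0x80, 0x80, 0x90, 0xa0, 0xb0, 0xc0, 0xd0,
	 * 0xf0 }, i.e. the task priority value that masks each IPL.
	 */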
	apic_init_common();

#if !defined(__amd64)
	if (cpuid_have_cr8access(CPU))
		apic_have_32bit_cr8 = 1;
#endif
}

static void
apic_init_intr(void)
{
	processorid_t cpun = psm_get_cpu_id();
	uint_t nlvt;
	uint32_t svr = AV_UNIT_ENABLE | APIC_SPUR_INTR;

	apic_reg_ops->apic_write_task_reg(APIC_MASK_ALL);

	if (apic_mode == LOCAL_APIC) {
		/*
		 * We are running APIC in MMIO mode.
		 */
		if (apic_flat_model) {
			apic_reg_ops->apic_write(APIC_FORMAT_REG,
			    APIC_FLAT_MODEL);
		} else {
			apic_reg_ops->apic_write(APIC_FORMAT_REG,
			    APIC_CLUSTER_MODEL);
		}

		apic_reg_ops->apic_write(APIC_DEST_REG,
		    AV_HIGH_ORDER >> cpun);
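		/*
		 * For illustration, assuming AV_HIGH_ORDER has only its top
		 * bit set: CPU n claims bit (31 - n) of the logical
		 * destination register, e.g. CPU 2 sets bit 29.
		 */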
	}

	if (apic_directed_EOI_supported()) {
		/*
		 * Setting the 12th bit in the Spurious Interrupt Vector
		 * Register suppresses broadcast EOIs generated by the local
		 * APIC. The suppression of broadcast EOIs happens only when
		 * interrupts are level-triggered.
		 */
		svr |= APIC_SVR_SUPPRESS_BROADCAST_EOI;
	}

	/* need to enable APIC before unmasking NMI */
	apic_reg_ops->apic_write(APIC_SPUR_INT_REG, svr);

	/*
	 * The presence of an invalid vector with delivery mode AV_FIXED can
	 * cause an error interrupt, even if the entry is masked, so
	 * write a valid vector to the LVT entries along with the mask bit.
	 */

	/* All APICs have timer and LINT0/1 */
	apic_reg_ops->apic_write(APIC_LOCAL_TIMER, AV_MASK|APIC_RESV_IRQ);
	apic_reg_ops->apic_write(APIC_INT_VECT0, AV_MASK|APIC_RESV_IRQ);
	apic_reg_ops->apic_write(APIC_INT_VECT1, AV_NMI);	/* enable NMI */

	/*
	 * On integrated APICs, the number of LVT entries is
	 * 'Max LVT entry' + 1; on 82489DX's (non-integrated
	 * APICs), nlvt is "3" (LINT0, LINT1, and timer)
	 */

	if (apic_cpus[cpun].aci_local_ver < APIC_INTEGRATED_VERS) {
		nlvt = 3;
	} else {
		nlvt = ((apic_reg_ops->apic_read(APIC_VERS_REG) >> 16) &
		    0xFF) + 1;
	}
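
	/*
	 * For illustration: a local APIC whose version register reports a
	 * "Max LVT Entry" of 5 in bits 16-23 yields nlvt == 6 (timer,
	 * thermal, performance counter, LINT0, LINT1 and error).
	 */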

	if (nlvt >= 5) {
		/* Enable performance counter overflow interrupt */

		if (!is_x86_feature(x86_featureset, X86FSET_MSR))
			apic_enable_cpcovf_intr = 0;
		if (apic_enable_cpcovf_intr) {
			if (apic_cpcovf_vect == 0) {
				int ipl = APIC_PCINT_IPL;
				int irq = apic_get_ipivect(ipl, -1);

				ASSERT(irq != -1);
				apic_cpcovf_vect =
				    apic_irq_table[irq]->airq_vector;
				ASSERT(apic_cpcovf_vect);
				(void) add_avintr(NULL, ipl,
				    (avfunc)kcpc_hw_overflow_intr,
				    "apic pcint", irq, NULL, NULL, NULL, NULL);
				kcpc_hw_overflow_intr_installed = 1;
				kcpc_hw_enable_cpc_intr =
				    apic_cpcovf_mask_clear;
			}
			apic_reg_ops->apic_write(APIC_PCINT_VECT,
			    apic_cpcovf_vect);
		}
	}

	if (nlvt >= 6) {
		/* Only mask TM intr if the BIOS apparently doesn't use it */

		uint32_t lvtval;

		lvtval = apic_reg_ops->apic_read(APIC_THERM_VECT);
		if (((lvtval & AV_MASK) == AV_MASK) ||
		    ((lvtval & AV_DELIV_MODE) != AV_SMI)) {
			apic_reg_ops->apic_write(APIC_THERM_VECT,
			    AV_MASK|APIC_RESV_IRQ);
		}
	}

	/* Enable error interrupt */

	if (nlvt >= 4 && apic_enable_error_intr) {
		if (apic_errvect == 0) {
			int ipl = 0xf;	/* get highest priority intr */
			int irq = apic_get_ipivect(ipl, -1);

			ASSERT(irq != -1);
			apic_errvect = apic_irq_table[irq]->airq_vector;
			ASSERT(apic_errvect);
			/*
			 * Not PSMI compliant, but we are going to merge
			 * with ON anyway
			 */
			(void) add_avintr((void *)NULL, ipl,
			    (avfunc)apic_error_intr, "apic error intr",
			    irq, NULL, NULL, NULL, NULL);
		}
		apic_reg_ops->apic_write(APIC_ERR_VECT, apic_errvect);
		apic_reg_ops->apic_write(APIC_ERROR_STATUS, 0);
		apic_reg_ops->apic_write(APIC_ERROR_STATUS, 0);
	}

	/* Enable CMCI interrupt */
	if (cmi_enable_cmci) {

		mutex_enter(&cmci_cpu_setup_lock);
		if (cmci_cpu_setup_registered == 0) {
			mutex_enter(&cpu_lock);
			register_cpu_setup_func(cmci_cpu_setup, NULL);
			mutex_exit(&cpu_lock);
			cmci_cpu_setup_registered = 1;
		}
		mutex_exit(&cmci_cpu_setup_lock);

		if (apic_cmci_vect == 0) {
			int ipl = 0x2;
			int irq = apic_get_ipivect(ipl, -1);

			ASSERT(irq != -1);
			apic_cmci_vect = apic_irq_table[irq]->airq_vector;
			ASSERT(apic_cmci_vect);

			(void) add_avintr(NULL, ipl,
			    (avfunc)cmi_cmci_trap,
			    "apic cmci intr", irq, NULL, NULL, NULL, NULL);
		}
		apic_reg_ops->apic_write(APIC_CMCI_VECT, apic_cmci_vect);
	}
}

static void
apic_picinit(void)
{
	int i, j;
	uint_t isr;

	/*
	 * Initialize and enable interrupt remapping before apic
	 * hardware initialization
	 */
	apic_intrmap_init(apic_mode);

	/*
	 * On the UniSys Model 6520, the BIOS leaves the vector 0x20 ISR
	 * bit set without clearing it with an EOI. Since softint
	 * uses vector 0x20 to interrupt itself, softint will
	 * not work on such a machine. In order to fix this problem
	 * a check is made to verify that all the ISR bits are clear.
	 * If not, EOIs are issued to clear the bits.
	 */
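	/*
	 * The in-service state is 256 bits spread across eight 32-bit ISR
	 * registers; register i, bit j corresponds to vector (i * 32 + j),
	 * which is why the sweep below walks i from 7 down to 1 (vectors
	 * below 0x20 are never used).
	 */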
	for (i = 7; i >= 1; i--) {
		isr = apic_reg_ops->apic_read(APIC_ISR_REG + (i * 4));
		if (isr != 0)
			for (j = 0; ((j < 32) && (isr != 0)); j++)
				if (isr & (1 << j)) {
					apic_reg_ops->apic_write(
					    APIC_EOI_REG, 0);
					isr &= ~(1 << j);
					apic_error |= APIC_ERR_BOOT_EOI;
				}
	}

	/* set a flag so we know we have run apic_picinit() */
	apic_picinit_called = 1;
	LOCK_INIT_CLEAR(&apic_gethrtime_lock);
	LOCK_INIT_CLEAR(&apic_ioapic_lock);
	LOCK_INIT_CLEAR(&apic_error_lock);
	LOCK_INIT_CLEAR(&apic_mode_switch_lock);

	picsetup();	/* initialise the 8259 */

	/* add nmi handler - least priority nmi handler */
	LOCK_INIT_CLEAR(&apic_nmi_lock);

	if (!psm_add_nmintr(0, (avfunc) apic_nmi_intr,
	    "pcplusmp NMI handler", (caddr_t)NULL))
		cmn_err(CE_WARN, "pcplusmp: Unable to add nmi handler");

	/*
	 * Check for directed-EOI capability in the local APIC.
	 */
	if (apic_directed_EOI_supported() == 1) {
		apic_set_directed_EOI_handler();
	}

	apic_init_intr();

	/* enable apic mode if imcr present */
	if (apic_imcrp) {
		outb(APIC_IMCR_P1, (uchar_t)APIC_IMCR_SELECT);
		outb(APIC_IMCR_P2, (uchar_t)APIC_IMCR_APIC);
	}

	ioapic_init_intr(IOAPIC_MASK);
}

#ifdef	DEBUG
void
apic_break(void)
{
}
#endif /* DEBUG */

/*
 * platform_intr_enter
 *
 *	Called at the beginning of the interrupt service routine to
 *	mask all levels equal to and below the interrupt priority
 *	of the interrupting vector. An EOI should be given to
 *	the interrupt controller to enable other HW interrupts.
 *
 *	Return -1 for spurious interrupts
 *
 */
/*ARGSUSED*/
static int
apic_intr_enter(int ipl, int *vectorp)
{
	uchar_t vector;
	int nipl;
	int irq;
	ulong_t iflag;
	apic_cpus_info_t *cpu_infop;

	/*
	 * The real vector delivered is (*vectorp + 0x20), but our caller
	 * subtracts 0x20 from the vector before passing it to us.
	 * (That's why APIC_BASE_VECT is 0x20.)
	 */
	vector = (uchar_t)*vectorp;

	/* if interrupted by the clock, increment apic_nsec_since_boot */
	if (vector == apic_clkvect) {
		if (!apic_oneshot) {
			/* NOTE: this is not MT aware */
			apic_hrtime_stamp++;
			apic_nsec_since_boot += apic_nsec_per_intr;
			apic_hrtime_stamp++;
			last_count_read = apic_hertz_count;
			apic_redistribute_compute();
		}

		/* We avoid all the bookkeeping overhead for the clock */
		nipl = apic_ipls[vector];

		*vectorp = apic_vector_to_irq[vector + APIC_BASE_VECT];
		if (apic_mode == LOCAL_APIC) {
#if defined(__amd64)
			setcr8((ulong_t)(apic_ipltopri[nipl] >>
			    APIC_IPL_SHIFT));
#else
			if (apic_have_32bit_cr8)
				setcr8((ulong_t)(apic_ipltopri[nipl] >>
				    APIC_IPL_SHIFT));
			else
				LOCAL_APIC_WRITE_REG(APIC_TASK_REG,
				    (uint32_t)apic_ipltopri[nipl]);
#endif
			LOCAL_APIC_WRITE_REG(APIC_EOI_REG, 0);
		} else {
			X2APIC_WRITE(APIC_TASK_REG, apic_ipltopri[nipl]);
			X2APIC_WRITE(APIC_EOI_REG, 0);
		}

		return (nipl);
	}

	cpu_infop = &apic_cpus[psm_get_cpu_id()];

	if (vector == (APIC_SPUR_INTR - APIC_BASE_VECT)) {
		cpu_infop->aci_spur_cnt++;
		return (APIC_INT_SPURIOUS);
	}

	/* Check if the vector we got is really what we need */
	if (apic_revector_pending) {
		/*
		 * Disable interrupts for the duration of
		 * the vector translation to prevent a self-race for
		 * the apic_revector_lock. This cannot be done
		 * in apic_xlate_vector because it is recursive and
		 * we want the vector translation to be atomic with
		 * respect to other (higher-priority) interrupts.
		 */
		iflag = intr_clear();
		vector = apic_xlate_vector(vector + APIC_BASE_VECT) -
		    APIC_BASE_VECT;
		intr_restore(iflag);
	}

	nipl = apic_ipls[vector];
	*vectorp = irq = apic_vector_to_irq[vector + APIC_BASE_VECT];

	if (apic_mode == LOCAL_APIC) {
#if defined(__amd64)
		setcr8((ulong_t)(apic_ipltopri[nipl] >> APIC_IPL_SHIFT));
#else
		if (apic_have_32bit_cr8)
			setcr8((ulong_t)(apic_ipltopri[nipl] >>
			    APIC_IPL_SHIFT));
		else
			LOCAL_APIC_WRITE_REG(APIC_TASK_REG,
			    (uint32_t)apic_ipltopri[nipl]);
#endif
	} else {
		X2APIC_WRITE(APIC_TASK_REG, apic_ipltopri[nipl]);
	}

	cpu_infop->aci_current[nipl] = (uchar_t)irq;
	cpu_infop->aci_curipl = (uchar_t)nipl;
	cpu_infop->aci_ISR_in_progress |= 1 << nipl;

	/*
	 * apic_level_intr could have been assimilated into the irq struct,
	 * but having it as a character array is more efficient in terms of
	 * cache usage, so we leave it as is.
	 */
	if (!apic_level_intr[irq]) {
		if (apic_mode == LOCAL_APIC) {
			LOCAL_APIC_WRITE_REG(APIC_EOI_REG, 0);
		} else {
			X2APIC_WRITE(APIC_EOI_REG, 0);
		}
	}

#ifdef	DEBUG
	APIC_DEBUG_BUF_PUT(vector);
	APIC_DEBUG_BUF_PUT(irq);
	APIC_DEBUG_BUF_PUT(nipl);
	APIC_DEBUG_BUF_PUT(psm_get_cpu_id());
	if ((apic_stretch_interrupts) && (apic_stretch_ISR & (1 << nipl)))
		drv_usecwait(apic_stretch_interrupts);

	if (apic_break_on_cpu == psm_get_cpu_id())
		apic_break();
#endif /* DEBUG */
	return (nipl);
}

/*
 * This macro is common code used by the MMIO local apic and the X2APIC
 * local apic.
 */
#define	APIC_INTR_EXIT() \
{ \
	cpu_infop = &apic_cpus[psm_get_cpu_id()]; \
	if (apic_level_intr[irq]) \
		apic_reg_ops->apic_send_eoi(irq); \
	cpu_infop->aci_curipl = (uchar_t)prev_ipl; \
	/* ISR above current pri could not be in progress */ \
	cpu_infop->aci_ISR_in_progress &= (2 << prev_ipl) - 1; \
}
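
/*
 * The "(2 << prev_ipl) - 1" expression builds a mask that keeps only the
 * in-progress bits for IPLs at or below prev_ipl; e.g. prev_ipl == 4 gives
 * 0x1f, clearing the bits for IPLs 5 and above.
 */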

/*
 * Any changes made to this function must also change X2APIC
 * version of intr_exit.
 */
void
apic_intr_exit(int prev_ipl, int irq)
{
	apic_cpus_info_t *cpu_infop;

#if defined(__amd64)
	setcr8((ulong_t)(apic_ipltopri[prev_ipl] >> APIC_IPL_SHIFT));
#else
	if (apic_have_32bit_cr8)
		setcr8((ulong_t)(apic_ipltopri[prev_ipl] >> APIC_IPL_SHIFT));
	else
		apicadr[APIC_TASK_REG] = apic_ipltopri[prev_ipl];
#endif

	APIC_INTR_EXIT();
}

/*
 * Same as apic_intr_exit() except it uses MSR rather than MMIO
 * to access local apic registers.
 */
void
x2apic_intr_exit(int prev_ipl, int irq)
{
	apic_cpus_info_t *cpu_infop;

	X2APIC_WRITE(APIC_TASK_REG, apic_ipltopri[prev_ipl]);
	APIC_INTR_EXIT();
}

intr_exit_fn_t
psm_intr_exit_fn(void)
{
	if (apic_mode == LOCAL_X2APIC)
		return (x2apic_intr_exit);

	return (apic_intr_exit);
}

/*
 * Mask all interrupts below or equal to the given IPL.
 * Any changes made to this function must also change X2APIC
 * version of setspl.
 */
static void
apic_setspl(int ipl)
{
#if defined(__amd64)
	setcr8((ulong_t)(apic_ipltopri[ipl] >> APIC_IPL_SHIFT));
#else
	if (apic_have_32bit_cr8)
		setcr8((ulong_t)(apic_ipltopri[ipl] >> APIC_IPL_SHIFT));
	else
		apicadr[APIC_TASK_REG] = apic_ipltopri[ipl];
#endif

	/* interrupts at ipl above this cannot be in progress */
	apic_cpus[psm_get_cpu_id()].aci_ISR_in_progress &= (2 << ipl) - 1;
	/*
	 * This is a workaround for the ALR QSMP P5 machine, so that
	 * interrupts have enough time to come in before the priority is
	 * raised again during the idle() loop.
	 */
	if (apic_setspl_delay)
		(void) apic_reg_ops->apic_get_pri();
}

/*
 * X2APIC version of setspl.
 * Mask all interrupts below or equal to the given IPL
 */
static void
x2apic_setspl(int ipl)
{
	X2APIC_WRITE(APIC_TASK_REG, apic_ipltopri[ipl]);

	/* interrupts at ipl above this cannot be in progress */
	apic_cpus[psm_get_cpu_id()].aci_ISR_in_progress &= (2 << ipl) - 1;
}

/*ARGSUSED*/
static int
apic_addspl(int irqno, int ipl, int min_ipl, int max_ipl)
{
	return (apic_addspl_common(irqno, ipl, min_ipl, max_ipl));
}

static int
apic_delspl(int irqno, int ipl, int min_ipl, int max_ipl)
{
	return (apic_delspl_common(irqno, ipl, min_ipl, max_ipl));
}

static int
apic_post_cpu_start(void)
{
	int cpun;
	static int cpus_started = 1;

	/* We know this CPU + BSP started successfully. */
	cpus_started++;

	/*
	 * On the BSP we would have enabled X2APIC, if supported by the
	 * processor, in acpi_probe(), but on an AP we do it here.
	 *
	 * We enable X2APIC mode only if the BSP is running in X2APIC and the
	 * local APIC mode of the current CPU is MMIO (xAPIC).
	 */
	if (apic_mode == LOCAL_X2APIC && apic_detect_x2apic() &&
	    apic_local_mode() == LOCAL_APIC) {
		apic_enable_x2apic();
	}

	/*
	 * Switch back to x2apic IPI sending method for performance when target
	 * CPU has entered x2apic mode.
	 */
	if (apic_mode == LOCAL_X2APIC) {
		apic_switch_ipi_callback(B_FALSE);
	}

	splx(ipltospl(LOCK_LEVEL));
	apic_init_intr();

	/*
	 * Since some systems don't enable the internal cache on the non-boot
	 * CPUs, we have to enable it here.
	 */
	setcr0(getcr0() & ~(CR0_CD | CR0_NW));

#ifdef	DEBUG
	APIC_AV_PENDING_SET();
#else
	if (apic_mode == LOCAL_APIC)
		APIC_AV_PENDING_SET();
#endif	/* DEBUG */

	/*
	 * We may be booting, or resuming from suspend; aci_status will
	 * be APIC_CPU_INTR_ENABLE if coming from suspend, so we add the
	 * APIC_CPU_ONLINE flag here rather than setting aci_status completely.
	 */
	cpun = psm_get_cpu_id();
	apic_cpus[cpun].aci_status |= APIC_CPU_ONLINE;

	apic_reg_ops->apic_write(APIC_DIVIDE_REG, apic_divide_reg_init);
	return (PSM_SUCCESS);
}

/*
 * type == -1 indicates it is an internal request. Do not change
 * resv_vector for these requests.
 */
static int
apic_get_ipivect(int ipl, int type)
{
	uchar_t vector;
	int irq;

	if ((irq = apic_allocate_irq(APIC_VECTOR(ipl))) != -1) {
		if ((vector = apic_allocate_vector(ipl, irq, 1)) != 0) {
			apic_irq_table[irq]->airq_mps_intr_index =
			    RESERVE_INDEX;
			apic_irq_table[irq]->airq_vector = vector;
			if (type != -1) {
				apic_resv_vector[ipl] = vector;
			}
			return (irq);
		}
	}
	apic_error |= APIC_ERR_GET_IPIVECT_FAIL;
	return (-1);	/* shouldn't happen */
}

static int
apic_getclkirq(int ipl)
{
	int irq;

	if ((irq = apic_get_ipivect(ipl, -1)) == -1)
		return (-1);
	/*
	 * Note the vector in apic_clkvect for per clock handling.
	 */
	apic_clkvect = apic_irq_table[irq]->airq_vector - APIC_BASE_VECT;
	APIC_VERBOSE_IOAPIC((CE_NOTE, "get_clkirq: vector = %x\n",
	    apic_clkvect));
	return (irq);
}

/*
 * Try to disable all interrupts. We just assign interrupts to other
 * processors based on policy. If any were bound by user request, we
 * let them continue and return failure. We do not bother to check
 * for cache affinity while rebinding.
 */

static int
apic_disable_intr(processorid_t cpun)
{
	int bind_cpu = 0, i, hardbound = 0;
	apic_irq_t *irq_ptr;
	ulong_t iflag;

	iflag = intr_clear();
	lock_set(&apic_ioapic_lock);

	for (i = 0; i <= APIC_MAX_VECTOR; i++) {
		if (apic_reprogram_info[i].done == B_FALSE) {
			if (apic_reprogram_info[i].bindcpu == cpun) {
				/*
				 * CPU is busy -- it's the target of
				 * a pending reprogramming attempt
				 */
				lock_clear(&apic_ioapic_lock);
				intr_restore(iflag);
				return (PSM_FAILURE);
			}
		}
	}

	apic_cpus[cpun].aci_status &= ~APIC_CPU_INTR_ENABLE;

	apic_cpus[cpun].aci_curipl = 0;

	i = apic_min_device_irq;
	for (; i <= apic_max_device_irq; i++) {
		/*
		 * If there are bound interrupts on this cpu, then
		 * rebind them to other processors.
		 */
		if ((irq_ptr = apic_irq_table[i]) != NULL) {
			ASSERT((irq_ptr->airq_temp_cpu == IRQ_UNBOUND) ||
			    (irq_ptr->airq_temp_cpu == IRQ_UNINIT) ||
			    (apic_cpu_in_range(irq_ptr->airq_temp_cpu)));

			if (irq_ptr->airq_temp_cpu == (cpun | IRQ_USER_BOUND)) {
				hardbound = 1;
				continue;
			}

			if (irq_ptr->airq_temp_cpu == cpun) {
				do {
					bind_cpu =
					    apic_find_cpu(APIC_CPU_INTR_ENABLE);
				} while (apic_rebind_all(irq_ptr, bind_cpu));
			}
		}
	}

	lock_clear(&apic_ioapic_lock);
	intr_restore(iflag);

	if (hardbound) {
		cmn_err(CE_WARN, "Could not disable interrupts on %d "
		    "due to user bound interrupts", cpun);
		return (PSM_FAILURE);
	} else {
		return (PSM_SUCCESS);
	}
}

/*
 * Bind interrupts to the CPU's local APIC.
 * Interrupts should not be bound to a CPU's local APIC until the CPU
 * is ready to receive interrupts.
 */
static void
apic_enable_intr(processorid_t cpun)
{
	int i;
	apic_irq_t *irq_ptr;
	ulong_t iflag;

	iflag = intr_clear();
	lock_set(&apic_ioapic_lock);

	apic_cpus[cpun].aci_status |= APIC_CPU_INTR_ENABLE;

	for (i = apic_min_device_irq; i <= apic_max_device_irq; i++) {
		if ((irq_ptr = apic_irq_table[i]) != NULL) {
			if ((irq_ptr->airq_cpu & ~IRQ_USER_BOUND) == cpun) {
				(void) apic_rebind_all(irq_ptr,
				    irq_ptr->airq_cpu);
			}
		}
	}

	if (apic_cpus[cpun].aci_status & APIC_CPU_SUSPEND)
		apic_cpus[cpun].aci_status &= ~APIC_CPU_SUSPEND;

	lock_clear(&apic_ioapic_lock);
	intr_restore(iflag);
}

/*
 * If this module needs a periodic handler for the interrupt distribution, it
 * can be added here. The argument to the periodic handler is not currently
 * used, but is reserved for future use.
 */
static void
apic_post_cyclic_setup(void *arg)
{
	_NOTE(ARGUNUSED(arg))

	cyc_handler_t cyh;
	cyc_time_t cyt;

	/* cpu_lock is held */
	/* set up a periodic handler for intr redistribution */

	/*
	 * In periodic mode intr redistribution processing is done in
	 * apic_intr_enter during clock intr processing.
	 */
	if (!apic_oneshot)
		return;

	/*
	 * Register a periodic handler for the redistribution processing.
	 * Though we would generally prefer to use the DDI interface for
	 * periodic handler invocation, ddi_periodic_add(9F), we are
	 * unfortunately already holding cpu_lock, which ddi_periodic_add will
	 * attempt to take for us. Thus, we add our own cyclic directly:
	 */
	cyh.cyh_func = (void (*)(void *))apic_redistribute_compute;
	cyh.cyh_arg = NULL;
	cyh.cyh_level = CY_LOW_LEVEL;

	cyt.cyt_when = 0;
	cyt.cyt_interval = apic_redistribute_sample_interval;

	apic_cyclic_id = cyclic_add(&cyh, &cyt);
}

static void
apic_redistribute_compute(void)
{
	int	i, j, max_busy;

	if (apic_enable_dynamic_migration) {
		if (++apic_nticks == apic_sample_factor_redistribution) {
			/*
			 * Time to call apic_intr_redistribute(); reset
			 * apic_nticks. This will cause max_busy to be
			 * calculated below, and if it is more than
			 * apic_int_busy, we will do the whole thing.
			 */
			apic_nticks = 0;
		}
		max_busy = 0;
		for (i = 0; i < apic_nproc; i++) {
			if (!apic_cpu_in_range(i))
				continue;

			/*
			 * Check if curipl is non zero & if ISR is in
			 * progress
			 */
			if (((j = apic_cpus[i].aci_curipl) != 0) &&
			    (apic_cpus[i].aci_ISR_in_progress & (1 << j))) {

				int irq;
				apic_cpus[i].aci_busy++;
				irq = apic_cpus[i].aci_current[j];
				apic_irq_table[irq]->airq_busy++;
			}

			if (!apic_nticks &&
			    (apic_cpus[i].aci_busy > max_busy))
				max_busy = apic_cpus[i].aci_busy;
		}
		if (!apic_nticks) {
			if (max_busy > apic_int_busy_mark) {
				/*
				 * We could allow the following check to be
				 * skipped more than once, in which case we
				 * would get a redistribution at half the busy
				 * mark (due to the doubled interval). We need
				 * more empirical data to decide if that is a
				 * good strategy. Punt for now.
				 */
				if (apic_skipped_redistribute) {
					apic_cleanup_busy();
					apic_skipped_redistribute = 0;
				} else {
					apic_intr_redistribute();
				}
			} else
				apic_skipped_redistribute++;
		}
	}
}


/*
 * The following functions are in the platform specific file so that they
 * can be different functions depending on whether we are running on
 * bare metal or a hypervisor.
 */

/*
 * Check to make sure there are enough irq slots
 */
int
apic_check_free_irqs(int count)
{
	int i, avail;

	avail = 0;
	for (i = APIC_FIRST_FREE_IRQ; i < APIC_RESV_IRQ; i++) {
		if ((apic_irq_table[i] == NULL) ||
		    apic_irq_table[i]->airq_mps_intr_index == FREE_INDEX) {
			if (++avail >= count)
				return (PSM_SUCCESS);
		}
	}
	return (PSM_FAILURE);
}

/*
 * This function allocates "count" MSI vector(s) for the given "dip/pri/type"
 */
int
apic_alloc_msi_vectors(dev_info_t *dip, int inum, int count, int pri,
    int behavior)
{
	int	rcount, i;
	uchar_t	start, irqno;
	uint32_t cpu;
	major_t	major;
	apic_irq_t	*irqptr;

	DDI_INTR_IMPLDBG((CE_CONT, "apic_alloc_msi_vectors: dip=0x%p "
	    "inum=0x%x pri=0x%x count=0x%x behavior=%d\n",
	    (void *)dip, inum, pri, count, behavior));

	if (count > 1) {
		if (behavior == DDI_INTR_ALLOC_STRICT &&
		    apic_multi_msi_enable == 0)
			return (0);
		if (apic_multi_msi_enable == 0)
			count = 1;
	}

	if ((rcount = apic_navail_vector(dip, pri)) > count)
		rcount = count;
	else if (rcount == 0 || (rcount < count &&
	    behavior == DDI_INTR_ALLOC_STRICT))
		return (0);

	/* if not ISP2, then round it down */
	if (!ISP2(rcount))
		rcount = 1 << (highbit(rcount) - 1);
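	/* e.g. a request for 6 vectors is rounded down to 4 (highbit(6) == 3) */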

	mutex_enter(&airq_mutex);

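	/*
	 * Halve the request until a contiguous block of vectors is found;
	 * under DDI_INTR_ALLOC_STRICT only the first attempt is taken,
	 * successful or not.
	 */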
	for (start = 0; rcount > 0; rcount >>= 1) {
		if ((start = apic_find_multi_vectors(pri, rcount)) != 0 ||
		    behavior == DDI_INTR_ALLOC_STRICT)
			break;
	}

	if (start == 0) {
		/* no vector available */
		mutex_exit(&airq_mutex);
		return (0);
	}

	if (apic_check_free_irqs(rcount) == PSM_FAILURE) {
		/* not enough free irq slots available */
		mutex_exit(&airq_mutex);
		return (0);
	}

	major = (dip != NULL) ? ddi_driver_major(dip) : 0;
	for (i = 0; i < rcount; i++) {
		if ((irqno = apic_allocate_irq(apic_first_avail_irq)) ==
		    (uchar_t)-1) {
			/*
			 * shouldn't happen because of the
			 * apic_check_free_irqs() check earlier
			 */
			mutex_exit(&airq_mutex);
			DDI_INTR_IMPLDBG((CE_CONT, "apic_alloc_msi_vectors: "
			    "apic_allocate_irq failed\n"));
			return (i);
		}
		apic_max_device_irq = max(irqno, apic_max_device_irq);
		apic_min_device_irq = min(irqno, apic_min_device_irq);
		irqptr = apic_irq_table[irqno];
#ifdef	DEBUG
		if (apic_vector_to_irq[start + i] != APIC_RESV_IRQ)
			DDI_INTR_IMPLDBG((CE_CONT, "apic_alloc_msi_vectors: "
			    "apic_vector_to_irq is not APIC_RESV_IRQ\n"));
#endif
		apic_vector_to_irq[start + i] = (uchar_t)irqno;

		irqptr->airq_vector = (uchar_t)(start + i);
		irqptr->airq_ioapicindex = (uchar_t)inum;	/* start */
		irqptr->airq_intin_no = (uchar_t)rcount;
		irqptr->airq_ipl = pri;
		irqptr->airq_origirq = (uchar_t)(inum + i);
		irqptr->airq_share_id = 0;
		irqptr->airq_mps_intr_index = MSI_INDEX;
		irqptr->airq_dip = dip;
		irqptr->airq_major = major;
		if (i == 0)	/* they are all bound to the same cpu */
			cpu = irqptr->airq_cpu = apic_bind_intr(dip, irqno,
			    0xff, 0xff);
		else
			irqptr->airq_cpu = cpu;
		DDI_INTR_IMPLDBG((CE_CONT, "apic_alloc_msi_vectors: irq=0x%x "
		    "dip=0x%p vector=0x%x origirq=0x%x pri=0x%x\n", irqno,
		    (void *)irqptr->airq_dip, irqptr->airq_vector,
		    irqptr->airq_origirq, pri));
	}
	mutex_exit(&airq_mutex);
	return (rcount);
}

/*
 * This function allocates "count" MSI-X vector(s) for the given "dip/pri/type"
 */
int
apic_alloc_msix_vectors(dev_info_t *dip, int inum, int count, int pri,
    int behavior)
{
	int	rcount, i;
	major_t	major;

	mutex_enter(&airq_mutex);

	if ((rcount = apic_navail_vector(dip, pri)) > count)
		rcount = count;
	else if (rcount == 0 || (rcount < count &&
	    behavior == DDI_INTR_ALLOC_STRICT)) {
		rcount = 0;
		goto out;
	}

	if (apic_check_free_irqs(rcount) == PSM_FAILURE) {
		/* not enough free irq slots available */
		rcount = 0;
		goto out;
	}

	major = (dip != NULL) ? ddi_driver_major(dip) : 0;
	for (i = 0; i < rcount; i++) {
		uchar_t vector, irqno;
		apic_irq_t	*irqptr;

		if ((irqno = apic_allocate_irq(apic_first_avail_irq)) ==
		    (uchar_t)-1) {
			/*
			 * shouldn't happen because of the
			 * apic_check_free_irqs() check earlier
			 */
			DDI_INTR_IMPLDBG((CE_CONT, "apic_alloc_msix_vectors: "
			    "apic_allocate_irq failed\n"));
			rcount = i;
			goto out;
		}
		if ((vector = apic_allocate_vector(pri, irqno, 1)) == 0) {
			/*
			 * shouldn't happen because of the
			 * apic_navail_vector() call earlier
			 */
			DDI_INTR_IMPLDBG((CE_CONT, "apic_alloc_msix_vectors: "
			    "apic_allocate_vector failed\n"));
			rcount = i;
			goto out;
		}
		apic_max_device_irq = max(irqno, apic_max_device_irq);
		apic_min_device_irq = min(irqno, apic_min_device_irq);
		irqptr = apic_irq_table[irqno];
		irqptr->airq_vector = (uchar_t)vector;
		irqptr->airq_ipl = pri;
		irqptr->airq_origirq = (uchar_t)(inum + i);
		irqptr->airq_share_id = 0;
		irqptr->airq_mps_intr_index = MSIX_INDEX;
		irqptr->airq_dip = dip;
		irqptr->airq_major = major;
		irqptr->airq_cpu = apic_bind_intr(dip, irqno, 0xff, 0xff);
	}
out:
	mutex_exit(&airq_mutex);
	return (rcount);
}

/*
 * Allocate a free vector for irq at ipl. Takes care of merging of multiple
 * IPLs into a single APIC level as well as stretching some IPLs onto multiple
 * levels. APIC_HI_PRI_VECTS interrupts are reserved for high priority
 * requests and allocated only when pri is set.
 */
uchar_t
apic_allocate_vector(int ipl, int irq, int pri)
{
	int	lowest, highest, i;

	highest = apic_ipltopri[ipl] + APIC_VECTOR_MASK;
	lowest = apic_ipltopri[ipl - 1] + APIC_VECTOR_PER_IPL;

	if (highest < lowest)	/* Both ipl and ipl - 1 map to same pri */
		lowest -= APIC_VECTOR_PER_IPL;
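
	/*
	 * A sketch with the values derived above (assuming APIC_VECTOR_MASK
	 * is 0xf and APIC_VECTOR_PER_IPL is 0x10): for ipl 5,
	 * apic_ipltopri[5] is 0x50 and apic_ipltopri[4] is 0x30, so the
	 * search range becomes 0x40 through 0x5f, matching IPL 5's vector
	 * range in the table near the top of this file.
	 */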

#ifdef	DEBUG
	if (apic_restrict_vector)	/* for testing shared interrupt logic */
		highest = lowest + apic_restrict_vector + APIC_HI_PRI_VECTS;
#endif /* DEBUG */
	if (pri == 0)
		highest -= APIC_HI_PRI_VECTS;

	for (i = lowest; i <= highest; i++) {
		if (APIC_CHECK_RESERVE_VECTORS(i))
			continue;
		if (apic_vector_to_irq[i] == APIC_RESV_IRQ) {
			apic_vector_to_irq[i] = (uchar_t)irq;
			return (i);
		}
	}

	return (0);
}

/* Mark vector as not being used by any irq */
void
apic_free_vector(uchar_t vector)
{
	apic_vector_to_irq[vector] = APIC_RESV_IRQ;
}

/*
 * Call rebind to do the actual programming.
 * Must be called with interrupts disabled and apic_ioapic_lock held.
 * 'p' is polymorphic -- if this function is called to process a deferred
 * reprogramming, p is of type 'struct ioapic_reprogram_data *', from which
 * the irq pointer is retrieved. If not doing deferred reprogramming,
 * p is of the type 'apic_irq_t *'.
 *
 * apic_ioapic_lock must be held across this call, as it protects apic_rebind
 * and it protects apic_get_next_bind_cpu() from a race in which a CPU can be
 * taken offline after a cpu is selected, but before apic_rebind is called to
 * bind interrupts to it.
 */
int
apic_setup_io_intr(void *p, int irq, boolean_t deferred)
{
	apic_irq_t *irqptr;
	struct ioapic_reprogram_data *drep = NULL;
	int rv;

	if (deferred) {
		drep = (struct ioapic_reprogram_data *)p;
		ASSERT(drep != NULL);
		irqptr = drep->irqp;
	} else
		irqptr = (apic_irq_t *)p;

	ASSERT(irqptr != NULL);

	rv = apic_rebind(irqptr, apic_irq_table[irq]->airq_cpu, drep);
	if (rv) {
		/*
		 * CPU is not up or interrupts are disabled. Fall back to
		 * the first available CPU
		 */
		rv = apic_rebind(irqptr, apic_find_cpu(APIC_CPU_INTR_ENABLE),
		    drep);
	}

	return (rv);
}


uchar_t
apic_modify_vector(uchar_t vector, int irq)
{
	apic_vector_to_irq[vector] = (uchar_t)irq;
	return (vector);
}

char *
apic_get_apic_type(void)
{
	return (apic_psm_info.p_mach_idstring);
}

void
x2apic_update_psm(void)
{
	struct psm_ops *pops = &apic_ops;

	ASSERT(pops != NULL);

	pops->psm_intr_exit = x2apic_intr_exit;
	pops->psm_setspl = x2apic_setspl;

	pops->psm_send_ipi = x2apic_send_ipi;
	send_dirintf = pops->psm_send_ipi;

	apic_mode = LOCAL_X2APIC;
	apic_change_ops();
}