/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * Copyright (c) 2010, Intel Corporation.
 * All rights reserved.
 */
/*
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 */

/*
 * To understand how the pcplusmp module interacts with the interrupt subsystem
 * read the theory statement in uts/i86pc/os/intr.c.
 */

/*
 * PSMI 1.1 extensions are supported only in 2.6 and later versions.
 * PSMI 1.2 extensions are supported only in 2.7 and later versions.
 * PSMI 1.3 and 1.4 extensions are supported in Solaris 10.
 * PSMI 1.5 extensions are supported in Solaris Nevada.
 * PSMI 1.6 extensions are supported in Solaris Nevada.
 * PSMI 1.7 extensions are supported in Solaris Nevada.
 */
#define	PSMI_1_7

#include <sys/processor.h>
#include <sys/time.h>
#include <sys/psm.h>
#include <sys/smp_impldefs.h>
#include <sys/cram.h>
#include <sys/acpi/acpi.h>
#include <sys/acpica.h>
#include <sys/psm_common.h>
#include <sys/apic.h>
#include <sys/pit.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/ddi_impldefs.h>
#include <sys/pci.h>
#include <sys/promif.h>
#include <sys/x86_archext.h>
#include <sys/cpc_impl.h>
#include <sys/uadmin.h>
#include <sys/panic.h>
#include <sys/debug.h>
#include <sys/archsystm.h>
#include <sys/trap.h>
#include <sys/machsystm.h>
#include <sys/sysmacros.h>
#include <sys/cpuvar.h>
#include <sys/rm_platter.h>
#include <sys/privregs.h>
#include <sys/note.h>
#include <sys/pci_intr_lib.h>
#include <sys/spl.h>
#include <sys/clock.h>
#include <sys/cyclic.h>
#include <sys/dditypes.h>
#include <sys/sunddi.h>
#include <sys/x_call.h>
#include <sys/reboot.h>
#include <sys/hpet.h>
#include <sys/apic_common.h>
#include <sys/apic_timer.h>

/*
 * Local Function Prototypes
 */
static void apic_init_intr(void);

/*
 * standard MP entries
 */
static int	apic_probe(void);
static int	apic_getclkirq(int ipl);
static void	apic_init(void);
static void	apic_picinit(void);
static int	apic_post_cpu_start(void);
static int	apic_intr_enter(int ipl, int *vect);
static void	apic_setspl(int ipl);
static void	x2apic_setspl(int ipl);
static int	apic_addspl(int ipl, int vector, int min_ipl, int max_ipl);
static int	apic_delspl(int ipl, int vector, int min_ipl, int max_ipl);
static int	apic_disable_intr(processorid_t cpun);
static void	apic_enable_intr(processorid_t cpun);
static int	apic_get_ipivect(int ipl, int type);
static void	apic_post_cyclic_setup(void *arg);

/*
 * The following vector assignments influence the value of ipltopri and
 * vectortoipl. Note that vectors 0 - 0x1f are not used. We can program
 * idle to 0 and IPL 0 to 0xf to differentiate idle in case
 * we care to do so in future. Note some IPLs which are rarely used
 * will share the vector ranges and heavily used IPLs (5 and 6) have
 * a wide range.
 *
 * This array is used to initialize apic_ipls[] (in apic_init()).
 *
 *	IPL		Vector range.		as passed to intr_enter
 *	0		none.
 *	1,2,3		0x20-0x2f		0x0-0xf
 *	4		0x30-0x3f		0x10-0x1f
 *	5		0x40-0x5f		0x20-0x3f
 *	6		0x60-0x7f		0x40-0x5f
 *	7,8,9		0x80-0x8f		0x60-0x6f
 *	10		0x90-0x9f		0x70-0x7f
 *	11		0xa0-0xaf		0x80-0x8f
 *	...		...
 *	15		0xe0-0xef		0xc0-0xcf
 *	15		0xf0-0xff		0xd0-0xdf
 */
uchar_t apic_vectortoipl[APIC_AVAIL_VECTOR / APIC_VECTOR_PER_IPL] = {
	3, 4, 5, 5, 6, 6, 9, 10, 11, 12, 13, 14, 15, 15
};
/*
 * The ipl of an ISR at vector X is apic_vectortoipl[X>>4]
 * NOTE that this is vector as passed into intr_enter which is
 * programmed vector - 0x20 (APIC_BASE_VECT)
 */

uchar_t	apic_ipltopri[MAXIPL + 1];	/* unix ipl to apic pri	*/
	/* The taskpri to be programmed into apic to mask given ipl */

/*
 * Correlation of the hardware vector to the IPL in use, initialized
 * from apic_vectortoipl[] in apic_init(). The final IPLs may not correlate
 * to the IPLs in apic_vectortoipl on some systems that share interrupt lines
 * connected to errata-stricken IOAPICs
 */
uchar_t apic_ipls[APIC_AVAIL_VECTOR];

/*
 * Patchable global variables.
 */
int	apic_enable_hwsoftint = 0;	/* 0 - disable, 1 - enable */
int	apic_enable_bind_log = 1;	/* 1 - display interrupt binding log */

/*
 *	Local static data
 */
static struct	psm_ops apic_ops = {
	apic_probe,

	apic_init,
	apic_picinit,
	apic_intr_enter,
	apic_intr_exit,
	apic_setspl,
	apic_addspl,
	apic_delspl,
	apic_disable_intr,
	apic_enable_intr,
	(int (*)(int))NULL,			/* psm_softlvl_to_irq */
	(void (*)(int))NULL,			/* psm_set_softintr */

	apic_set_idlecpu,
	apic_unset_idlecpu,

	apic_clkinit,
	apic_getclkirq,
	(void (*)(void))NULL,			/* psm_hrtimeinit */
	apic_gethrtime,

	apic_get_next_processorid,
	apic_cpu_start,
	apic_post_cpu_start,
	apic_shutdown,
	apic_get_ipivect,
	apic_send_ipi,

	(int (*)(dev_info_t *, int))NULL,	/* psm_translate_irq */
	(void (*)(int, char *))NULL,		/* psm_notify_error */
	(void (*)(int))NULL,			/* psm_notify_func */
	apic_timer_reprogram,
	apic_timer_enable,
	apic_timer_disable,
	apic_post_cyclic_setup,
	apic_preshutdown,
	apic_intr_ops,			/* Advanced DDI Interrupt framework */
	apic_state,			/* save, restore apic state for S3 */
	apic_cpu_ops,			/* CPU control interface. */
};

struct psm_ops *psmops = &apic_ops;

static struct	psm_info apic_psm_info = {
	PSM_INFO_VER01_7,			/* version */
	PSM_OWN_EXCLUSIVE,			/* ownership */
	(struct psm_ops *)&apic_ops,		/* operation */
	APIC_PCPLUSMP_NAME,			/* machine name */
	"pcplusmp v1.4 compatible",
};

static void *apic_hdlp;

/*
 * apic_let_idle_redistribute can have the following values:
 * 0 - If clock decremented it from 1 to 0, clock has to call redistribute.
 * apic_redistribute_lock prevents multiple idle cpus from redistributing
 */
int	apic_num_idle_redistributions = 0;
static	int apic_let_idle_redistribute = 0;

/* to gather intr data and redistribute */
static void apic_redistribute_compute(void);

/*
 *	This is the loadable module wrapper
 */

int
_init(void)
{
	if (apic_coarse_hrtime)
		apic_ops.psm_gethrtime = &apic_gettime;
	return (psm_mod_init(&apic_hdlp, &apic_psm_info));
}

int
_fini(void)
{
	return (psm_mod_fini(&apic_hdlp, &apic_psm_info));
}

int
_info(struct modinfo *modinfop)
{
	return (psm_mod_info(&apic_hdlp, &apic_psm_info, modinfop));
}

static int
apic_probe(void)
{
	/* check if apix is initialized */
	if (apix_enable && apix_loaded())
		return (PSM_FAILURE);
	else
		apix_enable = 0; /* continue using pcplusmp PSM */

	return (apic_probe_common(apic_psm_info.p_mach_idstring));
}

static uchar_t
apic_xlate_vector_by_irq(uchar_t irq)
{
	if (apic_irq_table[irq] == NULL)
		return (0);

	return (apic_irq_table[irq]->airq_vector);
}

void
apic_init(void)
{
	int i;
	int j = 1;

	psm_get_ioapicid = apic_get_ioapicid;
	psm_get_localapicid = apic_get_localapicid;
	psm_xlate_vector_by_irq = apic_xlate_vector_by_irq;

	apic_ipltopri[0] = APIC_VECTOR_PER_IPL; /* leave 0 for idle */
	for (i = 0; i < (APIC_AVAIL_VECTOR / APIC_VECTOR_PER_IPL); i++) {
		if ((i < ((APIC_AVAIL_VECTOR / APIC_VECTOR_PER_IPL) - 1)) &&
		    (apic_vectortoipl[i + 1] == apic_vectortoipl[i]))
			/* get to highest vector at the same ipl */
			continue;
		for (; j <= apic_vectortoipl[i]; j++) {
			apic_ipltopri[j] = (i << APIC_IPL_SHIFT) +
			    APIC_BASE_VECT;
		}
	}
	for (; j < MAXIPL + 1; j++)
		/* fill up any empty ipltopri slots */
		apic_ipltopri[j] = (i << APIC_IPL_SHIFT) + APIC_BASE_VECT;
	apic_init_common();
#ifndef __amd64
	if (cpuid_have_cr8access(CPU))
		apic_have_32bit_cr8 = 1;
#endif	/* !__amd64 */
}

static void
apic_init_intr(void)
{
	processorid_t cpun = psm_get_cpu_id();
	uint_t nlvt;
	uint32_t svr = AV_UNIT_ENABLE | APIC_SPUR_INTR;

	apic_reg_ops->apic_write_task_reg(APIC_MASK_ALL);

	if (apic_mode == LOCAL_APIC) {
		/*
		 * We are running APIC in MMIO mode.
		 */
		if (apic_flat_model) {
			apic_reg_ops->apic_write(APIC_FORMAT_REG,
			    APIC_FLAT_MODEL);
		} else {
			apic_reg_ops->apic_write(APIC_FORMAT_REG,
			    APIC_CLUSTER_MODEL);
		}

		apic_reg_ops->apic_write(APIC_DEST_REG,
		    AV_HIGH_ORDER >> cpun);
	}

	if (apic_directed_EOI_supported()) {
		/*
		 * Setting the 12th bit in the Spurious Interrupt Vector
		 * Register suppresses broadcast EOIs generated by the local
		 * APIC. The suppression of broadcast EOIs happens only when
		 * interrupts are level-triggered.
		 */
		svr |= APIC_SVR_SUPPRESS_BROADCAST_EOI;
	}

	/* need to enable APIC before unmasking NMI */
	apic_reg_ops->apic_write(APIC_SPUR_INT_REG, svr);

	/*
	 * Presence of an invalid vector with delivery mode AV_FIXED can
	 * cause an error interrupt, even if the entry is masked...so
	 * write a valid vector to LVT entries along with the mask bit
	 */

	/* All APICs have timer and LINT0/1 */
	apic_reg_ops->apic_write(APIC_LOCAL_TIMER, AV_MASK|APIC_RESV_IRQ);
	apic_reg_ops->apic_write(APIC_INT_VECT0, AV_MASK|APIC_RESV_IRQ);
	apic_reg_ops->apic_write(APIC_INT_VECT1, AV_NMI);	/* enable NMI */

	/*
	 * On integrated APICs, the number of LVT entries is
	 * 'Max LVT entry' + 1; on 82489DX's (non-integrated
	 * APICs), nlvt is "3" (LINT0, LINT1, and timer)
	 */

	if (apic_cpus[cpun].aci_local_ver < APIC_INTEGRATED_VERS) {
		nlvt = 3;
	} else {
		nlvt = ((apic_reg_ops->apic_read(APIC_VERS_REG) >> 16) &
		    0xFF) + 1;
	}

	if (nlvt >= 5) {
		/* Enable performance counter overflow interrupt */

		if (!is_x86_feature(x86_featureset, X86FSET_MSR))
			apic_enable_cpcovf_intr = 0;
		if (apic_enable_cpcovf_intr) {
			if (apic_cpcovf_vect == 0) {
				int ipl = APIC_PCINT_IPL;
				int irq = apic_get_ipivect(ipl, -1);

				ASSERT(irq != -1);
				apic_cpcovf_vect =
				    apic_irq_table[irq]->airq_vector;
				ASSERT(apic_cpcovf_vect);
				(void) add_avintr(NULL, ipl,
				    (avfunc)kcpc_hw_overflow_intr,
				    "apic pcint", irq, NULL, NULL, NULL, NULL);
				kcpc_hw_overflow_intr_installed = 1;
				kcpc_hw_enable_cpc_intr =
				    apic_cpcovf_mask_clear;
			}
			apic_reg_ops->apic_write(APIC_PCINT_VECT,
			    apic_cpcovf_vect);
		}
	}

	if (nlvt >= 6) {
		/* Only mask TM intr if the BIOS apparently doesn't use it */

		uint32_t lvtval;

		lvtval = apic_reg_ops->apic_read(APIC_THERM_VECT);
		if (((lvtval & AV_MASK) == AV_MASK) ||
		    ((lvtval & AV_DELIV_MODE) != AV_SMI)) {
			apic_reg_ops->apic_write(APIC_THERM_VECT,
			    AV_MASK|APIC_RESV_IRQ);
		}
	}

	/* Enable error interrupt */

	if (nlvt >= 4 && apic_enable_error_intr) {
		if (apic_errvect == 0) {
			int ipl = 0xf;	/* get highest priority intr */
			int irq = apic_get_ipivect(ipl, -1);

			ASSERT(irq != -1);
			apic_errvect = apic_irq_table[irq]->airq_vector;
			ASSERT(apic_errvect);
			/*
			 * Not PSMI compliant, but we are going to merge
			 * with ON anyway
			 */
			(void) add_avintr((void *)NULL, ipl,
			    (avfunc)apic_error_intr, "apic error intr",
			    irq, NULL, NULL, NULL, NULL);
		}
		apic_reg_ops->apic_write(APIC_ERR_VECT, apic_errvect);
		apic_reg_ops->apic_write(APIC_ERROR_STATUS, 0);
		apic_reg_ops->apic_write(APIC_ERROR_STATUS, 0);
	}

	/* Enable CMCI interrupt */
	if (cmi_enable_cmci) {

		mutex_enter(&cmci_cpu_setup_lock);
		if (cmci_cpu_setup_registered == 0) {
			mutex_enter(&cpu_lock);
			register_cpu_setup_func(cmci_cpu_setup, NULL);
			mutex_exit(&cpu_lock);
			cmci_cpu_setup_registered = 1;
		}
		mutex_exit(&cmci_cpu_setup_lock);

		if (apic_cmci_vect == 0) {
			int ipl = 0x2;
			int irq = apic_get_ipivect(ipl, -1);

			ASSERT(irq != -1);
			apic_cmci_vect = apic_irq_table[irq]->airq_vector;
			ASSERT(apic_cmci_vect);

			(void) add_avintr(NULL, ipl,
			    (avfunc)cmi_cmci_trap,
			    "apic cmci intr", irq, NULL, NULL, NULL, NULL);
		}
		apic_reg_ops->apic_write(APIC_CMCI_VECT, apic_cmci_vect);
	}
}

static void
apic_picinit(void)
{
	int i, j;
	uint_t isr;

	/*
	 * Initialize and enable interrupt remapping before apic
	 * hardware initialization
	 */
	apic_intrmap_init(apic_mode);

	/*
	 * On UniSys Model 6520, the BIOS leaves the vector 0x20 isr
	 * bit on without clearing it with EOI. Since softint
	 * uses vector 0x20 to interrupt itself, softint will
	 * not work on this machine. In order to fix this problem
	 * a check is made to verify all the isr bits are clear.
	 * If not, EOIs are issued to clear the bits.
	 */
	for (i = 7; i >= 1; i--) {
		isr = apic_reg_ops->apic_read(APIC_ISR_REG + (i * 4));
		if (isr != 0)
			for (j = 0; ((j < 32) && (isr != 0)); j++)
				if (isr & (1 << j)) {
					apic_reg_ops->apic_write(
					    APIC_EOI_REG, 0);
					isr &= ~(1 << j);
					apic_error |= APIC_ERR_BOOT_EOI;
				}
	}

	/* set a flag so we know we have run apic_picinit() */
	apic_picinit_called = 1;
	LOCK_INIT_CLEAR(&apic_gethrtime_lock);
	LOCK_INIT_CLEAR(&apic_ioapic_lock);
	LOCK_INIT_CLEAR(&apic_error_lock);
	LOCK_INIT_CLEAR(&apic_mode_switch_lock);

	picsetup();	/* initialise the 8259 */

	/* add nmi handler - least priority nmi handler */
	LOCK_INIT_CLEAR(&apic_nmi_lock);

	if (!psm_add_nmintr(0, (avfunc) apic_nmi_intr,
	    "pcplusmp NMI handler", (caddr_t)NULL))
		cmn_err(CE_WARN, "pcplusmp: Unable to add nmi handler");

	/*
	 * Check for directed-EOI capability in the local APIC.
	 */
	if (apic_directed_EOI_supported() == 1) {
		apic_set_directed_EOI_handler();
	}

	apic_init_intr();

	/* enable apic mode if imcr present */
	if (apic_imcrp) {
		outb(APIC_IMCR_P1, (uchar_t)APIC_IMCR_SELECT);
		outb(APIC_IMCR_P2, (uchar_t)APIC_IMCR_APIC);
	}

	ioapic_init_intr(IOAPIC_MASK);
}

#ifdef DEBUG
void
apic_break(void)
{
}
#endif /* DEBUG */

/*
 * platform_intr_enter
 *
 *	Called at the beginning of the interrupt service routine to
 *	mask all levels equal to and below the interrupt priority
 *	of the interrupting vector. An EOI should be given to
 *	the interrupt controller to enable other HW interrupts.
 *
 *	Return -1 for spurious interrupts
 *
 */
/*ARGSUSED*/
static int
apic_intr_enter(int ipl, int *vectorp)
{
	uchar_t vector;
	int nipl;
	int irq;
	ulong_t iflag;
	apic_cpus_info_t *cpu_infop;

	/*
	 * The real vector delivered is (*vectorp + 0x20), but our caller
	 * subtracts 0x20 from the vector before passing it to us.
	 * (That's why APIC_BASE_VECT is 0x20.)
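	 *
	 * For example (illustrative values only): an interrupt programmed at
	 * hardware vector 0x41 arrives here with *vectorp == 0x21. The
	 * adjusted value indexes apic_ipls[] below, while apic_vector_to_irq[]
	 * is indexed with the full hardware vector (vector + APIC_BASE_VECT).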
	 */
	vector = (uchar_t)*vectorp;

	/* if interrupted by the clock, increment apic_nsec_since_boot */
	if (vector == apic_clkvect) {
		if (!apic_oneshot) {
			/* NOTE: this is not MT aware */
			apic_hrtime_stamp++;
			apic_nsec_since_boot += apic_nsec_per_intr;
			apic_hrtime_stamp++;
			last_count_read = apic_hertz_count;
			apic_redistribute_compute();
		}

		/* We will avoid all the bookkeeping overhead for clock */
		nipl = apic_ipls[vector];

		*vectorp = apic_vector_to_irq[vector + APIC_BASE_VECT];

		apic_reg_ops->apic_write_task_reg(apic_ipltopri[nipl]);
		apic_reg_ops->apic_send_eoi(0);

		return (nipl);
	}

	cpu_infop = &apic_cpus[psm_get_cpu_id()];

	if (vector == (APIC_SPUR_INTR - APIC_BASE_VECT)) {
		cpu_infop->aci_spur_cnt++;
		return (APIC_INT_SPURIOUS);
	}

	/* Check if the vector we got is really what we need */
	if (apic_revector_pending) {
		/*
		 * Disable interrupts for the duration of
		 * the vector translation to prevent a self-race for
		 * the apic_revector_lock. This cannot be done
		 * in apic_xlate_vector because it is recursive and
		 * we want the vector translation to be atomic with
		 * respect to other (higher-priority) interrupts.
		 */
		iflag = intr_clear();
		vector = apic_xlate_vector(vector + APIC_BASE_VECT) -
		    APIC_BASE_VECT;
		intr_restore(iflag);
	}

	nipl = apic_ipls[vector];
	*vectorp = irq = apic_vector_to_irq[vector + APIC_BASE_VECT];

	apic_reg_ops->apic_write_task_reg(apic_ipltopri[nipl]);

	cpu_infop->aci_current[nipl] = (uchar_t)irq;
	cpu_infop->aci_curipl = (uchar_t)nipl;
	cpu_infop->aci_ISR_in_progress |= 1 << nipl;

	/*
	 * apic_level_intr could have been assimilated into the irq struct,
	 * but having it as a character array is more efficient in terms of
	 * cache usage. So, we leave it as is.
	 */
	if (!apic_level_intr[irq]) {
		apic_reg_ops->apic_send_eoi(0);
	}

#ifdef DEBUG
	APIC_DEBUG_BUF_PUT(vector);
	APIC_DEBUG_BUF_PUT(irq);
	APIC_DEBUG_BUF_PUT(nipl);
	APIC_DEBUG_BUF_PUT(psm_get_cpu_id());
	if ((apic_stretch_interrupts) && (apic_stretch_ISR & (1 << nipl)))
		drv_usecwait(apic_stretch_interrupts);

	if (apic_break_on_cpu == psm_get_cpu_id())
		apic_break();
#endif /* DEBUG */
	return (nipl);
}

/*
 * This macro is common code used by the MMIO local apic and the X2APIC
 * local apic.
 */
#define	APIC_INTR_EXIT() \
{ \
	cpu_infop = &apic_cpus[psm_get_cpu_id()]; \
	if (apic_level_intr[irq]) \
		apic_reg_ops->apic_send_eoi(irq); \
	cpu_infop->aci_curipl = (uchar_t)prev_ipl; \
	/* ISR above current pri could not be in progress */ \
	cpu_infop->aci_ISR_in_progress &= (2 << prev_ipl) - 1; \
}

/*
 * Any changes made to this function must also change the X2APIC
 * version of intr_exit.
 */
void
apic_intr_exit(int prev_ipl, int irq)
{
	apic_cpus_info_t *cpu_infop;

	local_apic_write_task_reg(apic_ipltopri[prev_ipl]);

	APIC_INTR_EXIT();
}

/*
 * Same as apic_intr_exit() except it uses MSR rather than MMIO
 * to access local apic registers.
 */
void
x2apic_intr_exit(int prev_ipl, int irq)
{
	apic_cpus_info_t *cpu_infop;

	X2APIC_WRITE(APIC_TASK_REG, apic_ipltopri[prev_ipl]);
	APIC_INTR_EXIT();
}

intr_exit_fn_t
psm_intr_exit_fn(void)
{
	if (apic_mode == LOCAL_X2APIC)
		return (x2apic_intr_exit);

	return (apic_intr_exit);
}

/*
 * Mask all interrupts below or equal to the given IPL.
 * Any changes made to this function must also change the X2APIC
 * version of setspl.
 */
static void
apic_setspl(int ipl)
{
	local_apic_write_task_reg(apic_ipltopri[ipl]);

	/* interrupts at ipl above this cannot be in progress */
	apic_cpus[psm_get_cpu_id()].aci_ISR_in_progress &= (2 << ipl) - 1;
	/*
	 * This is a patch fix for the ALR QSMP P5 machine, so that interrupts
	 * have enough time to come in before the priority is raised again
	 * during the idle() loop.
	 */
	if (apic_setspl_delay)
		(void) apic_reg_ops->apic_get_pri();
}

/*
 * X2APIC version of setspl.
 * Mask all interrupts below or equal to the given IPL
 */
static void
x2apic_setspl(int ipl)
{
	X2APIC_WRITE(APIC_TASK_REG, apic_ipltopri[ipl]);

	/* interrupts at ipl above this cannot be in progress */
	apic_cpus[psm_get_cpu_id()].aci_ISR_in_progress &= (2 << ipl) - 1;
}

/*ARGSUSED*/
static int
apic_addspl(int irqno, int ipl, int min_ipl, int max_ipl)
{
	return (apic_addspl_common(irqno, ipl, min_ipl, max_ipl));
}

static int
apic_delspl(int irqno, int ipl, int min_ipl, int max_ipl)
{
	return (apic_delspl_common(irqno, ipl, min_ipl, max_ipl));
}

static int
apic_post_cpu_start(void)
{
	int cpun;
	static int cpus_started = 1;

	/* We know this CPU + BSP started successfully. */
	cpus_started++;

	/*
	 * On BSP we would have enabled X2APIC, if supported by processor,
	 * in acpi_probe(), but on AP we do it here.
	 *
	 * We enable X2APIC mode only if BSP is running in X2APIC & the
	 * local APIC mode of the current CPU is MMIO (xAPIC).
	 */
	if (apic_mode == LOCAL_X2APIC && apic_detect_x2apic() &&
	    apic_local_mode() == LOCAL_APIC) {
		apic_enable_x2apic();
	}

	/*
	 * Switch back to x2apic IPI sending method for performance when target
	 * CPU has entered x2apic mode.
	 */
	if (apic_mode == LOCAL_X2APIC) {
		apic_switch_ipi_callback(B_FALSE);
	}

	splx(ipltospl(LOCK_LEVEL));
	apic_init_intr();

	/*
	 * Some systems don't enable the internal cache on the non-boot
	 * cpus, so we have to enable it here.
	 */
	setcr0(getcr0() & ~(CR0_CD | CR0_NW));

#ifdef	DEBUG
	APIC_AV_PENDING_SET();
#else
	if (apic_mode == LOCAL_APIC)
		APIC_AV_PENDING_SET();
#endif	/* DEBUG */

	/*
	 * We may be booting, or resuming from suspend; aci_status will
	 * be APIC_CPU_INTR_ENABLE if coming from suspend, so we add the
	 * APIC_CPU_ONLINE flag here rather than setting aci_status completely.
	 */
	cpun = psm_get_cpu_id();
	apic_cpus[cpun].aci_status |= APIC_CPU_ONLINE;

	apic_reg_ops->apic_write(APIC_DIVIDE_REG, apic_divide_reg_init);
	return (PSM_SUCCESS);
}

/*
 * type == -1 indicates it is an internal request. Do not change
 * resv_vector for these requests.
 */
static int
apic_get_ipivect(int ipl, int type)
{
	uchar_t vector;
	int irq;

	if ((irq = apic_allocate_irq(APIC_VECTOR(ipl))) != -1) {
		if (vector = apic_allocate_vector(ipl, irq, 1)) {
			apic_irq_table[irq]->airq_mps_intr_index =
			    RESERVE_INDEX;
			apic_irq_table[irq]->airq_vector = vector;
			if (type != -1) {
				apic_resv_vector[ipl] = vector;
			}
			return (irq);
		}
	}
	apic_error |= APIC_ERR_GET_IPIVECT_FAIL;
	return (-1);	/* shouldn't happen */
}

static int
apic_getclkirq(int ipl)
{
	int irq;

	if ((irq = apic_get_ipivect(ipl, -1)) == -1)
		return (-1);
	/*
	 * Note the vector in apic_clkvect for per clock handling.
	 */
	apic_clkvect = apic_irq_table[irq]->airq_vector - APIC_BASE_VECT;
	APIC_VERBOSE_IOAPIC((CE_NOTE, "get_clkirq: vector = %x\n",
	    apic_clkvect));
	return (irq);
}

/*
 * Try to disable all interrupts. We just assign interrupts to other
 * processors based on policy. If any were bound by user request, we
 * let them continue and return failure. We do not bother to check
 * for cache affinity while rebinding.
 */

static int
apic_disable_intr(processorid_t cpun)
{
	int bind_cpu = 0, i, hardbound = 0;
	apic_irq_t *irq_ptr;
	ulong_t iflag;

	iflag = intr_clear();
	lock_set(&apic_ioapic_lock);

	for (i = 0; i <= APIC_MAX_VECTOR; i++) {
		if (apic_reprogram_info[i].done == B_FALSE) {
			if (apic_reprogram_info[i].bindcpu == cpun) {
				/*
				 * CPU is busy -- it's the target of
				 * a pending reprogramming attempt
				 */
				lock_clear(&apic_ioapic_lock);
				intr_restore(iflag);
				return (PSM_FAILURE);
			}
		}
	}

	apic_cpus[cpun].aci_status &= ~APIC_CPU_INTR_ENABLE;

	apic_cpus[cpun].aci_curipl = 0;

	i = apic_min_device_irq;
	for (; i <= apic_max_device_irq; i++) {
		/*
		 * If there are bound interrupts on this cpu, then
		 * rebind them to other processors.
		 */
		if ((irq_ptr = apic_irq_table[i]) != NULL) {
			ASSERT((irq_ptr->airq_temp_cpu == IRQ_UNBOUND) ||
			    (irq_ptr->airq_temp_cpu == IRQ_UNINIT) ||
			    (apic_cpu_in_range(irq_ptr->airq_temp_cpu)));

			if (irq_ptr->airq_temp_cpu == (cpun | IRQ_USER_BOUND)) {
				hardbound = 1;
				continue;
			}

			if (irq_ptr->airq_temp_cpu == cpun) {
				do {
					bind_cpu =
					    apic_find_cpu(APIC_CPU_INTR_ENABLE);
				} while (apic_rebind_all(irq_ptr, bind_cpu));
			}
		}
	}

	lock_clear(&apic_ioapic_lock);
	intr_restore(iflag);

	if (hardbound) {
		cmn_err(CE_WARN, "Could not disable interrupts on %d "
		    "due to user bound interrupts", cpun);
		return (PSM_FAILURE);
	} else
		return (PSM_SUCCESS);
}

/*
 * Bind interrupts to the CPU's local APIC.
 * Interrupts should not be bound to a CPU's local APIC until the CPU
 * is ready to receive interrupts.
 */
static void
apic_enable_intr(processorid_t cpun)
{
	int	i;
	apic_irq_t *irq_ptr;
	ulong_t iflag;

	iflag = intr_clear();
	lock_set(&apic_ioapic_lock);

	apic_cpus[cpun].aci_status |= APIC_CPU_INTR_ENABLE;

	i = apic_min_device_irq;
	for (i = apic_min_device_irq; i <= apic_max_device_irq; i++) {
		if ((irq_ptr = apic_irq_table[i]) != NULL) {
			if ((irq_ptr->airq_cpu & ~IRQ_USER_BOUND) == cpun) {
				(void) apic_rebind_all(irq_ptr,
				    irq_ptr->airq_cpu);
			}
		}
	}

	if (apic_cpus[cpun].aci_status & APIC_CPU_SUSPEND)
		apic_cpus[cpun].aci_status &= ~APIC_CPU_SUSPEND;

	lock_clear(&apic_ioapic_lock);
	intr_restore(iflag);
}

/*
 * If this module needs a periodic handler for the interrupt distribution, it
 * can be added here. The argument to the periodic handler is not currently
 * used, but is reserved for future use.
 */
static void
apic_post_cyclic_setup(void *arg)
{
	_NOTE(ARGUNUSED(arg))

	cyc_handler_t cyh;
	cyc_time_t cyt;

	/* cpu_lock is held */
	/* set up a periodic handler for intr redistribution */

	/*
	 * In periodic mode intr redistribution processing is done in
	 * apic_intr_enter during clk intr processing
	 */
	if (!apic_oneshot)
		return;

	/*
	 * Register a periodic handler for the redistribution processing.
	 * Though we would generally prefer to use the DDI interface for
	 * periodic handler invocation, ddi_periodic_add(9F), we are
	 * unfortunately already holding cpu_lock, which ddi_periodic_add will
	 * attempt to take for us. Thus, we add our own cyclic directly:
	 */
	cyh.cyh_func = (void (*)(void *))apic_redistribute_compute;
	cyh.cyh_arg = NULL;
	cyh.cyh_level = CY_LOW_LEVEL;

	cyt.cyt_when = 0;
	cyt.cyt_interval = apic_redistribute_sample_interval;

	apic_cyclic_id = cyclic_add(&cyh, &cyt);
}

static void
apic_redistribute_compute(void)
{
	int	i, j, max_busy;

	if (apic_enable_dynamic_migration) {
		if (++apic_nticks == apic_sample_factor_redistribution) {
			/*
			 * Time to call apic_intr_redistribute().
			 * reset apic_nticks. This will cause max_busy
			 * to be calculated below and if it is more than
			 * apic_int_busy, we will do the whole thing
			 */
			apic_nticks = 0;
		}
		max_busy = 0;
		for (i = 0; i < apic_nproc; i++) {
			if (!apic_cpu_in_range(i))
				continue;

			/*
			 * Check if curipl is non zero & if ISR is in
			 * progress
			 */
			if (((j = apic_cpus[i].aci_curipl) != 0) &&
			    (apic_cpus[i].aci_ISR_in_progress & (1 << j))) {

				int	irq;
				apic_cpus[i].aci_busy++;
				irq = apic_cpus[i].aci_current[j];
				apic_irq_table[irq]->airq_busy++;
			}

			if (!apic_nticks &&
			    (apic_cpus[i].aci_busy > max_busy))
				max_busy = apic_cpus[i].aci_busy;
		}
		if (!apic_nticks) {
			if (max_busy > apic_int_busy_mark) {
				/*
				 * We could make the following check be
				 * skipped > 1 in which case, we get a
				 * redistribution at half the busy mark (due to
				 * double interval). Need to be able to collect
				 * more empirical data to decide if that is a
				 * good strategy. Punt for now.
				 */
				if (apic_skipped_redistribute) {
					apic_cleanup_busy();
					apic_skipped_redistribute = 0;
				} else {
					apic_intr_redistribute();
				}
			} else
				apic_skipped_redistribute++;
		}
	}
}


/*
 * The following functions are in the platform specific file so that they
 * can be different functions depending on whether we are running on
 * bare metal or a hypervisor.
 */

/*
 * Check to make sure there are enough irq slots
 */
int
apic_check_free_irqs(int count)
{
	int i, avail;

	avail = 0;
	for (i = APIC_FIRST_FREE_IRQ; i < APIC_RESV_IRQ; i++) {
		if ((apic_irq_table[i] == NULL) ||
		    apic_irq_table[i]->airq_mps_intr_index == FREE_INDEX) {
			if (++avail >= count)
				return (PSM_SUCCESS);
		}
	}
	return (PSM_FAILURE);
}

/*
 * This function allocates "count" MSI vector(s) for the given "dip/pri/type"
 */
int
apic_alloc_msi_vectors(dev_info_t *dip, int inum, int count, int pri,
    int behavior)
{
	int	rcount, i;
	uchar_t	start, irqno;
	uint32_t cpu;
	major_t	major;
	apic_irq_t	*irqptr;

	DDI_INTR_IMPLDBG((CE_CONT, "apic_alloc_msi_vectors: dip=0x%p "
	    "inum=0x%x pri=0x%x count=0x%x behavior=%d\n",
	    (void *)dip, inum, pri, count, behavior));

	if (count > 1) {
		if (behavior == DDI_INTR_ALLOC_STRICT &&
		    apic_multi_msi_enable == 0)
			return (0);
		if (apic_multi_msi_enable == 0)
			count = 1;
	}

	if ((rcount = apic_navail_vector(dip, pri)) > count)
		rcount = count;
	else if (rcount == 0 || (rcount < count &&
	    behavior == DDI_INTR_ALLOC_STRICT))
		return (0);

	/* if not ISP2, then round it down */
	if (!ISP2(rcount))
		rcount = 1 << (highbit(rcount) - 1);

	mutex_enter(&airq_mutex);

	for (start = 0; rcount > 0; rcount >>= 1) {
		if ((start = apic_find_multi_vectors(pri, rcount)) != 0 ||
		    behavior == DDI_INTR_ALLOC_STRICT)
			break;
	}

	if (start == 0) {
		/* no vector available */
		mutex_exit(&airq_mutex);
		return (0);
	}

	if (apic_check_free_irqs(rcount) == PSM_FAILURE) {
		/* not enough free irq slots available */
		mutex_exit(&airq_mutex);
		return (0);
	}

	major = (dip != NULL) ? ddi_driver_major(dip) : 0;
	for (i = 0; i < rcount; i++) {
		if ((irqno = apic_allocate_irq(apic_first_avail_irq)) ==
		    (uchar_t)-1) {
			/*
			 * shouldn't happen because of the
			 * apic_check_free_irqs() check earlier
			 */
			mutex_exit(&airq_mutex);
			DDI_INTR_IMPLDBG((CE_CONT, "apic_alloc_msi_vectors: "
			    "apic_allocate_irq failed\n"));
			return (i);
		}
		apic_max_device_irq = max(irqno, apic_max_device_irq);
		apic_min_device_irq = min(irqno, apic_min_device_irq);
		irqptr = apic_irq_table[irqno];
#ifdef	DEBUG
		if (apic_vector_to_irq[start + i] != APIC_RESV_IRQ)
			DDI_INTR_IMPLDBG((CE_CONT, "apic_alloc_msi_vectors: "
			    "apic_vector_to_irq is not APIC_RESV_IRQ\n"));
#endif
		apic_vector_to_irq[start + i] = (uchar_t)irqno;

		irqptr->airq_vector = (uchar_t)(start + i);
		irqptr->airq_ioapicindex = (uchar_t)inum;	/* start */
		irqptr->airq_intin_no = (uchar_t)rcount;
		irqptr->airq_ipl = pri;
		irqptr->airq_vector = start + i;
		irqptr->airq_origirq = (uchar_t)(inum + i);
		irqptr->airq_share_id = 0;
		irqptr->airq_mps_intr_index = MSI_INDEX;
		irqptr->airq_dip = dip;
		irqptr->airq_major = major;
		if (i == 0) /* they all bound to the same cpu */
			cpu = irqptr->airq_cpu = apic_bind_intr(dip, irqno,
			    0xff, 0xff);
		else
			irqptr->airq_cpu = cpu;
		DDI_INTR_IMPLDBG((CE_CONT, "apic_alloc_msi_vectors: irq=0x%x "
		    "dip=0x%p vector=0x%x origirq=0x%x pri=0x%x\n", irqno,
		    (void *)irqptr->airq_dip, irqptr->airq_vector,
		    irqptr->airq_origirq, pri));
	}
	mutex_exit(&airq_mutex);
	return (rcount);
}

/*
 * This function allocates "count" MSI-X vector(s) for the given "dip/pri/type"
 */
int
apic_alloc_msix_vectors(dev_info_t *dip, int inum, int count, int pri,
    int behavior)
{
	int	rcount, i;
	major_t	major;

	mutex_enter(&airq_mutex);

	if ((rcount = apic_navail_vector(dip, pri)) > count)
		rcount = count;
	else if (rcount == 0 || (rcount < count &&
	    behavior == DDI_INTR_ALLOC_STRICT)) {
		rcount = 0;
		goto out;
	}

	if (apic_check_free_irqs(rcount) == PSM_FAILURE) {
		/* not enough free irq slots available */
		rcount = 0;
		goto out;
	}

	major = (dip != NULL) ? ddi_driver_major(dip) : 0;
	for (i = 0; i < rcount; i++) {
		uchar_t vector, irqno;
		apic_irq_t	*irqptr;

		if ((irqno = apic_allocate_irq(apic_first_avail_irq)) ==
		    (uchar_t)-1) {
			/*
			 * shouldn't happen because of the
			 * apic_check_free_irqs() check earlier
			 */
			DDI_INTR_IMPLDBG((CE_CONT, "apic_alloc_msix_vectors: "
			    "apic_allocate_irq failed\n"));
			rcount = i;
			goto out;
		}
		if ((vector = apic_allocate_vector(pri, irqno, 1)) == 0) {
			/*
			 * shouldn't happen because of the
			 * apic_navail_vector() call earlier
			 */
			DDI_INTR_IMPLDBG((CE_CONT, "apic_alloc_msix_vectors: "
			    "apic_allocate_vector failed\n"));
			rcount = i;
			goto out;
		}
		apic_max_device_irq = max(irqno, apic_max_device_irq);
		apic_min_device_irq = min(irqno, apic_min_device_irq);
		irqptr = apic_irq_table[irqno];
		irqptr->airq_vector = (uchar_t)vector;
		irqptr->airq_ipl = pri;
		irqptr->airq_origirq = (uchar_t)(inum + i);
		irqptr->airq_share_id = 0;
		irqptr->airq_mps_intr_index = MSIX_INDEX;
		irqptr->airq_dip = dip;
		irqptr->airq_major = major;
		irqptr->airq_cpu = apic_bind_intr(dip, irqno, 0xff, 0xff);
	}
out:
	mutex_exit(&airq_mutex);
	return (rcount);
}

/*
 * Allocate a free vector for irq at ipl. Takes care of merging of multiple
 * IPLs into a single APIC level as well as stretching some IPLs onto multiple
 * levels. APIC_HI_PRI_VECTS interrupts are reserved for high priority
 * requests and allocated only when pri is set.
 */
uchar_t
apic_allocate_vector(int ipl, int irq, int pri)
{
	int	lowest, highest, i;

	highest = apic_ipltopri[ipl] + APIC_VECTOR_MASK;
	lowest = apic_ipltopri[ipl - 1] + APIC_VECTOR_PER_IPL;

	if (highest < lowest)	/* Both ipl and ipl - 1 map to same pri */
		lowest -= APIC_VECTOR_PER_IPL;

#ifdef	DEBUG
	if (apic_restrict_vector)	/* for testing shared interrupt logic */
		highest = lowest + apic_restrict_vector + APIC_HI_PRI_VECTS;
#endif /* DEBUG */
	if (pri == 0)
		highest -= APIC_HI_PRI_VECTS;

	for (i = lowest; i <= highest; i++) {
		if (APIC_CHECK_RESERVE_VECTORS(i))
			continue;
		if (apic_vector_to_irq[i] == APIC_RESV_IRQ) {
			apic_vector_to_irq[i] = (uchar_t)irq;
			return (i);
		}
	}

	return (0);
}

/* Mark vector as not being used by any irq */
void
apic_free_vector(uchar_t vector)
{
	apic_vector_to_irq[vector] = APIC_RESV_IRQ;
}

/*
 * Call rebind to do the actual programming.
 * Must be called with interrupts disabled and apic_ioapic_lock held
 * 'p' is polymorphic -- if this function is called to process a deferred
 * reprogramming, p is of type 'struct ioapic_reprogram_data *', from which
 * the irq pointer is retrieved. If not doing deferred reprogramming,
 * p is of the type 'apic_irq_t *'.
 *
 * apic_ioapic_lock must be held across this call, as it protects apic_rebind
 * and it protects apic_get_next_bind_cpu() from a race in which a CPU can be
 * taken offline after a cpu is selected, but before apic_rebind is called to
 * bind interrupts to it.
 */
int
apic_setup_io_intr(void *p, int irq, boolean_t deferred)
{
	apic_irq_t *irqptr;
	struct ioapic_reprogram_data *drep = NULL;
	int rv;

	if (deferred) {
		drep = (struct ioapic_reprogram_data *)p;
		ASSERT(drep != NULL);
		irqptr = drep->irqp;
	} else
		irqptr = (apic_irq_t *)p;

	ASSERT(irqptr != NULL);

	rv = apic_rebind(irqptr, apic_irq_table[irq]->airq_cpu, drep);
	if (rv) {
		/*
		 * CPU is not up or interrupts are disabled. Fall back to
		 * the first available CPU
		 */
		rv = apic_rebind(irqptr, apic_find_cpu(APIC_CPU_INTR_ENABLE),
		    drep);
	}

	return (rv);
}


uchar_t
apic_modify_vector(uchar_t vector, int irq)
{
	apic_vector_to_irq[vector] = (uchar_t)irq;
	return (vector);
}

char *
apic_get_apic_type(void)
{
	return (apic_psm_info.p_mach_idstring);
}

void
x2apic_update_psm(void)
{
	struct psm_ops *pops = &apic_ops;

	ASSERT(pops != NULL);

	pops->psm_intr_exit = x2apic_intr_exit;
	pops->psm_setspl = x2apic_setspl;

	pops->psm_send_ipi = x2apic_send_ipi;
	send_dirintf = pops->psm_send_ipi;

	apic_mode = LOCAL_X2APIC;
	apic_change_ops();
}