patch apic-simplify
patch remove-apic_cr8pri
patch spacing-fix
patch apic-task-reg-write-dup
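
The hunks below replace the open-coded apic_mode tests and #ifdef blocks at
each TPR/EOI write: the interrupt-entry paths go through the apic_reg_ops
vector (apic_write_task_reg, apic_send_eoi), and apic_intr_exit/apic_setspl
call a single local_apic_write_task_reg() helper.  A self-contained sketch of
the apic_reg_ops shape follows; only the two member names and the
apic_reg_ops pointer itself come from the diff, while the struct layout, the
sketch_* backends and their printf bodies are illustrative stand-ins, not the
pcplusmp sources.

/*
 * Illustrative sketch only: models the function-pointer indirection the
 * patched call sites rely on.  The backends here just print what a real
 * xAPIC (MMIO/CR8) or x2APIC (MSR) register write would do.
 */
#include <stdint.h>
#include <stdio.h>

typedef struct apic_regs_ops {
	void	(*apic_write_task_reg)(uint64_t);
	void	(*apic_send_eoi)(uint32_t);
} apic_reg_ops_t;

static void
sketch_xapic_write_task_reg(uint64_t pri)
{
	printf("xAPIC: TPR <- 0x%llx\n", (unsigned long long)pri);
}

static void
sketch_xapic_send_eoi(uint32_t v)
{
	printf("xAPIC: EOI <- %u\n", v);
}

static void
sketch_x2apic_write_task_reg(uint64_t pri)
{
	printf("x2APIC: TPR MSR <- 0x%llx\n", (unsigned long long)pri);
}

static void
sketch_x2apic_send_eoi(uint32_t v)
{
	printf("x2APIC: EOI MSR <- %u\n", v);
}

static apic_reg_ops_t sketch_xapic_ops = {
	sketch_xapic_write_task_reg,
	sketch_xapic_send_eoi
};

static apic_reg_ops_t sketch_x2apic_ops = {
	sketch_x2apic_write_task_reg,
	sketch_x2apic_send_eoi
};

/* Chosen once, at APIC init time; interrupt paths never test apic_mode. */
static apic_reg_ops_t *apic_reg_ops = &sketch_xapic_ops;

int
main(void)
{
	apic_reg_ops->apic_write_task_reg(0x60);	/* e.g. apic_ipltopri[nipl] */
	apic_reg_ops->apic_send_eoi(0);

	apic_reg_ops = &sketch_x2apic_ops;		/* e.g. x2APIC mode */
	apic_reg_ops->apic_write_task_reg(0x60);
	apic_reg_ops->apic_send_eoi(0);
	return (0);
}

The payoff is that apic_mode is examined once, when apic_reg_ops is pointed
at the right backend, rather than on every interrupt entry and exit.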

@@ -141,14 +141,10 @@
          */
 
 uchar_t apic_ipltopri[MAXIPL + 1];      /* unix ipl to apic pri */
         /* The taskpri to be programmed into apic to mask given ipl */
 
-#if defined(__amd64)
-uchar_t apic_cr8pri[MAXIPL + 1];        /* unix ipl to cr8 pri  */
-#endif
-
 /*
  * Correlation of the hardware vector to the IPL in use, initialized
  * from apic_vectortoipl[] in apic_init().  The final IPLs may not correlate
  * to the IPLs in apic_vectortoipl on some systems that share interrupt lines
  * connected to errata-stricken IOAPICs
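
Dropping apic_cr8pri loses nothing: the next hunk shows it was only ever
filled as apic_ipltopri[i] >> APIC_IPL_SHIFT, so the CR8 priority class can
always be recomputed from apic_ipltopri at the point of use (for example, a
task-priority byte of 0x60 shifts down to a CR8 value of 6, assuming the
usual 4-bit APIC_IPL_SHIFT).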

@@ -298,21 +294,14 @@
         }
         for (; j < MAXIPL + 1; j++)
                 /* fill up any empty ipltopri slots */
                 apic_ipltopri[j] = (i << APIC_IPL_SHIFT) + APIC_BASE_VECT;
         apic_init_common();
-#if defined(__amd64)
-        /*
-         * Make cpu-specific interrupt info point to cr8pri vector
-         */
-        for (i = 0; i <= MAXIPL; i++)
-                apic_cr8pri[i] = apic_ipltopri[i] >> APIC_IPL_SHIFT;
-        CPU->cpu_pri_data = apic_cr8pri;
-#else
+#ifndef __amd64
         if (cpuid_have_cr8access(CPU))
                 apic_have_32bit_cr8 = 1;
-#endif  /* __amd64 */
+#endif  /* !__amd64 */
 }
 
 static void
 apic_init_intr(void)
 {
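
On amd64 the TPR can always be set through %cr8 (which is what apic_cr8pri
cached), while a 32-bit kernel may use %cr8 only when cpuid_have_cr8access()
reports support, recorded just above in apic_have_32bit_cr8; otherwise it
must write the memory-mapped task register.  The apic_intr_exit and
apic_setspl hunks further down fold those removed #ifdef blocks into a single
helper they call as local_apic_write_task_reg().  A standalone sketch of what
such a helper amounts to follows; setcr8(), the register array and the two
constants are stubs and assumed values so the sketch compiles on its own, not
the kernel's definitions.

#include <stdint.h>
#include <stdio.h>

#define	APIC_IPL_SHIFT	4	/* assumed; the real value lives in apic.h */
#define	APIC_TASK_REG	0x20	/* stand-in TPR offset, in 32-bit words */

static volatile uint32_t fake_apicadr[1024];	/* stands in for apicadr */
static int apic_have_32bit_cr8;			/* set from cpuid at init */

static void
setcr8(unsigned long pri)			/* stub for the real setcr8() */
{
	printf("%%cr8 <- 0x%lx\n", pri);
}

static void
local_apic_write_task_reg(uint64_t value)
{
#if defined(__amd64)
	/* amd64 always has %cr8; it takes the high nibble of the TPR */
	setcr8((unsigned long)(value >> APIC_IPL_SHIFT));
#else
	if (apic_have_32bit_cr8)
		setcr8((unsigned long)(value >> APIC_IPL_SHIFT));
	else
		fake_apicadr[APIC_TASK_REG] = (uint32_t)value;
#endif
}

int
main(void)
{
	local_apic_write_task_reg(0x60);	/* e.g. apic_ipltopri[ipl] */
	return (0);
}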

@@ -579,27 +568,13 @@
 
                 /* We will avoid all the book keeping overhead for clock */
                 nipl = apic_ipls[vector];
 
                 *vectorp = apic_vector_to_irq[vector + APIC_BASE_VECT];
-                if (apic_mode == LOCAL_APIC) {
-#if defined(__amd64)
-                        setcr8((ulong_t)(apic_ipltopri[nipl] >>
-                            APIC_IPL_SHIFT));
-#else
-                        if (apic_have_32bit_cr8)
-                                setcr8((ulong_t)(apic_ipltopri[nipl] >>
-                                    APIC_IPL_SHIFT));
-                        else
-                                LOCAL_APIC_WRITE_REG(APIC_TASK_REG,
-                                    (uint32_t)apic_ipltopri[nipl]);
-#endif
-                        LOCAL_APIC_WRITE_REG(APIC_EOI_REG, 0);
-                } else {
-                        X2APIC_WRITE(APIC_TASK_REG, apic_ipltopri[nipl]);
-                        X2APIC_WRITE(APIC_EOI_REG, 0);
-                }
+
+                apic_reg_ops->apic_write_task_reg(apic_ipltopri[nipl]);
+                apic_reg_ops->apic_send_eoi(0);
 
                 return (nipl);
         }
 
         cpu_infop = &apic_cpus[psm_get_cpu_id()];

@@ -626,24 +601,11 @@
         }
 
         nipl = apic_ipls[vector];
         *vectorp = irq = apic_vector_to_irq[vector + APIC_BASE_VECT];
 
-        if (apic_mode == LOCAL_APIC) {
-#if defined(__amd64)
-                setcr8((ulong_t)(apic_ipltopri[nipl] >> APIC_IPL_SHIFT));
-#else
-                if (apic_have_32bit_cr8)
-                        setcr8((ulong_t)(apic_ipltopri[nipl] >>
-                            APIC_IPL_SHIFT));
-                else
-                        LOCAL_APIC_WRITE_REG(APIC_TASK_REG,
-                            (uint32_t)apic_ipltopri[nipl]);
-#endif
-        } else {
-                X2APIC_WRITE(APIC_TASK_REG, apic_ipltopri[nipl]);
-        }
+        apic_reg_ops->apic_write_task_reg(apic_ipltopri[nipl]);
 
         cpu_infop->aci_current[nipl] = (uchar_t)irq;
         cpu_infop->aci_curipl = (uchar_t)nipl;
         cpu_infop->aci_ISR_in_progress |= 1 << nipl;
 

@@ -651,15 +613,11 @@
          * apic_level_intr could have been assimilated into the irq struct.
          * but, having it as a character array is more efficient in terms of
          * cache usage. So, we leave it as is.
          */
         if (!apic_level_intr[irq]) {
-                if (apic_mode == LOCAL_APIC) {
-                        LOCAL_APIC_WRITE_REG(APIC_EOI_REG, 0);
-                } else {
-                        X2APIC_WRITE(APIC_EOI_REG, 0);
-                }
+                apic_reg_ops->apic_send_eoi(0);
         }
 
 #ifdef  DEBUG
         APIC_DEBUG_BUF_PUT(vector);
         APIC_DEBUG_BUF_PUT(irq);

@@ -695,18 +653,11 @@
 void
 apic_intr_exit(int prev_ipl, int irq)
 {
         apic_cpus_info_t *cpu_infop;
 
-#if defined(__amd64)
-        setcr8((ulong_t)apic_cr8pri[prev_ipl]);
-#else
-        if (apic_have_32bit_cr8)
-                setcr8((ulong_t)(apic_ipltopri[prev_ipl] >> APIC_IPL_SHIFT));
-        else
-                apicadr[APIC_TASK_REG] = apic_ipltopri[prev_ipl];
-#endif
+        local_apic_write_task_reg(apic_ipltopri[prev_ipl]);
 
         APIC_INTR_EXIT();
 }
 
 /*

@@ -737,18 +688,11 @@
  * version of setspl.
  */
 static void
 apic_setspl(int ipl)
 {
-#if defined(__amd64)
-        setcr8((ulong_t)apic_cr8pri[ipl]);
-#else
-        if (apic_have_32bit_cr8)
-                setcr8((ulong_t)(apic_ipltopri[ipl] >> APIC_IPL_SHIFT));
-        else
-                apicadr[APIC_TASK_REG] = apic_ipltopri[ipl];
-#endif
+        local_apic_write_task_reg(apic_ipltopri[ipl]);
 
         /* interrupts at ipl above this cannot be in progress */
         apic_cpus[psm_get_cpu_id()].aci_ISR_in_progress &= (2 << ipl) - 1;
         /*
          * this is a patch fix for the ALR QSMP P5 machine, so that interrupts