 *	7,8,9	0x80-0x8f	0x60-0x6f
 *	10	0x90-0x9f	0x70-0x7f
 *	11	0xa0-0xaf	0x80-0x8f
 *	...	...
 *	15	0xe0-0xef	0xc0-0xcf
 *	15	0xf0-0xff	0xd0-0xdf
 */
uchar_t	apic_vectortoipl[APIC_AVAIL_VECTOR / APIC_VECTOR_PER_IPL] = {
	3, 4, 5, 5, 6, 6, 9, 10, 11, 12, 13, 14, 15, 15
};
/*
 * The ipl of an ISR at vector X is apic_vectortoipl[X >> 4].
 * Note that X is the vector as passed into intr_enter, i.e. the
 * programmed vector minus 0x20 (APIC_BASE_VECT).
 */
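/*
 * Worked example (the values follow from the table and array above): a
 * programmed vector of 0x83 reaches intr_enter as 0x83 - 0x20 = 0x63;
 * 0x63 >> 4 == 6 and apic_vectortoipl[6] == 9, matching the
 * "7,8,9  0x80-0x8f  0x60-0x6f" row of the table.
 */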

uchar_t	apic_ipltopri[MAXIPL + 1];	/* unix ipl to apic pri */
/* The taskpri to be programmed into apic to mask given ipl */

#if defined(__amd64)
uchar_t	apic_cr8pri[MAXIPL + 1];	/* unix ipl to cr8 pri */
#endif
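/*
 * Background for the table above: on amd64 the local APIC TPR is also
 * reachable through %cr8, where CR8[3:0] corresponds to TPR[7:4]. Each
 * apic_cr8pri[ipl] is therefore apic_ipltopri[ipl] >> APIC_IPL_SHIFT
 * (filled in by apic_init() below); e.g. a TPR of 0x80 is written to
 * %cr8 as 8.
 */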

/*
 * Correlation of the hardware vector to the IPL in use, initialized
 * from apic_vectortoipl[] in apic_init(). The final IPLs may not correlate
 * to the IPLs in apic_vectortoipl on some systems that share interrupt lines
 * connected to errata-stricken IOAPICs.
 */
uchar_t	apic_ipls[APIC_AVAIL_VECTOR];

/*
 * Patchable global variables.
 */
int	apic_enable_hwsoftint = 0;	/* 0 - disable, 1 - enable */
int	apic_enable_bind_log = 1;	/* 1 - display interrupt binding log */

/*
 * Local static data
 */
static struct psm_ops apic_ops = {
	/* ... psm_ops initializer elided ... */

/*
 * ... elided down to the body of apic_init(), which resumes below. The
 * loop indices i and j used there are locals of apic_init(); j must
 * start at 1 so that apic_ipltopri[0], set explicitly below, is kept.
 */
	psm_get_ioapicid = apic_get_ioapicid;
	psm_get_localapicid = apic_get_localapicid;
	psm_xlate_vector_by_irq = apic_xlate_vector_by_irq;

	apic_ipltopri[0] = APIC_VECTOR_PER_IPL;	/* leave 0 for idle */
	for (i = 0; i < (APIC_AVAIL_VECTOR / APIC_VECTOR_PER_IPL); i++) {
		if ((i < ((APIC_AVAIL_VECTOR / APIC_VECTOR_PER_IPL) - 1)) &&
		    (apic_vectortoipl[i + 1] == apic_vectortoipl[i]))
			/* get to highest vector at the same ipl */
			continue;
		for (; j <= apic_vectortoipl[i]; j++) {
			apic_ipltopri[j] = (i << APIC_IPL_SHIFT) +
			    APIC_BASE_VECT;
		}
	}
	for (; j < MAXIPL + 1; j++)
		/* fill up any empty ipltopri slots */
		apic_ipltopri[j] = (i << APIC_IPL_SHIFT) + APIC_BASE_VECT;
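	/*
	 * Worked result (assuming APIC_VECTOR_PER_IPL == 16,
	 * APIC_IPL_SHIFT == 4 and APIC_BASE_VECT == 0x20, consistent with
	 * the table at the top of this section): the loops above leave
	 * apic_ipltopri[] as { 0x10, 0x20, 0x20, 0x20, 0x30, 0x50, 0x70,
	 * 0x80, 0x80, 0x80, 0x90, 0xa0, 0xb0, 0xc0, 0xd0, 0xf0 }, so
	 * masking IPL 9, for example, programs a task priority of 0x80,
	 * which inhibits all vectors up to 0x8f.
	 */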
	apic_init_common();
#if defined(__amd64)
	/*
	 * Make the cpu-specific interrupt info point at the cr8pri table.
	 */
	for (i = 0; i <= MAXIPL; i++)
		apic_cr8pri[i] = apic_ipltopri[i] >> APIC_IPL_SHIFT;
	CPU->cpu_pri_data = apic_cr8pri;
#else
	if (cpuid_have_cr8access(CPU))
		apic_have_32bit_cr8 = 1;
#endif	/* __amd64 */
}

static void
apic_init_intr(void)
{
	processorid_t cpun = psm_get_cpu_id();
	uint_t nlvt;
	uint32_t svr = AV_UNIT_ENABLE | APIC_SPUR_INTR;

	apic_reg_ops->apic_write_task_reg(APIC_MASK_ALL);

	if (apic_mode == LOCAL_APIC) {
		/*
		 * We are running APIC in MMIO mode.
		 */
		if (apic_flat_model) {

/*
 * ... remainder of apic_init_intr() and intervening code elided; the
 * interrupt-exit path resumes below ...
 */
#define	APIC_INTR_EXIT() \
{ \
	cpu_infop = &apic_cpus[psm_get_cpu_id()]; \
	if (apic_level_intr[irq]) \
		apic_reg_ops->apic_send_eoi(irq); \
	cpu_infop->aci_curipl = (uchar_t)prev_ipl; \
	/* ISR above current pri could not be in progress */ \
	cpu_infop->aci_ISR_in_progress &= (2 << prev_ipl) - 1; \
}
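
/*
 * The mask arithmetic above: (2 << prev_ipl) - 1 keeps bits 0..prev_ipl
 * set and clears everything higher; e.g. prev_ipl == 4 gives
 * (2 << 4) - 1 == 0x1f, so only ISRs at IPL 4 and below can remain
 * flagged as in progress.
 */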

/*
 * Any changes made to this function must also be made to the X2APIC
 * version of intr_exit.
 */
void
apic_intr_exit(int prev_ipl, int irq)
{
	apic_cpus_info_t *cpu_infop;

#if defined(__amd64)
	setcr8((ulong_t)apic_cr8pri[prev_ipl]);
#else
	if (apic_have_32bit_cr8)
		setcr8((ulong_t)(apic_ipltopri[prev_ipl] >> APIC_IPL_SHIFT));
	else
		apicadr[APIC_TASK_REG] = apic_ipltopri[prev_ipl];
#endif

	APIC_INTR_EXIT();
}

/*
 * Same as apic_intr_exit() except it uses MSR rather than MMIO
 * to access local apic registers.
 */
void
x2apic_intr_exit(int prev_ipl, int irq)
{
	apic_cpus_info_t *cpu_infop;

	X2APIC_WRITE(APIC_TASK_REG, apic_ipltopri[prev_ipl]);
	APIC_INTR_EXIT();	/* common bookkeeping; uses cpu_infop */
}

intr_exit_fn_t
psm_intr_exit_fn(void)
{
	if (apic_mode == LOCAL_X2APIC)
		return (x2apic_intr_exit);

	return (apic_intr_exit);
}
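
#if 0
/*
 * Illustrative sketch only (hypothetical, not part of this file): how a
 * platform interrupt-return path might consume psm_intr_exit_fn(). The
 * function and variable names here are invented for the example.
 */
static void
example_intr_return(int prev_ipl, int irq)
{
	intr_exit_fn_t exit_fn = psm_intr_exit_fn();

	/* Restores the task priority for prev_ipl and EOIs level intrs. */
	exit_fn(prev_ipl, irq);
}
#endif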

/*
 * Mask all interrupts below or equal to the given IPL.
 * Any changes made to this function must also be made to the X2APIC
 * version of setspl.
 */
static void
apic_setspl(int ipl)
{
#if defined(__amd64)
	setcr8((ulong_t)apic_cr8pri[ipl]);
#else
	if (apic_have_32bit_cr8)
		setcr8((ulong_t)(apic_ipltopri[ipl] >> APIC_IPL_SHIFT));
	else
		apicadr[APIC_TASK_REG] = apic_ipltopri[ipl];
#endif

	/* interrupts at ipl above this cannot be in progress */
	apic_cpus[psm_get_cpu_id()].aci_ISR_in_progress &= (2 << ipl) - 1;

	/*
	 * This is a patch fix for the ALR QSMP P5 machine, so that interrupts
	 * have enough time to come in before the priority is raised again
	 * during the idle() loop.
	 */
	if (apic_setspl_delay)
		(void) apic_reg_ops->apic_get_pri();
}

/*
 * X2APIC version of setspl.