/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * Copyright (c) 2010, Intel Corporation.
 * All rights reserved.
 */
/*
 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
 * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
 */

#include <sys/types.h>
#include <sys/thread.h>
#include <sys/cpuvar.h>
#include <sys/cpu.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/proc.h>
#include <sys/disp.h>
#include <sys/class.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/note.h>
#include <sys/asm_linkage.h>
#include <sys/x_call.h>
#include <sys/systm.h>
#include <sys/var.h>
#include <sys/vtrace.h>
#include <vm/hat.h>
#include <vm/as.h>
#include <vm/seg_kmem.h>
#include <vm/seg_kp.h>
#include <sys/segments.h>
#include <sys/kmem.h>
#include <sys/stack.h>
#include <sys/smp_impldefs.h>
#include <sys/x86_archext.h>
#include <sys/machsystm.h>
#include <sys/traptrace.h>
#include <sys/clock.h>
#include <sys/cpc_impl.h>
#include <sys/pg.h>
#include <sys/cmt.h>
#include <sys/dtrace.h>
#include <sys/archsystm.h>
#include <sys/fp.h>
#include <sys/reboot.h>
#include <sys/kdi_machimpl.h>
#include <vm/hat_i86.h>
#include <vm/vm_dep.h>
#include <sys/memnode.h>
#include <sys/pci_cfgspace.h>
#include <sys/mach_mmu.h>
#include <sys/sysmacros.h>
#if defined(__xpv)
#include <sys/hypervisor.h>
#endif
#include <sys/cpu_module.h>
#include <sys/ontrap.h>

struct cpu	cpus[1];			/* CPU data */
struct cpu	*cpu[NCPU] = {&cpus[0]};	/* pointers to all CPUs */
struct cpu	*cpu_free_list;			/* list for released CPUs */
cpu_core_t	cpu_core[NCPU];			/* cpu_core structures */

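/*
 * A cpu structure on the free list is not part of any active cpu list, so
 * its cpu_prev pointer is idle; the alias below reuses it as the free-list
 * link.
 */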
#define	cpu_next_free	cpu_prev

/*
 * Useful for disabling MP bring-up on an MP-capable system.
 */
int use_mp = 1;

/*
 * To be set by a PSM to indicate what cpus
 * are sitting around on the system.
 */
cpuset_t mp_cpus;

/*
 * This variable is used by the hat layer to decide whether or not
 * critical sections are needed to prevent race conditions. For sun4m,
 * this variable is set once enough MP initialization has been done in
 * order to allow cross calls.
 */
int flushes_require_xcalls;

cpuset_t cpu_ready_set;		/* initialized in startup() */

static void mp_startup_boot(void);
static void mp_startup_hotplug(void);

static void cpu_sep_enable(void);
static void cpu_sep_disable(void);
static void cpu_asysc_enable(void);
static void cpu_asysc_disable(void);

/*
 * Init CPU info - get CPU type info for processor_info system call.
 */
void
init_cpu_info(struct cpu *cp)
{
	processor_info_t *pi = &cp->cpu_type_info;

	/*
	 * Get clock-frequency property for the CPU.
	 */
	pi->pi_clock = cpu_freq;

	/*
	 * Current frequency in Hz.
	 */
	cp->cpu_curr_clock = cpu_freq_hz;

	/*
	 * Supported frequencies.
	 */
	if (cp->cpu_supp_freqs == NULL) {
		cpu_set_supp_freqs(cp, NULL);
	}

	(void) strcpy(pi->pi_processor_type, "i386");
	if (fpu_exists)
		(void) strcpy(pi->pi_fputypes, "i387 compatible");

	cp->cpu_idstr = kmem_zalloc(CPU_IDSTRLEN, KM_SLEEP);
	cp->cpu_brandstr = kmem_zalloc(CPU_IDSTRLEN, KM_SLEEP);

	/*
	 * If called for the BSP, cp is equal to the current CPU.
	 * For non-BSPs, cpuid info of cp is not ready yet, so use cpuid info
	 * of the current CPU as default values for cpu_idstr and cpu_brandstr.
	 * They will be corrected in mp_startup_common() after cpuid_pass1()
	 * has been invoked on the target CPU.
	 */
	(void) cpuid_getidstr(CPU, cp->cpu_idstr, CPU_IDSTRLEN);
	(void) cpuid_getbrandstr(CPU, cp->cpu_brandstr, CPU_IDSTRLEN);
}

/*
 * Configure syscall support on this CPU.
 */
/*ARGSUSED*/
void
init_cpu_syscall(struct cpu *cp)
{
	kpreempt_disable();

#if defined(__amd64)
	if (is_x86_feature(x86_featureset, X86FSET_MSR) &&
	    is_x86_feature(x86_featureset, X86FSET_ASYSC)) {

#if !defined(__lint)
		/*
		 * The syscall instruction imposes a certain ordering on
		 * segment selectors, so we double-check that ordering
		 * here.
		 */
		ASSERT(KDS_SEL == KCS_SEL + 8);
		ASSERT(UDS_SEL == U32CS_SEL + 8);
		ASSERT(UCS_SEL == U32CS_SEL + 16);
#endif
		/*
		 * Turn syscall/sysret extensions on.
		 */
		cpu_asysc_enable();

		/*
		 * Program the magic registers ..
		 */
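		/*
		 * (Architectural background: STAR carries the kernel %cs for
		 * SYSCALL in bits 47:32 and the base selector from which
		 * SYSRET derives the user %cs/%ss in bits 63:48; LSTAR and
		 * CSTAR hold the 64-bit and compatibility-mode entry points,
		 * respectively.)
		 */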
		wrmsr(MSR_AMD_STAR,
		    ((uint64_t)(U32CS_SEL << 16 | KCS_SEL)) << 32);
		wrmsr(MSR_AMD_LSTAR, (uint64_t)(uintptr_t)sys_syscall);
		wrmsr(MSR_AMD_CSTAR, (uint64_t)(uintptr_t)sys_syscall32);

		/*
		 * This list of flags is masked off the incoming
		 * %rfl when we enter the kernel.
		 */
		wrmsr(MSR_AMD_SFMASK, (uint64_t)(uintptr_t)(PS_IE | PS_T));
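		/*
		 * (Clearing PS_IE keeps interrupts off until the handler has
		 * moved off the user stack; clearing PS_T prevents a
		 * single-stepped thread from tracing into the handler.)
		 */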
	}
#endif

	/*
	 * On 32-bit kernels, we use sysenter/sysexit because it's too
	 * hard to use syscall/sysret, and it is more portable anyway.
	 *
	 * On 64-bit kernels on Nocona machines, the 32-bit syscall
	 * variant isn't available to 32-bit applications, but sysenter is.
	 */
	if (is_x86_feature(x86_featureset, X86FSET_MSR) &&
	    is_x86_feature(x86_featureset, X86FSET_SEP)) {

#if !defined(__lint)
		/*
		 * The sysenter instruction imposes a certain ordering on
		 * segment selectors, so we double-check that ordering
		 * here. See "sysenter" in Intel document 245471-012, "IA-32
		 * Intel Architecture Software Developer's Manual Volume 2:
		 * Instruction Set Reference"
		 */
		ASSERT(KDS_SEL == KCS_SEL + 8);

		ASSERT32(UCS_SEL == ((KCS_SEL + 16) | 3));
		ASSERT32(UDS_SEL == UCS_SEL + 8);

		ASSERT64(U32CS_SEL == ((KCS_SEL + 16) | 3));
		ASSERT64(UDS_SEL == U32CS_SEL + 8);
#endif

		cpu_sep_enable();

		/*
		 * resume() sets this value to the base of the thread's stack
		 * via a context handler.
		 */
		wrmsr(MSR_INTC_SEP_ESP, 0);
		wrmsr(MSR_INTC_SEP_EIP, (uint64_t)(uintptr_t)sys_sysenter);
	}

	kpreempt_enable();
}

/*
 * Multiprocessor initialization.
 *
 * Allocate and initialize the cpu structure, TRAPTRACE buffer, and the
 * startup and idle threads for the specified CPU.
 * Parameter boot is true for boot time operations and is false for CPU
 * DR operations.
 */
static struct cpu *
mp_cpu_configure_common(int cpun, boolean_t boot)
{
	struct cpu *cp;
	kthread_id_t tp;
	caddr_t	sp;
	proc_t *procp;
#if !defined(__xpv)
	extern int idle_cpu_prefer_mwait;
	extern void cpu_idle_mwait();
#endif
	extern void idle();
	extern void cpu_idle();

#ifdef TRAPTRACE
	trap_trace_ctl_t *ttc = &trap_trace_ctl[cpun];
#endif

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(cpun < NCPU && cpu[cpun] == NULL);

	if (cpu_free_list == NULL) {
		cp = kmem_zalloc(sizeof (*cp), KM_SLEEP);
	} else {
		cp = cpu_free_list;
		cpu_free_list = cp->cpu_next_free;
	}

	cp->cpu_m.mcpu_istamp = cpun << 16;

	/* Create per CPU specific threads in the process p0. */
	procp = &p0;

	/*
	 * Initialize the dispatcher first.
	 */
	disp_cpu_init(cp);

	cpu_vm_data_init(cp);

	/*
	 * Allocate and initialize the startup thread for this CPU.
	 * Interrupt and process switch stacks get allocated later
	 * when the CPU starts running.
	 */
	tp = thread_create(NULL, 0, NULL, NULL, 0, procp,
	    TS_STOPPED, maxclsyspri);

	/*
	 * Set state to TS_ONPROC since this thread will start running
	 * as soon as the CPU comes online.
	 *
	 * All the other fields of the thread structure are set up by
	 * thread_create().
	 */
	THREAD_ONPROC(tp, cp);
	tp->t_preempt = 1;
	tp->t_bound_cpu = cp;
	tp->t_affinitycnt = 1;
	tp->t_cpu = cp;
	tp->t_disp_queue = cp->cpu_disp;

	/*
	 * Setup thread to start in mp_startup_common.
	 */
	sp = tp->t_stk;
	tp->t_sp = (uintptr_t)(sp - MINFRAME);
#if defined(__amd64)
	tp->t_sp -= STACK_ENTRY_ALIGN;		/* fake a call */
#endif
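	/*
	 * (The STACK_ENTRY_ALIGN adjustment leaves the stack pointer looking
	 * as it would immediately after a "call" instruction, which is the
	 * alignment an ABI-conformant amd64 entry point expects.)
	 */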
	/*
	 * Setup thread start entry point for boot or hotplug.
	 */
	if (boot) {
		tp->t_pc = (uintptr_t)mp_startup_boot;
	} else {
		tp->t_pc = (uintptr_t)mp_startup_hotplug;
	}

	cp->cpu_id = cpun;
	cp->cpu_self = cp;
	cp->cpu_thread = tp;
	cp->cpu_lwp = NULL;
	cp->cpu_dispthread = tp;
	cp->cpu_dispatch_pri = DISP_PRIO(tp);

	/*
	 * cpu_base_spl must be set explicitly here to prevent any blocking
	 * operations in mp_startup_common from causing the spl of the cpu
	 * to drop to 0 (allowing device interrupts before we're ready) in
	 * resume().
	 * cpu_base_spl MUST remain at LOCK_LEVEL until the cpu is CPU_READY.
	 * As an extra bit of security on DEBUG kernels, this is enforced with
	 * an assertion in mp_startup_common() -- before cpu_base_spl is set
	 * to its proper value.
	 */
	cp->cpu_base_spl = ipltospl(LOCK_LEVEL);

	/*
	 * Now, initialize per-CPU idle thread for this CPU.
	 */
	tp = thread_create(NULL, PAGESIZE, idle, NULL, 0, procp, TS_ONPROC, -1);

	cp->cpu_idle_thread = tp;

	tp->t_preempt = 1;
	tp->t_bound_cpu = cp;
	tp->t_affinitycnt = 1;
	tp->t_cpu = cp;
	tp->t_disp_queue = cp->cpu_disp;

	/*
	 * Bootstrap the CPU's PG data
	 */
	pg_cpu_bootstrap(cp);

	/*
	 * Perform CPC initialization on the new CPU.
	 */
	kcpc_hw_init(cp);

	/*
	 * Allocate virtual addresses for cpu_caddr1 and cpu_caddr2
	 * for each CPU.
	 */
	setup_vaddr_for_ppcopy(cp);

	/*
	 * Allocate page for new GDT and initialize from current GDT.
	 */
#if !defined(__lint)
	ASSERT((sizeof (*cp->cpu_gdt) * NGDT) <= PAGESIZE);
#endif
	cp->cpu_gdt = kmem_zalloc(PAGESIZE, KM_SLEEP);
	bcopy(CPU->cpu_gdt, cp->cpu_gdt, (sizeof (*cp->cpu_gdt) * NGDT));

#if defined(__i386)
	/*
	 * setup kernel %gs.
	 */
	set_usegd(&cp->cpu_gdt[GDT_GS], cp, sizeof (struct cpu) - 1, SDT_MEMRWA,
	    SEL_KPL, 0, 1);
#endif

	/*
	 * If we have more than one node, each cpu gets a copy of IDT
	 * local to its node. If this is a Pentium box, we use cpu 0's
	 * IDT. cpu 0's IDT has been made read-only to work around the
	 * cmpxchgl register bug.
	 */
	if (system_hardware.hd_nodes && x86_type != X86_TYPE_P5) {
#if !defined(__lint)
		ASSERT((sizeof (*CPU->cpu_idt) * NIDT) <= PAGESIZE);
#endif
		cp->cpu_idt = kmem_zalloc(PAGESIZE, KM_SLEEP);
		bcopy(CPU->cpu_idt, cp->cpu_idt, PAGESIZE);
	} else {
		cp->cpu_idt = CPU->cpu_idt;
	}

	/*
	 * alloc space for cpuid info
	 */
	cpuid_alloc_space(cp);
#if !defined(__xpv)
	if (is_x86_feature(x86_featureset, X86FSET_MWAIT) &&
	    idle_cpu_prefer_mwait) {
		cp->cpu_m.mcpu_mwait = cpuid_mwait_alloc(cp);
		cp->cpu_m.mcpu_idle_cpu = cpu_idle_mwait;
	} else
#endif
		cp->cpu_m.mcpu_idle_cpu = cpu_idle;

	init_cpu_info(cp);

	/*
	 * alloc space for ucode_info
	 */
	ucode_alloc_space(cp);
	xc_init_cpu(cp);
	hat_cpu_online(cp);

#ifdef TRAPTRACE
	/*
	 * If this is a TRAPTRACE kernel, allocate TRAPTRACE buffers
	 */
	ttc->ttc_first = (uintptr_t)kmem_zalloc(trap_trace_bufsize, KM_SLEEP);
	ttc->ttc_next = ttc->ttc_first;
	ttc->ttc_limit = ttc->ttc_first + trap_trace_bufsize;
#endif

	/*
	 * Record that we have another CPU, and initialize its interrupt
	 * threads.
	 */
	cpu_intr_alloc(cp, NINTR_THREADS);

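	/*
	 * The new cpu starts life offline, quiesced and powered off;
	 * mp_startup_common() clears these flags as the cpu actually
	 * comes up.
	 */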
	cp->cpu_flags = CPU_OFFLINE | CPU_QUIESCED | CPU_POWEROFF;
	cpu_set_state(cp);

	/*
	 * Add CPU to list of available CPUs. It'll be on the active list
	 * after mp_startup_common().
	 */
	cpu_add_unit(cp);

	return (cp);
}

/*
 * Undo what was done in mp_cpu_configure_common
 */
static void
mp_cpu_unconfigure_common(struct cpu *cp, int error)
{
	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * Remove the CPU from the list of available CPUs.
	 */
	cpu_del_unit(cp->cpu_id);

	if (error == ETIMEDOUT) {
		/*
		 * The cpu was started, but never *seemed* to run any
		 * code in the kernel; it's probably off spinning in its
		 * own private world, though with potential references to
		 * our kmem-allocated IDTs and GDTs (for example).
		 *
		 * Worse still, it may actually wake up some time later,
		 * so rather than guess what it might or might not do, we
		 * leave the fundamental data structures intact.
		 */
		cp->cpu_flags = 0;
		return;
	}

	/*
	 * At this point, the only threads bound to this CPU should be
	 * special per-cpu threads: its idle thread, its pause threads,
	 * and its interrupt threads. Clean these up.
	 */
	cpu_destroy_bound_threads(cp);
	cp->cpu_idle_thread = NULL;

	/*
	 * Free the interrupt stack.
	 */
	segkp_release(segkp,
	    cp->cpu_intr_stack - (INTR_STACK_SIZE - SA(MINFRAME)));
	cp->cpu_intr_stack = NULL;

#ifdef TRAPTRACE
	/*
	 * Discard the trap trace buffer
	 */
	{
		trap_trace_ctl_t *ttc = &trap_trace_ctl[cp->cpu_id];

		kmem_free((void *)ttc->ttc_first, trap_trace_bufsize);
		ttc->ttc_first = NULL;
	}
#endif

	hat_cpu_offline(cp);

	ucode_free_space(cp);

	/* Free CPU ID string and brand string. */
	if (cp->cpu_idstr) {
		kmem_free(cp->cpu_idstr, CPU_IDSTRLEN);
		cp->cpu_idstr = NULL;
	}
	if (cp->cpu_brandstr) {
		kmem_free(cp->cpu_brandstr, CPU_IDSTRLEN);
		cp->cpu_brandstr = NULL;
	}

#if !defined(__xpv)
	if (cp->cpu_m.mcpu_mwait != NULL) {
		cpuid_mwait_free(cp);
		cp->cpu_m.mcpu_mwait = NULL;
	}
#endif
	cpuid_free_space(cp);

	if (cp->cpu_idt != CPU->cpu_idt)
		kmem_free(cp->cpu_idt, PAGESIZE);
	cp->cpu_idt = NULL;

	kmem_free(cp->cpu_gdt, PAGESIZE);
	cp->cpu_gdt = NULL;

	if (cp->cpu_supp_freqs != NULL) {
		size_t len = strlen(cp->cpu_supp_freqs) + 1;
		kmem_free(cp->cpu_supp_freqs, len);
		cp->cpu_supp_freqs = NULL;
	}

	teardown_vaddr_for_ppcopy(cp);

	kcpc_hw_fini(cp);

	cp->cpu_dispthread = NULL;
	cp->cpu_thread = NULL;	/* discarded by cpu_destroy_bound_threads() */

	cpu_vm_data_destroy(cp);

	xc_fini_cpu(cp);
	disp_cpu_fini(cp);

	ASSERT(cp != CPU0);
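	/*
	 * Scrub the structure and push it onto the free list; the
	 * allocation path in mp_cpu_configure_common() prefers this list
	 * over kmem_zalloc().
	 */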
	bzero(cp, sizeof (*cp));
	cp->cpu_next_free = cpu_free_list;
	cpu_free_list = cp;
}

/*
 * Apply workarounds for known errata, and warn about those that are absent.
 *
 * System vendors occasionally create configurations which contain different
 * revisions of the CPUs that are almost but not exactly the same. At the
 * time of writing, this meant that their clock rates were the same, their
 * feature sets were the same, but the required workarounds were -not-
 * necessarily the same. So, this routine is invoked on -every- CPU soon
 * after starting to make sure that the resulting system contains the most
 * pessimal set of workarounds needed to cope with *any* of the CPUs in the
 * system.
 *
 * workaround_errata is invoked early in mlsetup() for CPU 0, and in
 * mp_startup_common() for all slave CPUs. Slaves process workaround_errata
 * prior to acknowledging their readiness to the master, so this routine will
 * never be executed by multiple CPUs in parallel, thus making updates to
 * global data safe.
 *
 * These workarounds are based on Rev 3.57 of the Revision Guide for
 * AMD Athlon(tm) 64 and AMD Opteron(tm) Processors, August 2005.
 */

#if defined(OPTERON_ERRATUM_88)
int opteron_erratum_88;		/* if non-zero -> at least one cpu has it */
#endif

#if defined(OPTERON_ERRATUM_91)
int opteron_erratum_91;		/* if non-zero -> at least one cpu has it */
#endif

#if defined(OPTERON_ERRATUM_93)
int opteron_erratum_93;		/* if non-zero -> at least one cpu has it */
#endif

#if defined(OPTERON_ERRATUM_95)
int opteron_erratum_95;		/* if non-zero -> at least one cpu has it */
#endif

#if defined(OPTERON_ERRATUM_100)
int opteron_erratum_100;	/* if non-zero -> at least one cpu has it */
#endif

#if defined(OPTERON_ERRATUM_108)
int opteron_erratum_108;	/* if non-zero -> at least one cpu has it */
#endif

#if defined(OPTERON_ERRATUM_109)
int opteron_erratum_109;	/* if non-zero -> at least one cpu has it */
#endif

#if defined(OPTERON_ERRATUM_121)
int opteron_erratum_121;	/* if non-zero -> at least one cpu has it */
#endif

#if defined(OPTERON_ERRATUM_122)
int opteron_erratum_122;	/* if non-zero -> at least one cpu has it */
#endif

#if defined(OPTERON_ERRATUM_123)
int opteron_erratum_123;	/* if non-zero -> at least one cpu has it */
#endif

#if defined(OPTERON_ERRATUM_131)
int opteron_erratum_131;	/* if non-zero -> at least one cpu has it */
#endif

#if defined(OPTERON_WORKAROUND_6336786)
int opteron_workaround_6336786;		/* non-zero -> WA relevant and applied */
int opteron_workaround_6336786_UP = 0;	/* Not needed for UP */
#endif

#if defined(OPTERON_WORKAROUND_6323525)
int opteron_workaround_6323525;	/* if non-zero -> at least one cpu has it */
#endif

#if defined(OPTERON_ERRATUM_298)
int opteron_erratum_298;
#endif

#if defined(OPTERON_ERRATUM_721)
int opteron_erratum_721;
#endif

static void
workaround_warning(cpu_t *cp, uint_t erratum)
{
	cmn_err(CE_WARN, "cpu%d: no workaround for erratum %u",
	    cp->cpu_id, erratum);
}

static void
workaround_applied(uint_t erratum)
{
	if (erratum > 1000000)
		cmn_err(CE_CONT, "?workaround applied for cpu issue #%d\n",
		    erratum);
	else
		cmn_err(CE_CONT, "?workaround applied for cpu erratum #%d\n",
		    erratum);
}

static void
msr_warning(cpu_t *cp, const char *rw, uint_t msr, int error)
{
	cmn_err(CE_WARN, "cpu%d: couldn't %smsr 0x%x, error %d",
	    cp->cpu_id, rw, msr, error);
}

/*
 * Determine the number of nodes in a Hammer / Greyhound / Griffin family
 * system.
 */
static uint_t
opteron_get_nnodes(void)
{
	static uint_t nnodes = 0;

	if (nnodes == 0) {
#ifdef DEBUG
		uint_t family;

		/*
		 * This routine uses a PCI config space based mechanism
		 * for retrieving the number of nodes in the system.
		 * Device 24, function 0, offset 0x60 as used here is not
		 * AMD processor architectural, and may not work on processor
		 * families other than those listed below.
		 *
		 * Callers of this routine must ensure that we're running on
		 * a processor which supports this mechanism.
		 * The assertion below is meant to catch calls on unsupported
		 * processors.
		 */
		family = cpuid_getfamily(CPU);
		ASSERT(family == 0xf || family == 0x10 || family == 0x11);
#endif	/* DEBUG */

		/*
		 * Obtain the number of nodes in the system from
		 * bits [6:4] of the Node ID register on node 0.
		 *
		 * The actual node count is NodeID[6:4] + 1
		 *
		 * The Node ID register is accessed via function 0,
		 * offset 0x60. Node 0 is device 24.
		 */
		nnodes = ((pci_getl_func(0, 24, 0, 0x60) & 0x70) >> 4) + 1;
	}
	return (nnodes);
}

uint_t
do_erratum_298(struct cpu *cpu)
{
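	/* -3 is a sentinel meaning the OSVW state has not been queried yet */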
	static int osvwrc = -3;
	extern int osvw_opteron_erratum(cpu_t *, uint_t);

	/*
	 * L2 Eviction May Occur During Processor Operation To Set
	 * Accessed or Dirty Bit.
	 */
	if (osvwrc == -3) {
		osvwrc = osvw_opteron_erratum(cpu, 298);
	} else {
		/* osvw return codes should be consistent for all cpus */
		ASSERT(osvwrc == osvw_opteron_erratum(cpu, 298));
	}

	switch (osvwrc) {
	case 0:		/* erratum is not present: do nothing */
		break;
	case 1:		/* erratum is present: BIOS workaround applied */
		/*
		 * check if workaround is actually in place and issue warning
		 * if not.
		 */
		if (((rdmsr(MSR_AMD_HWCR) & AMD_HWCR_TLBCACHEDIS) == 0) ||
		    ((rdmsr(MSR_AMD_BU_CFG) & AMD_BU_CFG_E298) == 0)) {
#if defined(OPTERON_ERRATUM_298)
			opteron_erratum_298++;
#else
			workaround_warning(cpu, 298);
			return (1);
#endif
		}
		break;
	case -1:	/* cannot determine via osvw: check cpuid */
		if ((cpuid_opteron_erratum(cpu, 298) > 0) &&
		    (((rdmsr(MSR_AMD_HWCR) & AMD_HWCR_TLBCACHEDIS) == 0) ||
		    ((rdmsr(MSR_AMD_BU_CFG) & AMD_BU_CFG_E298) == 0))) {
#if defined(OPTERON_ERRATUM_298)
			opteron_erratum_298++;
#else
			workaround_warning(cpu, 298);
			return (1);
#endif
		}
		break;
	}
	return (0);
}

uint_t
workaround_errata(struct cpu *cpu)
{
	uint_t missing = 0;

	ASSERT(cpu == CPU);

	/*LINTED*/
	if (cpuid_opteron_erratum(cpu, 88) > 0) {
		/*
		 * SWAPGS May Fail To Read Correct GS Base
		 */
#if defined(OPTERON_ERRATUM_88)
		/*
		 * The workaround is an mfence in the relevant assembler code
		 */
		opteron_erratum_88++;
#else
		workaround_warning(cpu, 88);
		missing++;
#endif
	}

	if (cpuid_opteron_erratum(cpu, 91) > 0) {
		/*
		 * Software Prefetches May Report A Page Fault
		 */
#if defined(OPTERON_ERRATUM_91)
		/*
		 * fix is in trap.c
		 */
		opteron_erratum_91++;
#else
		workaround_warning(cpu, 91);
		missing++;
#endif
	}

	if (cpuid_opteron_erratum(cpu, 93) > 0) {
		/*
		 * RSM Auto-Halt Restart Returns to Incorrect RIP
		 */
#if defined(OPTERON_ERRATUM_93)
		/*
		 * fix is in trap.c
		 */
		opteron_erratum_93++;
#else
		workaround_warning(cpu, 93);
		missing++;
#endif
	}

	/*LINTED*/
	if (cpuid_opteron_erratum(cpu, 95) > 0) {
		/*
		 * RET Instruction May Return to Incorrect EIP
		 */
#if defined(OPTERON_ERRATUM_95)
#if defined(_LP64)
		/*
		 * Workaround this by ensuring that 32-bit user code and
		 * 64-bit kernel code never occupy the same address
		 * range mod 4G.
		 */
		if (_userlimit32 > 0xc0000000ul)
			*(uintptr_t *)&_userlimit32 = 0xc0000000ul;

		/*LINTED*/
		ASSERT((uint32_t)COREHEAP_BASE == 0xc0000000u);
		opteron_erratum_95++;
#endif	/* _LP64 */
#else
		workaround_warning(cpu, 95);
		missing++;
#endif
	}

	if (cpuid_opteron_erratum(cpu, 100) > 0) {
		/*
		 * Compatibility Mode Branches Transfer to Illegal Address
		 */
#if defined(OPTERON_ERRATUM_100)
		/*
		 * fix is in trap.c
		 */
		opteron_erratum_100++;
#else
		workaround_warning(cpu, 100);
		missing++;
#endif
	}

	/*LINTED*/
	if (cpuid_opteron_erratum(cpu, 108) > 0) {
		/*
		 * CPUID Instruction May Return Incorrect Model Number In
		 * Some Processors
		 */
#if defined(OPTERON_ERRATUM_108)
		/*
		 * (Our cpuid-handling code corrects the model number on
		 * those processors)
		 */
#else
		workaround_warning(cpu, 108);
		missing++;
#endif
	}

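	/*
	 * The do { } while (0) wrappers around several of the checks below
	 * exist only so that an individual erratum check can bail out early
	 * with "break".
	 */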
	/*LINTED*/
	if (cpuid_opteron_erratum(cpu, 109) > 0) do {
		/*
		 * Certain Reverse REP MOVS May Produce Unpredictable Behavior
		 */
#if defined(OPTERON_ERRATUM_109)
		/*
		 * The "workaround" is to print a warning to upgrade the BIOS
		 */
		uint64_t value;
		const uint_t msr = MSR_AMD_PATCHLEVEL;
		int err;

		if ((err = checked_rdmsr(msr, &value)) != 0) {
			msr_warning(cpu, "rd", msr, err);
			workaround_warning(cpu, 109);
			missing++;
		}
		if (value == 0)
			opteron_erratum_109++;
#else
		workaround_warning(cpu, 109);
		missing++;
#endif
	/*CONSTANTCONDITION*/
	} while (0);

	/*LINTED*/
	if (cpuid_opteron_erratum(cpu, 121) > 0) {
		/*
		 * Sequential Execution Across Non-Canonical Boundary Causes
		 * Processor Hang
		 */
#if defined(OPTERON_ERRATUM_121)
#if defined(_LP64)
		/*
		 * Erratum 121 is only present in long (64 bit) mode.
		 * Workaround is to include the page immediately before the
		 * va hole to eliminate the possibility of system hangs due to
		 * sequential execution across the va hole boundary.
		 */
		if (opteron_erratum_121)
			opteron_erratum_121++;
		else {
			if (hole_start) {
				hole_start -= PAGESIZE;
			} else {
				/*
				 * hole_start not yet initialized by
				 * mmu_init. Initialize hole_start
				 * with value to be subtracted.
				 */
				hole_start = PAGESIZE;
			}
			opteron_erratum_121++;
		}
#endif	/* _LP64 */
#else
		workaround_warning(cpu, 121);
		missing++;
#endif
	}

	/*LINTED*/
	if (cpuid_opteron_erratum(cpu, 122) > 0) do {
		/*
		 * TLB Flush Filter May Cause Coherency Problem in
		 * Multiprocessor Systems
		 */
#if defined(OPTERON_ERRATUM_122)
		uint64_t value;
		const uint_t msr = MSR_AMD_HWCR;
		int error;

		/*
		 * Erratum 122 is only present in MP configurations (multi-core
		 * or multi-processor).
		 */
#if defined(__xpv)
		if (!DOMAIN_IS_INITDOMAIN(xen_info))
			break;
		if (!opteron_erratum_122 && xpv_nr_phys_cpus() == 1)
			break;
#else
		if (!opteron_erratum_122 && opteron_get_nnodes() == 1 &&
		    cpuid_get_ncpu_per_chip(cpu) == 1)
			break;
#endif
		/* disable TLB Flush Filter */

		if ((error = checked_rdmsr(msr, &value)) != 0) {
			msr_warning(cpu, "rd", msr, error);
			workaround_warning(cpu, 122);
			missing++;
		} else {
			value |= (uint64_t)AMD_HWCR_FFDIS;
			if ((error = checked_wrmsr(msr, value)) != 0) {
				msr_warning(cpu, "wr", msr, error);
				workaround_warning(cpu, 122);
				missing++;
			}
		}
		opteron_erratum_122++;
#else
		workaround_warning(cpu, 122);
		missing++;
#endif
	/*CONSTANTCONDITION*/
	} while (0);

	/*LINTED*/
	if (cpuid_opteron_erratum(cpu, 123) > 0) do {
		/*
		 * Bypassed Reads May Cause Data Corruption or System Hang in
		 * Dual Core Processors
		 */
#if defined(OPTERON_ERRATUM_123)
		uint64_t value;
		const uint_t msr = MSR_AMD_PATCHLEVEL;
		int err;

		/*
		 * Erratum 123 applies only to multi-core cpus.
		 */
		if (cpuid_get_ncpu_per_chip(cpu) < 2)
			break;
#if defined(__xpv)
		if (!DOMAIN_IS_INITDOMAIN(xen_info))
			break;
#endif
		/*
		 * The "workaround" is to print a warning to upgrade the BIOS
		 */
		if ((err = checked_rdmsr(msr, &value)) != 0) {
			msr_warning(cpu, "rd", msr, err);
			workaround_warning(cpu, 123);
			missing++;
		}
		if (value == 0)
			opteron_erratum_123++;
#else
		workaround_warning(cpu, 123);
		missing++;
#endif
	/*CONSTANTCONDITION*/
	} while (0);

	/*LINTED*/
	if (cpuid_opteron_erratum(cpu, 131) > 0) do {
		/*
		 * Multiprocessor Systems with Four or More Cores May Deadlock
		 * Waiting for a Probe Response
		 */
#if defined(OPTERON_ERRATUM_131)
		uint64_t nbcfg;
		const uint_t msr = MSR_AMD_NB_CFG;
		const uint64_t wabits =
		    AMD_NB_CFG_SRQ_HEARTBEAT | AMD_NB_CFG_SRQ_SPR;
		int error;

		/*
		 * Erratum 131 applies to any system with four or more cores.
		 */
		if (opteron_erratum_131)
			break;
#if defined(__xpv)
		if (!DOMAIN_IS_INITDOMAIN(xen_info))
			break;
		if (xpv_nr_phys_cpus() < 4)
			break;
#else
		if (opteron_get_nnodes() * cpuid_get_ncpu_per_chip(cpu) < 4)
			break;
#endif
		/*
		 * Print a warning if neither of the workarounds for
		 * erratum 131 is present.
		 */
		if ((error = checked_rdmsr(msr, &nbcfg)) != 0) {
			msr_warning(cpu, "rd", msr, error);
			workaround_warning(cpu, 131);
			missing++;
		} else if ((nbcfg & wabits) == 0) {
			opteron_erratum_131++;
		} else {
			/* cannot have both workarounds set */
			ASSERT((nbcfg & wabits) != wabits);
		}
#else
		workaround_warning(cpu, 131);
		missing++;
#endif
	/*CONSTANTCONDITION*/
	} while (0);

	/*
	 * This isn't really an erratum, but for convenience the
	 * detection/workaround code lives here and in cpuid_opteron_erratum.
	 */
	if (cpuid_opteron_erratum(cpu, 6336786) > 0) {
#if defined(OPTERON_WORKAROUND_6336786)
		/*
		 * Disable C1-Clock ramping on multi-core/multi-processor
		 * K8 platforms to guard against TSC drift.
		 */
		if (opteron_workaround_6336786) {
			opteron_workaround_6336786++;
#if defined(__xpv)
		} else if ((DOMAIN_IS_INITDOMAIN(xen_info) &&
		    xpv_nr_phys_cpus() > 1) ||
		    opteron_workaround_6336786_UP) {
			/*
			 * XXPV	Hmm. We can't walk the Northbridges on
			 *	the hypervisor; so just complain and drive
			 *	on. This probably needs to be fixed in
			 *	the hypervisor itself.
			 */
			opteron_workaround_6336786++;
			workaround_warning(cpu, 6336786);
#else	/* __xpv */
		} else if ((opteron_get_nnodes() *
		    cpuid_get_ncpu_per_chip(cpu) > 1) ||
		    opteron_workaround_6336786_UP) {

			uint_t node, nnodes;
			uint8_t data;

			nnodes = opteron_get_nnodes();
			for (node = 0; node < nnodes; node++) {
				/*
				 * Clear PMM7[1:0] (function 3, offset 0x87)
				 * Northbridge device is the node id + 24.
				 */
				data = pci_getb_func(0, node + 24, 3, 0x87);
				data &= 0xFC;
				pci_putb_func(0, node + 24, 3, 0x87, data);
			}
			opteron_workaround_6336786++;
#endif	/* __xpv */
		}
#else
		workaround_warning(cpu, 6336786);
		missing++;
#endif
	}

	/*LINTED*/
	/*
	 * Mutex primitives don't work as expected.
	 */
	if (cpuid_opteron_erratum(cpu, 6323525) > 0) {
#if defined(OPTERON_WORKAROUND_6323525)
		/*
		 * This problem only occurs with 2 or more cores. If the bit
		 * in MSR_AMD_BU_CFG is set, the erratum is not applicable.
		 * The workaround is to patch the semaphore routines with the
		 * lfence instruction to provide the necessary load memory
		 * barrier before possible subsequent read-modify-write ops.
		 *
		 * It is too early in boot to call the patch routine, so set
		 * the erratum variable and let startup_end() do the patching.
		 */
		if (opteron_workaround_6323525) {
			opteron_workaround_6323525++;
#if defined(__xpv)
		} else if (is_x86_feature(x86_featureset, X86FSET_SSE2)) {
			if (DOMAIN_IS_INITDOMAIN(xen_info)) {
				/*
				 * XXPV	Use dom0_msr here when extended
				 *	operations are supported?
				 */
				if (xpv_nr_phys_cpus() > 1)
					opteron_workaround_6323525++;
			} else {
				/*
				 * We have no way to tell how many physical
				 * cpus there are, or even if this processor
				 * has the problem, so enable the workaround
				 * unconditionally (at some performance cost).
				 */
				opteron_workaround_6323525++;
			}
#else	/* __xpv */
		} else if (is_x86_feature(x86_featureset, X86FSET_SSE2) &&
		    ((opteron_get_nnodes() *
		    cpuid_get_ncpu_per_chip(cpu)) > 1)) {
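			/*
			 * (BU_CFG bit 33 set means the fix is already in
			 * effect, per the comment above.)
			 */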
			if ((xrdmsr(MSR_AMD_BU_CFG) & (UINT64_C(1) << 33)) == 0)
				opteron_workaround_6323525++;
#endif	/* __xpv */
		}
#else
		workaround_warning(cpu, 6323525);
		missing++;
#endif
	}

	missing += do_erratum_298(cpu);

	if (cpuid_opteron_erratum(cpu, 721) > 0) {
#if defined(OPTERON_ERRATUM_721)
		on_trap_data_t otd;

		if (!on_trap(&otd, OT_DATA_ACCESS))
			wrmsr(MSR_AMD_DE_CFG,
			    rdmsr(MSR_AMD_DE_CFG) | AMD_DE_CFG_E721);
		no_trap();

		opteron_erratum_721++;
#else
		workaround_warning(cpu, 721);
		missing++;
#endif
	}

#ifdef __xpv
	return (0);
#else
	return (missing);
#endif
}

void
workaround_errata_end()
{
#if defined(OPTERON_ERRATUM_88)
	if (opteron_erratum_88)
		workaround_applied(88);
#endif
#if defined(OPTERON_ERRATUM_91)
	if (opteron_erratum_91)
		workaround_applied(91);
#endif
#if defined(OPTERON_ERRATUM_93)
	if (opteron_erratum_93)
		workaround_applied(93);
#endif
#if defined(OPTERON_ERRATUM_95)
	if (opteron_erratum_95)
		workaround_applied(95);
#endif
#if defined(OPTERON_ERRATUM_100)
	if (opteron_erratum_100)
		workaround_applied(100);
#endif
#if defined(OPTERON_ERRATUM_108)
	if (opteron_erratum_108)
		workaround_applied(108);
#endif
#if defined(OPTERON_ERRATUM_109)
	if (opteron_erratum_109) {
		cmn_err(CE_WARN,
		    "BIOS microcode patch for AMD Athlon(tm) 64/Opteron(tm)"
		    " processor\nerratum 109 was not detected; updating your"
		    " system's BIOS to a version\ncontaining this"
		    " microcode patch is HIGHLY recommended or erroneous"
		    " system\noperation may occur.\n");
	}
#endif
#if defined(OPTERON_ERRATUM_121)
	if (opteron_erratum_121)
		workaround_applied(121);
#endif
#if defined(OPTERON_ERRATUM_122)
	if (opteron_erratum_122)
		workaround_applied(122);
#endif
#if defined(OPTERON_ERRATUM_123)
	if (opteron_erratum_123) {
		cmn_err(CE_WARN,
		    "BIOS microcode patch for AMD Athlon(tm) 64/Opteron(tm)"
		    " processor\nerratum 123 was not detected; updating your"
		    " system's BIOS to a version\ncontaining this"
		    " microcode patch is HIGHLY recommended or erroneous"
		    " system\noperation may occur.\n");
	}
#endif
#if defined(OPTERON_ERRATUM_131)
	if (opteron_erratum_131) {
		cmn_err(CE_WARN,
		    "BIOS microcode patch for AMD Athlon(tm) 64/Opteron(tm)"
		    " processor\nerratum 131 was not detected; updating your"
		    " system's BIOS to a version\ncontaining this"
		    " microcode patch is HIGHLY recommended or erroneous"
		    " system\noperation may occur.\n");
	}
#endif
#if defined(OPTERON_WORKAROUND_6336786)
	if (opteron_workaround_6336786)
		workaround_applied(6336786);
#endif
#if defined(OPTERON_WORKAROUND_6323525)
	if (opteron_workaround_6323525)
		workaround_applied(6323525);
#endif
#if defined(OPTERON_ERRATUM_298)
	if (opteron_erratum_298) {
		cmn_err(CE_WARN,
		    "BIOS microcode patch for AMD 64/Opteron(tm)"
		    " processor\nerratum 298 was not detected; updating your"
		    " system's BIOS to a version\ncontaining this"
		    " microcode patch is HIGHLY recommended or erroneous"
		    " system\noperation may occur.\n");
	}
#endif
#if defined(OPTERON_ERRATUM_721)
	if (opteron_erratum_721)
		workaround_applied(721);
#endif
}

/*
 * The procset_slave and procset_master are used to synchronize
 * between the control CPU and the target CPU when starting CPUs.
 */
static cpuset_t procset_slave, procset_master;
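/*
 * The handshake below is symmetric: the signalling side sets its bit in
 * the shared cpuset and spins until the waiting side acknowledges by
 * clearing it, so each rendezvous point is consumed exactly once.
 */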

static void
mp_startup_wait(cpuset_t *sp, processorid_t cpuid)
{
	cpuset_t tempset;

	for (tempset = *sp; !CPU_IN_SET(tempset, cpuid);
	    tempset = *(volatile cpuset_t *)sp) {
		SMT_PAUSE();
	}
	CPUSET_ATOMIC_DEL(*(cpuset_t *)sp, cpuid);
}

static void
mp_startup_signal(cpuset_t *sp, processorid_t cpuid)
{
	cpuset_t tempset;

	CPUSET_ATOMIC_ADD(*(cpuset_t *)sp, cpuid);
	for (tempset = *sp; CPU_IN_SET(tempset, cpuid);
	    tempset = *(volatile cpuset_t *)sp) {
		SMT_PAUSE();
	}
}

int
mp_start_cpu_common(cpu_t *cp, boolean_t boot)
{
	_NOTE(ARGUNUSED(boot));

	void *ctx;
	int delays;
	int error = 0;
	cpuset_t tempset;
	processorid_t cpuid;
#ifndef __xpv
	extern void cpupm_init(cpu_t *);
#endif

	ASSERT(cp != NULL);
	cpuid = cp->cpu_id;
	ctx = mach_cpucontext_alloc(cp);
	if (ctx == NULL) {
		cmn_err(CE_WARN,
		    "cpu%d: failed to allocate context", cp->cpu_id);
		return (EAGAIN);
	}
	error = mach_cpu_start(cp, ctx);
	if (error != 0) {
		cmn_err(CE_WARN,
		    "cpu%d: failed to start, error %d", cp->cpu_id, error);
		mach_cpucontext_free(cp, ctx, error);
		return (error);
	}

	for (delays = 0, tempset = procset_slave; !CPU_IN_SET(tempset, cpuid);
	    delays++) {
		if (delays == 500) {
			/*
			 * After five seconds, things are probably looking
			 * a bit bleak - explain the hang.
			 */
			cmn_err(CE_NOTE, "cpu%d: started, "
			    "but not running in the kernel yet", cpuid);
		} else if (delays > 2000) {
			/*
			 * We waited at least 20 seconds, bail ..
			 */
			error = ETIMEDOUT;
			cmn_err(CE_WARN, "cpu%d: timed out", cpuid);
			mach_cpucontext_free(cp, ctx, error);
			return (error);
		}

		/*
		 * wait at least 10ms, then check again..
		 */
		delay(USEC_TO_TICK_ROUNDUP(10000));
		tempset = *((volatile cpuset_t *)&procset_slave);
	}
	CPUSET_ATOMIC_DEL(procset_slave, cpuid);

	mach_cpucontext_free(cp, ctx, 0);

#ifndef __xpv
	if (tsc_gethrtime_enable)
		tsc_sync_master(cpuid);
#endif

	if (dtrace_cpu_init != NULL) {
		(*dtrace_cpu_init)(cpuid);
	}

	/*
	 * During CPU DR operations, the cpu_lock is held by current
	 * (the control) thread. We can't release the cpu_lock here
	 * because that will break the CPU DR logic.
	 * On the other hand, CPUPM and processor group initialization
	 * routines need to access the cpu_lock. So we invoke those
	 * routines here on behalf of mp_startup_common().
	 *
	 * CPUPM and processor group initialization routines depend
	 * on the cpuid probing results. Wait for mp_startup_common()
	 * to signal that cpuid probing is done.
	 */
	mp_startup_wait(&procset_slave, cpuid);
#ifndef __xpv
	cpupm_init(cp);
#endif
	(void) pg_cpu_init(cp, B_FALSE);
	cpu_set_state(cp);
	mp_startup_signal(&procset_master, cpuid);

	return (0);
}

/*
 * Start a single cpu, assuming that the kernel context is available
 * to successfully start another cpu.
 *
 * (For example, real mode code is mapped into the right place
 * in memory and is ready to be run.)
 */
int
start_cpu(processorid_t who)
{
	cpu_t *cp;
	int error = 0;
	cpuset_t tempset;

	ASSERT(who != 0);

	/*
	 * Check if there's at least a Mbyte of kmem available
	 * before attempting to start the cpu.
	 */
	if (kmem_avail() < 1024 * 1024) {
		/*
		 * Kick off a reap in case that helps us with
		 * later attempts ..
		 */
		kmem_reap();
		return (ENOMEM);
	}

	/*
	 * First configure cpu.
	 */
	cp = mp_cpu_configure_common(who, B_TRUE);
	ASSERT(cp != NULL);

	/*
	 * Then start cpu.
	 */
	error = mp_start_cpu_common(cp, B_TRUE);
	if (error != 0) {
		mp_cpu_unconfigure_common(cp, error);
		return (error);
	}

	mutex_exit(&cpu_lock);
	tempset = cpu_ready_set;
	while (!CPU_IN_SET(tempset, who)) {
		drv_usecwait(1);
		tempset = *((volatile cpuset_t *)&cpu_ready_set);
	}
	mutex_enter(&cpu_lock);

	return (0);
}

void
start_other_cpus(int cprboot)
{
	_NOTE(ARGUNUSED(cprboot));

	uint_t who;
	uint_t bootcpuid = 0;

	/*
	 * Initialize our own cpu_info.
	 */
	init_cpu_info(CPU);

	cmn_err(CE_CONT, "?cpu%d: %s\n", CPU->cpu_id, CPU->cpu_idstr);
	cmn_err(CE_CONT, "?cpu%d: %s\n", CPU->cpu_id, CPU->cpu_brandstr);

	/*
	 * Initialize our syscall handlers
	 */
	init_cpu_syscall(CPU);

	/*
	 * Take the boot cpu out of the mp_cpus set because we know
	 * it's already running. Add it to the cpu_ready_set for
	 * precisely the same reason.
	 */
	CPUSET_DEL(mp_cpus, bootcpuid);
	CPUSET_ADD(cpu_ready_set, bootcpuid);

	/*
	 * Skip the rest of this if
	 * . only 1 cpu detected and system isn't hotplug-capable
	 * . not using MP
	 */
	if ((CPUSET_ISNULL(mp_cpus) && plat_dr_support_cpu() == 0) ||
	    use_mp == 0) {
		if (use_mp == 0)
			cmn_err(CE_CONT, "?***** Not in MP mode\n");
		goto done;
	}

	/*
	 * perform such initialization as is needed
	 * to be able to take CPUs on- and off-line.
	 */
	cpu_pause_init();

	xc_init_cpu(CPU);		/* initialize processor crosscalls */

	if (mach_cpucontext_init() != 0)
		goto done;

	flushes_require_xcalls = 1;

	/*
	 * We lock our affinity to the master CPU to ensure that all slave CPUs
	 * do their TSC syncs with the same CPU.
	 */
	affinity_set(CPU_CURRENT);

	for (who = 0; who < NCPU; who++) {
		if (!CPU_IN_SET(mp_cpus, who))
			continue;
		ASSERT(who != bootcpuid);

		mutex_enter(&cpu_lock);
		if (start_cpu(who) != 0)
			CPUSET_DEL(mp_cpus, who);
		cpu_state_change_notify(who, CPU_SETUP);
		mutex_exit(&cpu_lock);
	}

	/* Free the space allocated to hold the microcode file */
	ucode_cleanup();

	affinity_clear();

	mach_cpucontext_fini();

done:
	if (get_hwenv() == HW_NATIVE)
		workaround_errata_end();
	cmi_post_mpstartup();

	if (use_mp && ncpus != boot_max_ncpus) {
		cmn_err(CE_NOTE,
		    "System detected %d cpus, but "
		    "only %d cpu(s) were enabled during boot.",
		    boot_max_ncpus, ncpus);
		cmn_err(CE_NOTE,
		    "Use \"boot-ncpus\" parameter to enable more CPU(s). "
		    "See eeprom(1M).");
	}
}

int
mp_cpu_configure(int cpuid)
{
	cpu_t *cp;

	if (use_mp == 0 || plat_dr_support_cpu() == 0) {
		return (ENOTSUP);
	}

	cp = cpu_get(cpuid);
	if (cp != NULL) {
		return (EALREADY);
	}

	/*
	 * Check if there's at least a Mbyte of kmem available
	 * before attempting to start the cpu.
	 */
	if (kmem_avail() < 1024 * 1024) {
		/*
		 * Kick off a reap in case that helps us with
		 * later attempts ..
		 */
		kmem_reap();
		return (ENOMEM);
	}

	cp = mp_cpu_configure_common(cpuid, B_FALSE);
	ASSERT(cp != NULL && cpu_get(cpuid) == cp);

	return (cp != NULL ? 0 : EAGAIN);
}

int
mp_cpu_unconfigure(int cpuid)
{
	cpu_t *cp;

	if (use_mp == 0 || plat_dr_support_cpu() == 0) {
		return (ENOTSUP);
	} else if (cpuid < 0 || cpuid >= max_ncpus) {
		return (EINVAL);
	}

	cp = cpu_get(cpuid);
	if (cp == NULL) {
		return (ENODEV);
	}
	mp_cpu_unconfigure_common(cp, 0);

	return (0);
}

/*
 * Startup function for 'other' CPUs (besides boot cpu).
 * Called from real_mode_start.
 *
 * WARNING: until CPU_READY is set, mp_startup_common and routines called by
 * mp_startup_common should not call routines (e.g. kmem_free) that could call
 * hat_unload which requires CPU_READY to be set.
 */
static void
mp_startup_common(boolean_t boot)
{
	cpu_t *cp = CPU;
	uchar_t new_x86_featureset[BT_SIZEOFMAP(NUM_X86_FEATURES)];
	extern void cpu_event_init_cpu(cpu_t *);

	/*
	 * We need to get TSC on this proc synced (i.e., any delta
	 * from cpu0 accounted for) as soon as we can, because many
	 * things use gethrtime/pc_gethrestime, including
	 * interrupts, cmn_err, etc.
	 */

	/* Let the control CPU continue into tsc_sync_master() */
	mp_startup_signal(&procset_slave, cp->cpu_id);

#ifndef __xpv
	if (tsc_gethrtime_enable)
		tsc_sync_slave();
#endif

	/*
	 * Once this was done from assembly, but it's safer here; if
	 * it blocks, we need to be able to swtch() to and from, and
	 * since we get here by calling t_pc, we need to do that call
	 * before swtch() overwrites it.
	 */
	(void) (*ap_mlsetup)();

	bzero(new_x86_featureset, BT_SIZEOFMAP(NUM_X86_FEATURES));
	cpuid_pass1(cp, new_x86_featureset);

#ifndef __xpv
	/*
	 * Program this cpu's PAT
	 */
	if (is_x86_feature(x86_featureset, X86FSET_PAT))
		pat_sync();
#endif

	/*
	 * Set up TSC_AUX to contain the cpuid for this processor
	 * for the rdtscp instruction.
	 */
	if (is_x86_feature(x86_featureset, X86FSET_TSCP))
		(void) wrmsr(MSR_AMD_TSCAUX, cp->cpu_id);

	/*
	 * Initialize this CPU's syscall handlers
	 */
	init_cpu_syscall(cp);

	/*
	 * Enable interrupts with spl set to LOCK_LEVEL. LOCK_LEVEL is the
	 * highest level at which a routine is permitted to block on
	 * an adaptive mutex (allows for cpu poke interrupt in case
	 * the cpu is blocked on a mutex and halts). Setting LOCK_LEVEL blocks
	 * device interrupts that may end up in the hat layer issuing cross
	 * calls before CPU_READY is set.
	 */
	splx(ipltospl(LOCK_LEVEL));
	sti();

	/*
	 * Do a sanity check to make sure this new CPU is a sane thing
	 * to add to the collection of processors running this system.
	 *
	 * XXX Clearly this needs to get more sophisticated if x86
	 * systems start to get built out of heterogeneous CPUs, as is
	 * likely to happen once the number of processors in a configuration
	 * gets large enough.
	 */
	if (compare_x86_featureset(x86_featureset, new_x86_featureset) ==
	    B_FALSE) {
		cmn_err(CE_CONT, "cpu%d: featureset\n", cp->cpu_id);
		print_x86_featureset(new_x86_featureset);
		cmn_err(CE_WARN, "cpu%d feature mismatch", cp->cpu_id);
	}

	/*
	 * We do not support cpus with mixed monitor/mwait support if the
	 * boot cpu supports monitor/mwait.
	 */
	if (is_x86_feature(x86_featureset, X86FSET_MWAIT) !=
	    is_x86_feature(new_x86_featureset, X86FSET_MWAIT))
		panic("unsupported mixed cpu monitor/mwait support detected");

	/*
	 * We could be more sophisticated here, and just mark the CPU
	 * as "faulted" but at this point we'll opt for the easier
	 * answer of dying horribly. Provided the boot cpu is ok,
	 * the system can be recovered by booting with use_mp set to zero.
	 */
	if (workaround_errata(cp) != 0)
		panic("critical workaround(s) missing for cpu%d", cp->cpu_id);

	/*
	 * We can touch cpu_flags without acquiring the cpu_lock here
	 * because the cpu_lock is held by the control CPU which is running
	 * mp_start_cpu_common().
	 * We need to clear the CPU_QUIESCED flag before calling any function
	 * which may cause thread context switching, such as kmem_alloc().
	 * The idle thread checks for the CPU_QUIESCED flag and loops forever
	 * if it's set, so the startup thread may have no chance to switch
	 * back again if it's switched away with CPU_QUIESCED set.
	 */
	cp->cpu_flags &= ~(CPU_POWEROFF | CPU_QUIESCED);

	/*
	 * Setup this processor for XSAVE.
	 */
	if (fp_save_mech == FP_XSAVE) {
		xsave_setup_msr(cp);
	}

	cpuid_pass2(cp);
	cpuid_pass3(cp);
	cpuid_pass4(cp, NULL);

	/*
	 * Correct cpu_idstr and cpu_brandstr on target CPU after
	 * cpuid_pass1() is done.
	 */
	(void) cpuid_getidstr(cp, cp->cpu_idstr, CPU_IDSTRLEN);
	(void) cpuid_getbrandstr(cp, cp->cpu_brandstr, CPU_IDSTRLEN);

	cp->cpu_flags |= CPU_RUNNING | CPU_READY | CPU_EXISTS;

	post_startup_cpu_fixups();

	cpu_event_init_cpu(cp);

	/*
	 * Enable preemption here so that contention for any locks acquired
	 * later in mp_startup_common may be preempted if the thread owning
	 * those locks is continuously executing on other CPUs (for example,
	 * this CPU must be preemptible to allow other CPUs to pause it during
	 * their startup phases). It's safe to enable preemption here because
	 * the CPU state is pretty-much fully constructed.
	 */
	curthread->t_preempt = 0;

	/* The base spl should still be at LOCK_LEVEL here */
	ASSERT(cp->cpu_base_spl == ipltospl(LOCK_LEVEL));
	set_base_spl();		/* Restore the spl to its proper value */

	pghw_physid_create(cp);
	/*
	 * Delegate initialization tasks, which need to access the cpu_lock,
	 * to mp_start_cpu_common() because we can't acquire the cpu_lock here
	 * during CPU DR operations.
	 */
	mp_startup_signal(&procset_slave, cp->cpu_id);
	mp_startup_wait(&procset_master, cp->cpu_id);
	pg_cmt_cpu_startup(cp);

	if (boot) {
		mutex_enter(&cpu_lock);
		cp->cpu_flags &= ~CPU_OFFLINE;
		cpu_enable_intr(cp);
		cpu_add_active(cp);
		mutex_exit(&cpu_lock);
	}

	/* Enable interrupts */
	(void) spl0();

	/*
	 * Fill out cpu_ucode_info. Update microcode if necessary.
	 */
	ucode_check(cp);

#ifndef __xpv
	{
		/*
		 * Set up the CPU module for this CPU. This can't be done
		 * before this CPU is made CPU_READY, because we may (in
		 * heterogeneous systems) need to go load another CPU module.
		 * The act of attempting to load a module may trigger a
		 * cross-call, which will ASSERT unless this cpu is CPU_READY.
		 */
		cmi_hdl_t hdl;

		if ((hdl = cmi_init(CMI_HDL_NATIVE, cmi_ntv_hwchipid(CPU),
		    cmi_ntv_hwcoreid(CPU), cmi_ntv_hwstrandid(CPU))) != NULL) {
			if (is_x86_feature(x86_featureset, X86FSET_MCA))
				cmi_mca_init(hdl);
			cp->cpu_m.mcpu_cmi_hdl = hdl;
		}
	}
#endif /* __xpv */

	if (boothowto & RB_DEBUG)
		kdi_cpu_init();

	/*
	 * Setting the bit in cpu_ready_set must be the last operation in
	 * processor initialization; the boot CPU will continue to boot once
	 * it sees this bit set for all active CPUs.
	 */
	CPUSET_ATOMIC_ADD(cpu_ready_set, cp->cpu_id);

	(void) mach_cpu_create_device_node(cp, NULL);

	cmn_err(CE_CONT, "?cpu%d: %s\n", cp->cpu_id, cp->cpu_idstr);
	cmn_err(CE_CONT, "?cpu%d: %s\n", cp->cpu_id, cp->cpu_brandstr);
	cmn_err(CE_CONT, "?cpu%d initialization complete - online\n",
	    cp->cpu_id);

	/*
	 * Now we are done with the startup thread, so free it up.
	 */
	thread_exit();
	panic("mp_startup: cannot return");
	/*NOTREACHED*/
}

/*
 * Startup function for 'other' CPUs at boot time (besides boot cpu).
 */
static void
mp_startup_boot(void)
{
	mp_startup_common(B_TRUE);
}

/*
 * Startup function for hotplug CPUs at runtime.
 */
void
mp_startup_hotplug(void)
{
	mp_startup_common(B_FALSE);
}

/*
 * Start CPU on user request.
 */
/* ARGSUSED */
int
mp_cpu_start(struct cpu *cp)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	return (0);
}

/*
 * Stop CPU on user request.
 */
int
mp_cpu_stop(struct cpu *cp)
{
	extern int cbe_psm_timer_mode;
	ASSERT(MUTEX_HELD(&cpu_lock));

#ifdef __xpv
	/*
	 * We can't offline vcpu0.
	 */
	if (cp->cpu_id == 0)
		return (EBUSY);
#endif

	/*
	 * If TIMER_PERIODIC mode is used, CPU0 is the one running it;
	 * can't stop it. (This is true only for machines with no TSC.)
	 */

	if ((cbe_psm_timer_mode == TIMER_PERIODIC) && (cp->cpu_id == 0))
		return (EBUSY);

	return (0);
}

/*
 * Take the specified CPU out of participation in interrupts.
 */
int
cpu_disable_intr(struct cpu *cp)
{
	if (psm_disable_intr(cp->cpu_id) != DDI_SUCCESS)
		return (EBUSY);

	cp->cpu_flags &= ~CPU_ENABLE;
	return (0);
}

/*
 * Allow the specified CPU to participate in interrupts.
 */
void
cpu_enable_intr(struct cpu *cp)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	cp->cpu_flags |= CPU_ENABLE;
	psm_enable_intr(cp->cpu_id);
}

void
mp_cpu_faulted_enter(struct cpu *cp)
{
#ifdef __xpv
	_NOTE(ARGUNUSED(cp));
#else
	cmi_hdl_t hdl = cp->cpu_m.mcpu_cmi_hdl;

	if (hdl != NULL) {
		cmi_hdl_hold(hdl);
	} else {
		hdl = cmi_hdl_lookup(CMI_HDL_NATIVE, cmi_ntv_hwchipid(cp),
		    cmi_ntv_hwcoreid(cp), cmi_ntv_hwstrandid(cp));
	}
	if (hdl != NULL) {
		cmi_faulted_enter(hdl);
		cmi_hdl_rele(hdl);
	}
#endif
}

void
mp_cpu_faulted_exit(struct cpu *cp)
{
#ifdef __xpv
	_NOTE(ARGUNUSED(cp));
#else
	cmi_hdl_t hdl = cp->cpu_m.mcpu_cmi_hdl;

	if (hdl != NULL) {
		cmi_hdl_hold(hdl);
	} else {
		hdl = cmi_hdl_lookup(CMI_HDL_NATIVE, cmi_ntv_hwchipid(cp),
		    cmi_ntv_hwcoreid(cp), cmi_ntv_hwstrandid(cp));
	}
	if (hdl != NULL) {
		cmi_faulted_exit(hdl);
		cmi_hdl_rele(hdl);
	}
#endif
}

/*
 * The following two routines are used as context operators on threads
 * belonging to processes with a private LDT (see sysi86). Due to the
 * rarity of such processes, these routines are currently written for best
 * code readability and organization rather than speed. We could avoid
 * checking x86_featureset at every context switch by installing different
 * context ops, depending on x86_featureset, at LDT creation time -- one
 * for each combination of fast syscall features.
 */

/*ARGSUSED*/
void
cpu_fast_syscall_disable(void *arg)
{
	if (is_x86_feature(x86_featureset, X86FSET_MSR) &&
	    is_x86_feature(x86_featureset, X86FSET_SEP))
		cpu_sep_disable();
	if (is_x86_feature(x86_featureset, X86FSET_MSR) &&
	    is_x86_feature(x86_featureset, X86FSET_ASYSC))
		cpu_asysc_disable();
}

/*ARGSUSED*/
void
cpu_fast_syscall_enable(void *arg)
{
	if (is_x86_feature(x86_featureset, X86FSET_MSR) &&
	    is_x86_feature(x86_featureset, X86FSET_SEP))
		cpu_sep_enable();
	if (is_x86_feature(x86_featureset, X86FSET_MSR) &&
	    is_x86_feature(x86_featureset, X86FSET_ASYSC))
		cpu_asysc_enable();
}

static void
cpu_sep_enable(void)
{
	ASSERT(is_x86_feature(x86_featureset, X86FSET_SEP));
	ASSERT(curthread->t_preempt || getpil() >= LOCK_LEVEL);

	wrmsr(MSR_INTC_SEP_CS, (uint64_t)(uintptr_t)KCS_SEL);
}

static void
cpu_sep_disable(void)
{
	ASSERT(is_x86_feature(x86_featureset, X86FSET_SEP));
	ASSERT(curthread->t_preempt || getpil() >= LOCK_LEVEL);

	/*
	 * Setting the SYSENTER_CS_MSR register to 0 causes software executing
	 * the sysenter or sysexit instruction to trigger a #gp fault.
	 */
	wrmsr(MSR_INTC_SEP_CS, 0);
}

static void
cpu_asysc_enable(void)
{
	ASSERT(is_x86_feature(x86_featureset, X86FSET_ASYSC));
	ASSERT(curthread->t_preempt || getpil() >= LOCK_LEVEL);

	wrmsr(MSR_AMD_EFER, rdmsr(MSR_AMD_EFER) |
	    (uint64_t)(uintptr_t)AMD_EFER_SCE);
}

static void
cpu_asysc_disable(void)
{
	ASSERT(is_x86_feature(x86_featureset, X86FSET_ASYSC));
	ASSERT(curthread->t_preempt || getpil() >= LOCK_LEVEL);

	/*
	 * Turn off the SCE (syscall enable) bit in the EFER register. Software
	 * executing syscall or sysret with this bit off will incur a #ud trap.
	 */
	wrmsr(MSR_AMD_EFER, rdmsr(MSR_AMD_EFER) &
	    ~((uint64_t)(uintptr_t)AMD_EFER_SCE));
}