/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*	Copyright (c) 1990, 1991 UNIX System Laboratories, Inc.	*/
/*	Copyright (c) 1984, 1986, 1987, 1988, 1989, 1990 AT&T		*/
/*	  All Rights Reserved						*/
/*									*/
/*	Copyright (c) 1987, 1988 Microsoft Corporation			*/
/*	  All Rights Reserved						*/
/*									*/

/*
 * Copyright 2012 Joyent, Inc.  All rights reserved.
 */

#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/param.h>
#include <sys/signal.h>
#include <sys/systm.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/disp.h>
#include <sys/class.h>
#include <sys/core.h>
#include <sys/syscall.h>
#include <sys/cpuvar.h>
#include <sys/vm.h>
#include <sys/sysinfo.h>
#include <sys/fault.h>
#include <sys/stack.h>
#include <sys/psw.h>
#include <sys/regset.h>
#include <sys/fp.h>
#include <sys/trap.h>
#include <sys/kmem.h>
#include <sys/vtrace.h>
#include <sys/cmn_err.h>
#include <sys/prsystm.h>
#include <sys/mutex_impl.h>
#include <sys/machsystm.h>
#include <sys/archsystm.h>
#include <sys/sdt.h>
#include <sys/avintr.h>
#include <sys/kobj.h>

#include <vm/hat.h>

#include <vm/seg_kmem.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/hat_pte.h>
#include <vm/hat_i86.h>

#include <sys/procfs.h>

#include <sys/reboot.h>
#include <sys/debug.h>
#include <sys/debugreg.h>
#include <sys/modctl.h>
#include <sys/aio_impl.h>
#include <sys/tnf.h>
#include <sys/tnf_probe.h>
#include <sys/cred.h>
#include <sys/mman.h>
#include <sys/x86_archext.h>
#include <sys/copyops.h>
#include <c2/audit.h>
#include <sys/ftrace.h>
#include <sys/panic.h>
#include <sys/traptrace.h>
#include <sys/ontrap.h>
#include <sys/cpc_impl.h>
#include <sys/bootconf.h>
#include <sys/bootinfo.h>
#include <sys/promif.h>
#include <sys/mach_mmu.h>
#if defined(__xpv)
#include <sys/hypervisor.h>
#endif
#include <sys/contract/process_impl.h>

#define	USER	0x10000		/* user-mode flag added to trap type */

static const char *trap_type_mnemonic[] = {
	"de", "db", "2", "bp",
	"of", "br", "ud", "nm",
	"df", "9", "ts", "np",
	"ss", "gp", "pf", "15",
	"mf", "ac", "mc", "xf"
};

static const char *trap_type[] = {
	"Divide error",				/* trap id 0 */
	"Debug",				/* trap id 1 */
	"NMI interrupt",			/* trap id 2 */
	"Breakpoint",				/* trap id 3 */
	"Overflow",				/* trap id 4 */
	"BOUND range exceeded",			/* trap id 5 */
	"Invalid opcode",			/* trap id 6 */
	"Device not available",			/* trap id 7 */
	"Double fault",				/* trap id 8 */
	"Coprocessor segment overrun",		/* trap id 9 */
	"Invalid TSS",				/* trap id 10 */
	"Segment not present",			/* trap id 11 */
	"Stack segment fault",			/* trap id 12 */
	"General protection",			/* trap id 13 */
	"Page fault",				/* trap id 14 */
	"Reserved",				/* trap id 15 */
	"x87 floating point error",		/* trap id 16 */
	"Alignment check",			/* trap id 17 */
	"Machine check",			/* trap id 18 */
	"SIMD floating point exception",	/* trap id 19 */
};

#define	TRAP_TYPES	(sizeof (trap_type) / sizeof (trap_type[0]))
#define	SLOW_SCALL_SIZE	2	/* int $T_SYSCALLINT: 0xCD 0x91 */
#define	FAST_SCALL_SIZE	2	/* sysenter/syscall: 0x0F 0x34 / 0x0F 0x05 */

int tudebug = 0;
int tudebugbpt = 0;
int tudebugfpe = 0;
int tudebugsse = 0;

#if defined(TRAPDEBUG) || defined(lint)
int tdebug = 0;
int lodebug = 0;
int faultdebug = 0;
#else
#define	tdebug	0
#define	lodebug	0
#define	faultdebug	0
#endif /* defined(TRAPDEBUG) || defined(lint) */

#if defined(TRAPTRACE)
/*
 * trap trace record for cpu0 is allocated here.
 * trap trace records for non-boot cpus are allocated in mp_startup_init().
 */
static trap_trace_rec_t trap_tr0[TRAPTR_NENT];
trap_trace_ctl_t trap_trace_ctl[NCPU] = {
	{
		(uintptr_t)trap_tr0,			/* next record */
		(uintptr_t)trap_tr0,			/* first record */
		(uintptr_t)(trap_tr0 + TRAPTR_NENT),	/* limit */
		(uintptr_t)0				/* current */
	},
};

/*
 * default trap buffer size
 */
size_t trap_trace_bufsize = TRAPTR_NENT * sizeof (trap_trace_rec_t);
int trap_trace_freeze = 0;
int trap_trace_off = 0;

/*
 * A dummy TRAPTRACE entry to use after death.
 */
trap_trace_rec_t trap_trace_postmort;

static void dump_ttrace(void);
#endif /* TRAPTRACE */
static void dumpregs(struct regs *);
static void showregs(uint_t, struct regs *, caddr_t);
static int kern_gpfault(struct regs *);

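/*
 * Fatal trap handling: freeze the trap trace, record the trap state where
 * the panic code can find it, and panic with a "BAD TRAP" message.  For a
 * kernel page fault on a user address, the message also names the module
 * containing the faulting pc and notes whether the fault looks like a
 * NULL pointer dereference.
 */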
/*ARGSUSED*/
static int
die(uint_t type, struct regs *rp, caddr_t addr, processorid_t cpuid)
{
	struct panic_trap_info ti;
	const char *trap_name, *trap_mnemonic;

	if (type < TRAP_TYPES) {
		trap_name = trap_type[type];
		trap_mnemonic = trap_type_mnemonic[type];
	} else {
		trap_name = "trap";
		trap_mnemonic = "-";
	}

#ifdef TRAPTRACE
	TRAPTRACE_FREEZE;
#endif

	ti.trap_regs = rp;
	ti.trap_type = type & ~USER;
	ti.trap_addr = addr;

	curthread->t_panic_trap = &ti;

	if (type == T_PGFLT && addr < (caddr_t)KERNELBASE) {
		panic("BAD TRAP: type=%x (#%s %s) rp=%p addr=%p "
		    "occurred in module \"%s\" due to %s",
		    type, trap_mnemonic, trap_name, (void *)rp, (void *)addr,
		    mod_containing_pc((caddr_t)rp->r_pc),
		    addr < (caddr_t)PAGESIZE ?
		    "a NULL pointer dereference" :
		    "an illegal access to a user address");
	} else
		panic("BAD TRAP: type=%x (#%s %s) rp=%p addr=%p",
		    type, trap_mnemonic, trap_name, (void *)rp, (void *)addr);
	return (0);
}

/*
 * Rewrite the instruction at pc to be an int $T_SYSCALLINT instruction.
 *
 * int <vector> is two bytes: 0xCD <vector>
 */

static int
rewrite_syscall(caddr_t pc)
{
	uchar_t instr[SLOW_SCALL_SIZE] = { 0xCD, T_SYSCALLINT };

	if (uwrite(curthread->t_procp, instr, SLOW_SCALL_SIZE,
	    (uintptr_t)pc) != 0)
		return (1);

	return (0);
}

/*
 * Test to see if the instruction at pc is sysenter or syscall. The second
 * argument should be the x86 feature flag corresponding to the expected
 * instruction.
 *
 * sysenter is two bytes: 0x0F 0x34
 * syscall is two bytes: 0x0F 0x05
 * int $T_SYSCALLINT is two bytes: 0xCD 0x91
 */

static int
instr_is_other_syscall(caddr_t pc, int which)
{
	uchar_t instr[FAST_SCALL_SIZE];

	ASSERT(which == X86FSET_SEP || which == X86FSET_ASYSC || which == 0xCD);

	if (copyin_nowatch(pc, (caddr_t)instr, FAST_SCALL_SIZE) != 0)
		return (0);

	switch (which) {
	case X86FSET_SEP:
		if (instr[0] == 0x0F && instr[1] == 0x34)
			return (1);
		break;
	case X86FSET_ASYSC:
		if (instr[0] == 0x0F && instr[1] == 0x05)
			return (1);
		break;
	case 0xCD:
		if (instr[0] == 0xCD && instr[1] == T_SYSCALLINT)
			return (1);
		break;
	}

	return (0);
}

static const char *
syscall_insn_string(int syscall_insn)
{
	switch (syscall_insn) {
	case X86FSET_SEP:
		return ("sysenter");
	case X86FSET_ASYSC:
		return ("syscall");
	case 0xCD:
		return ("int");
	default:
		return ("Unknown");
	}
}

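/*
 * Attempt to rewrite the fast system call instruction at the current pc
 * to the slower (but LDT-safe) int $T_SYSCALLINT form.  Returns 1 if the
 * site now contains the int instruction (either because we rewrote it or
 * because another thread already had), so the caller can simply retry it;
 * returns 0 if the rewrite could not be performed.
 */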
static int
ldt_rewrite_syscall(struct regs *rp, proc_t *p, int syscall_insn)
{
	caddr_t linearpc;
	int return_code = 0;

	mutex_enter(&p->p_ldtlock);	/* Must be held across linear_pc() */

	if (linear_pc(rp, p, &linearpc) == 0) {

		/*
		 * If another thread beat us here, it already changed
		 * this site to the slower (int) syscall instruction.
		 */
		if (instr_is_other_syscall(linearpc, 0xCD)) {
			return_code = 1;
		} else if (instr_is_other_syscall(linearpc, syscall_insn)) {

			if (rewrite_syscall(linearpc) == 0) {
				return_code = 1;
			}
#ifdef DEBUG
			else
				cmn_err(CE_WARN, "failed to rewrite %s "
				    "instruction in process %d",
				    syscall_insn_string(syscall_insn),
				    p->p_pid);
#endif /* DEBUG */
		}
	}

	mutex_exit(&p->p_ldtlock);	/* Must be held across linear_pc() */

	return (return_code);
}

/*
 * Test to see if the instruction at pc is a system call instruction.
 *
 * The bytes of an lcall instruction used for the syscall trap.
 * static	uchar_t lcall[7] = { 0x9a, 0, 0, 0, 0, 0x7, 0 };
 * static	uchar_t lcallalt[7] = { 0x9a, 0, 0, 0, 0, 0x27, 0 };
 */

#define	LCALLSIZE	7

static int
instr_is_lcall_syscall(caddr_t pc)
{
	uchar_t instr[LCALLSIZE];

	if (copyin_nowatch(pc, (caddr_t)instr, LCALLSIZE) == 0 &&
	    instr[0] == 0x9a &&
	    instr[1] == 0 &&
	    instr[2] == 0 &&
	    instr[3] == 0 &&
	    instr[4] == 0 &&
	    (instr[5] == 0x7 || instr[5] == 0x27) &&
	    instr[6] == 0)
		return (1);

	return (0);
}

#ifdef __amd64

/*
 * In the first revisions of amd64 CPUs produced by AMD, the LAHF and
 * SAHF instructions were not implemented in 64-bit mode. Later revisions
 * did implement these instructions. An extension to the cpuid instruction
 * was added to check for the capability of executing these instructions
 * in 64-bit mode.
 *
 * Intel originally did not implement these instructions in EM64T either,
 * but added them in later revisions.
 *
 * So, there are different chip revisions by both vendors out there that
 * may or may not implement these instructions. The easy solution is to
 * just always emulate these instructions on demand.
 *
 * SAHF == store %ah in the lower 8 bits of %rflags (opcode 0x9e)
 * LAHF == load the lower 8 bits of %rflags into %ah (opcode 0x9f)
 */

#define	LSAHFSIZE 1

static int
instr_is_lsahf(caddr_t pc, uchar_t *instr)
{
	if (copyin_nowatch(pc, (caddr_t)instr, LSAHFSIZE) == 0 &&
	    (*instr == 0x9e || *instr == 0x9f))
		return (1);
	return (0);
}

/*
 * Emulate the LAHF and SAHF instructions. The reference manuals define
 * these instructions to always load/store bit 1 as a 1, and bits 3 and 5
 * as a 0. The other, defined, bits are copied (the PS_ICC bits and PS_P).
 *
 * Note that %ah is bits 8-15 of %rax.
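 *
 * For reference, the layout of the low byte of %rflags (an architectural
 * fact, not specific to this code) is:
 *
 *	bit 7: SF   bit 6: ZF   bit 4: AF   bit 2: PF   bit 0: CF
 *	bit 1 is always 1; bits 3 and 5 are always 0.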
 */
static void
emulate_lsahf(struct regs *rp, uchar_t instr)
{
	if (instr == 0x9e) {
		/* sahf. Copy bits from %ah to flags. */
		rp->r_ps = (rp->r_ps & ~0xff) |
		    ((rp->r_rax >> 8) & PSL_LSAHFMASK) | PS_MB1;
	} else {
		/* lahf. Copy bits from flags to %ah. */
		rp->r_rax = (rp->r_rax & ~0xff00) |
		    (((rp->r_ps & PSL_LSAHFMASK) | PS_MB1) << 8);
	}
	rp->r_pc += LSAHFSIZE;
}
#endif /* __amd64 */

#ifdef OPTERON_ERRATUM_91

/*
 * Test to see if the instruction at pc is a prefetch instruction.
 *
 * The first byte of prefetch instructions is always 0x0F.
 * The second byte is 0x18 for regular prefetch or 0x0D for AMD 3dnow prefetch.
 * The third byte (ModRM) contains the register field bits (bits 3-5).
 * These bits must be between 0 and 3 inclusive for regular prefetch and
 * 0 and 1 inclusive for AMD 3dnow prefetch.
 *
 * In 64-bit mode, there may be a one-byte REX prefix (0x40-0x4F).
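 *
 * For example (an x86 encoding fact, not taken from this file): with
 * opcode 0x0F 0x18, ModRM reg values 0-3 select prefetchnta, prefetcht0,
 * prefetcht1 and prefetcht2 respectively; with 0x0F 0x0D, reg values 0-1
 * select the 3dnow prefetch and prefetchw forms.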
 */

static int
cmp_to_prefetch(uchar_t *p)
{
#ifdef _LP64
	if ((p[0] & 0xF0) == 0x40)	/* 64-bit REX prefix */
		p++;
#endif
	return ((p[0] == 0x0F && p[1] == 0x18 && ((p[2] >> 3) & 7) <= 3) ||
	    (p[0] == 0x0F && p[1] == 0x0D && ((p[2] >> 3) & 7) <= 1));
}

static int
instr_is_prefetch(caddr_t pc)
{
	uchar_t instr[4];	/* optional REX prefix plus 3-byte opcode */

	return (copyin_nowatch(pc, instr, sizeof (instr)) == 0 &&
	    cmp_to_prefetch(instr));
}

#endif /* OPTERON_ERRATUM_91 */

/*
 * Called from the trap handler when a processor trap occurs.
 *
 * Note: All user-level traps that might call stop() must exit
 * trap() by 'goto out' or by falling through.
 * Note also: trap() is usually called with interrupts enabled (PS_IE == 1);
 * however, there are paths that arrive here with PS_IE == 0, so special care
 * must be taken in those cases.
 */
void
trap(struct regs *rp, caddr_t addr, processorid_t cpuid)
{
	kthread_t *ct = curthread;
	enum seg_rw rw;
	unsigned type;
	proc_t *p = ttoproc(ct);
	klwp_t *lwp = ttolwp(ct);
	uintptr_t lofault;
	label_t *onfault;
	faultcode_t pagefault(), res, errcode;
	enum fault_type fault_type;
	k_siginfo_t siginfo;
	uint_t fault = 0;
	int mstate;
	int sicode = 0;
	int watchcode;
	int watchpage;
	caddr_t vaddr;
	int singlestep_twiddle;
	size_t sz;
	int ta;
#ifdef __amd64
	uchar_t instr;
#endif

	ASSERT_STACK_ALIGNED();

	type = rp->r_trapno;
	CPU_STATS_ADDQ(CPU, sys, trap, 1);

	if (type == T_PGFLT) {

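		/*
		 * Classify the faulting access: the hardware error code
		 * flags writes directly and (when NX paging is in use)
		 * instruction fetches; a fault at the pc itself is likewise
		 * an instruction fetch.  Anything else is a read.
		 */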
		errcode = rp->r_err;
		if (errcode & PF_ERR_WRITE)
			rw = S_WRITE;
		else if ((caddr_t)rp->r_pc == addr ||
		    (mmu.pt_nx != 0 && (errcode & PF_ERR_EXEC)))
			rw = S_EXEC;
		else
			rw = S_READ;

#if defined(__i386)
		/*
		 * Pentium Pro work-around
		 */
		if ((errcode & PF_ERR_PROT) && pentiumpro_bug4046376) {
			uint_t	attr;
			uint_t	priv_violation;
			uint_t	access_violation;

			if (hat_getattr(addr < (caddr_t)kernelbase ?
			    curproc->p_as->a_hat : kas.a_hat, addr, &attr)
			    == -1) {
				errcode &= ~PF_ERR_PROT;
			} else {
				priv_violation = (errcode & PF_ERR_USER) &&
				    !(attr & PROT_USER);
				access_violation = (errcode & PF_ERR_WRITE) &&
				    !(attr & PROT_WRITE);
				if (!priv_violation && !access_violation)
					goto cleanup;
			}
		}
#endif /* __i386 */

	} else if (type == T_SGLSTP && lwp != NULL)
		lwp->lwp_pcb.pcb_drstat = (uintptr_t)addr;

	if (tdebug)
		showregs(type, rp, addr);

	if (USERMODE(rp->r_cs)) {
		/*
		 * Set up the current cred to use during this trap. u_cred
		 * no longer exists. t_cred is used instead.
		 * The current process credential applies to the thread for
		 * the entire trap. If trapping from the kernel, this
		 * should already be set up.
		 */
		if (ct->t_cred != p->p_cred) {
			cred_t *oldcred = ct->t_cred;
			/*
			 * DTrace accesses t_cred in probe context. t_cred
			 * must always be either NULL, or point to a valid,
			 * allocated cred structure.
			 */
			ct->t_cred = crgetcred();
			crfree(oldcred);
		}
		ASSERT(lwp != NULL);
		type |= USER;
		ASSERT(lwptoregs(lwp) == rp);
		lwp->lwp_state = LWP_SYS;

		switch (type) {
		case T_PGFLT + USER:
			if ((caddr_t)rp->r_pc == addr)
				mstate = LMS_TFAULT;
			else
				mstate = LMS_DFAULT;
			break;
		default:
			mstate = LMS_TRAP;
			break;
		}
		/* Kernel probe */
		TNF_PROBE_1(thread_state, "thread", /* CSTYLED */,
		    tnf_microstate, state, mstate);
		mstate = new_mstate(ct, mstate);

		bzero(&siginfo, sizeof (siginfo));
	}

	switch (type) {
	case T_PGFLT + USER:
	case T_SGLSTP:
	case T_SGLSTP + USER:
	case T_BPTFLT + USER:
		break;

	default:
		FTRACE_2("trap(): type=0x%lx, regs=0x%lx",
		    (ulong_t)type, (ulong_t)rp);
		break;
	}

	switch (type) {
	case T_SIMDFPE:
		/* Make sure we enable interrupts before die()ing */
		sti();	/* The SIMD exception comes in via cmninttrap */
		/*FALLTHROUGH*/
	default:
		if (type & USER) {
			if (tudebug)
				showregs(type, rp, (caddr_t)0);
			printf("trap: Unknown trap type %d in user mode\n",
			    type & ~USER);
			siginfo.si_signo = SIGILL;
			siginfo.si_code = ILL_ILLTRP;
			siginfo.si_addr = (caddr_t)rp->r_pc;
			siginfo.si_trapno = type & ~USER;
			fault = FLTILL;
			break;
		} else {
			(void) die(type, rp, addr, cpuid);
			/*NOTREACHED*/
		}

	case T_PGFLT:		/* system page fault */
		/*
		 * If we're under on_trap() protection (see <sys/ontrap.h>),
		 * set ot_trap and bounce back to the on_trap() call site
		 * via the installed trampoline.
		 */
		if ((ct->t_ontrap != NULL) &&
		    (ct->t_ontrap->ot_prot & OT_DATA_ACCESS)) {
			ct->t_ontrap->ot_trap |= OT_DATA_ACCESS;
			rp->r_pc = ct->t_ontrap->ot_trampoline;
			goto cleanup;
		}

		/*
		 * If we have an instruction fault in kernel mode, then
		 * either we tried to execute a user page (SMEP) or we tried
		 * to execute a page marked non-executable (PAE and NXE
		 * enabled). In either case, given that it's a kernel fault,
		 * we should panic immediately and not try to make any more
		 * forward progress. This indicates a bug in the kernel
		 * which, if execution continued, could be exploited to
		 * wreak havoc on the system.
		 */
		if (errcode & PF_ERR_EXEC) {
			(void) die(type, rp, addr, cpuid);
		}

		/*
		 * See if we can handle as pagefault. Save lofault and onfault
		 * across this. Here we assume that an address less than
		 * KERNELBASE is a user fault. We can do this as copy.s
		 * routines verify that the starting address is less than
		 * KERNELBASE before starting and because we know that we
		 * always have KERNELBASE mapped as invalid to serve as a
		 * "barrier".
		 */
		lofault = ct->t_lofault;
		onfault = ct->t_onfault;
		ct->t_lofault = 0;

		mstate = new_mstate(ct, LMS_KFAULT);

		if (addr < (caddr_t)kernelbase) {
			res = pagefault(addr,
			    (errcode & PF_ERR_PROT)? F_PROT: F_INVAL, rw, 0);
			if (res == FC_NOMAP &&
			    addr < p->p_usrstack &&
			    grow(addr))
				res = 0;
		} else {
			res = pagefault(addr,
			    (errcode & PF_ERR_PROT)? F_PROT: F_INVAL, rw, 1);
		}
		(void) new_mstate(ct, mstate);

		/*
		 * Restore lofault and onfault. If we resolved the fault, exit.
		 * If we didn't and lofault wasn't set, die.
		 */
		ct->t_lofault = lofault;
		ct->t_onfault = onfault;
		if (res == 0)
			goto cleanup;

#if defined(OPTERON_ERRATUM_93) && defined(_LP64)
		if (lofault == 0 && opteron_erratum_93) {
			/*
			 * Workaround for Opteron Erratum 93. On return from
			 * a System Management Interrupt at a HLT instruction
			 * the %rip might be truncated to a 32-bit value.
			 * BIOS is supposed to fix this, but some don't.
			 * If this occurs we simply restore the high order bits.
			 * The HLT instruction is 1 byte of 0xf4.
			 */
			uintptr_t	rip = rp->r_pc;

			if ((rip & 0xfffffffful) == rip) {
				rip |= 0xfffffffful << 32;
				if (hat_getpfnum(kas.a_hat, (caddr_t)rip) !=
				    PFN_INVALID &&
				    (*(uchar_t *)rip == 0xf4 ||
				    *(uchar_t *)(rip - 1) == 0xf4)) {
					rp->r_pc = rip;
					goto cleanup;
				}
			}
		}
#endif /* OPTERON_ERRATUM_93 && _LP64 */

#ifdef OPTERON_ERRATUM_91
		if (lofault == 0 && opteron_erratum_91) {
			/*
			 * Workaround for Opteron Erratum 91. Prefetches may
			 * generate a page fault (they're not supposed to do
			 * that!). If this occurs we simply return back to the
			 * instruction.
			 */
			caddr_t pc = (caddr_t)rp->r_pc;

			/*
			 * If the faulting PC is not mapped, this is a
			 * legitimate kernel page fault that must result in a
			 * panic. If the faulting PC is mapped, it could contain
			 * a prefetch instruction. Check for that here.
			 */
			if (hat_getpfnum(kas.a_hat, pc) != PFN_INVALID) {
				if (cmp_to_prefetch((uchar_t *)pc)) {
#ifdef DEBUG
					cmn_err(CE_WARN, "Opteron erratum 91 "
					    "occurred: kernel prefetch"
					    " at %p generated a page fault!",
					    (void *)rp->r_pc);
#endif /* DEBUG */
					goto cleanup;
				}
			}
			(void) die(type, rp, addr, cpuid);
		}
#endif /* OPTERON_ERRATUM_91 */

		if (lofault == 0)
			(void) die(type, rp, addr, cpuid);

		/*
		 * Cannot resolve fault. Return to lofault.
		 */
		if (lodebug) {
			showregs(type, rp, addr);
			traceregs(rp);
		}
		if (FC_CODE(res) == FC_OBJERR)
			res = FC_ERRNO(res);
		else
			res = EFAULT;
		rp->r_r0 = res;
		rp->r_pc = ct->t_lofault;
		goto cleanup;

	case T_PGFLT + USER:	/* user page fault */
		if (faultdebug) {
			char *fault_str;

			switch (rw) {
			case S_READ:
				fault_str = "read";
				break;
			case S_WRITE:
				fault_str = "write";
				break;
			case S_EXEC:
				fault_str = "exec";
				break;
			default:
				fault_str = "";
				break;
			}
			printf("user %s fault: addr=0x%lx errcode=0x%x\n",
			    fault_str, (uintptr_t)addr, errcode);
		}

#if defined(OPTERON_ERRATUM_100) && defined(_LP64)
		/*
		 * Workaround for AMD erratum 100
		 *
		 * A 32-bit process may receive a page fault on a non
		 * 32-bit address by mistake. The range of the faulting
		 * address will be
		 *
		 *	0xffffffff80000000 .. 0xffffffffffffffff or
		 *	0x0000000100000000 .. 0x000000017fffffff
		 *
		 * The fault is always due to an instruction fetch, however
		 * the value of r_pc should be correct (in 32 bit range),
		 * so we ignore the page fault on the bogus address.
		 */
		if (p->p_model == DATAMODEL_ILP32 &&
		    (0xffffffff80000000 <= (uintptr_t)addr ||
		    (0x100000000 <= (uintptr_t)addr &&
		    (uintptr_t)addr <= 0x17fffffff))) {
			if (!opteron_erratum_100)
				panic("unexpected erratum #100");
			if (rp->r_pc <= 0xffffffff)
				goto out;
		}
#endif /* OPTERON_ERRATUM_100 && _LP64 */

		ASSERT(!(curthread->t_flag & T_WATCHPT));
		watchpage = (pr_watch_active(p) && pr_is_watchpage(addr, rw));
#ifdef __i386
		/*
		 * In 32-bit mode, the lcall (system call) instruction fetches
		 * one word from the stack, at the stack pointer, because of the
		 * way the call gate is constructed. This is a bogus
		 * read and should not be counted as a read watchpoint.
		 * We work around the problem here by testing to see if
		 * this situation applies and, if so, simply jumping to
		 * the code in locore.s that fields the system call trap.
		 * The registers on the stack are already set up properly
		 * due to the match between the call gate sequence and the
		 * trap gate sequence. We just have to adjust the pc.
		 */
		if (watchpage && addr == (caddr_t)rp->r_sp &&
		    rw == S_READ && instr_is_lcall_syscall((caddr_t)rp->r_pc)) {
			extern void watch_syscall(void);

			rp->r_pc += LCALLSIZE;
			watch_syscall();	/* never returns */
			/* NOTREACHED */
		}
#endif /* __i386 */
		vaddr = addr;
		if (!watchpage || (sz = instr_size(rp, &vaddr, rw)) <= 0)
			fault_type = (errcode & PF_ERR_PROT)? F_PROT: F_INVAL;
		else if ((watchcode = pr_is_watchpoint(&vaddr, &ta,
		    sz, NULL, rw)) != 0) {
			if (ta) {
				do_watch_step(vaddr, sz, rw,
				    watchcode, rp->r_pc);
				fault_type = F_INVAL;
			} else {
				bzero(&siginfo, sizeof (siginfo));
				siginfo.si_signo = SIGTRAP;
				siginfo.si_code = watchcode;
				siginfo.si_addr = vaddr;
				siginfo.si_trapafter = 0;
				siginfo.si_pc = (caddr_t)rp->r_pc;
				fault = FLTWATCH;
				break;
			}
		} else {
			/* XXX pr_watch_emul() never succeeds (for now) */
			if (rw != S_EXEC && pr_watch_emul(rp, vaddr, rw))
				goto out;
			do_watch_step(vaddr, sz, rw, 0, 0);
			fault_type = F_INVAL;
		}

		res = pagefault(addr, fault_type, rw, 0);

		/*
		 * If pagefault() succeeded, ok.
		 * Otherwise attempt to grow the stack.
		 */
		if (res == 0 ||
		    (res == FC_NOMAP &&
		    addr < p->p_usrstack &&
		    grow(addr))) {
			lwp->lwp_lastfault = FLTPAGE;
			lwp->lwp_lastfaddr = addr;
			if (prismember(&p->p_fltmask, FLTPAGE)) {
				bzero(&siginfo, sizeof (siginfo));
				siginfo.si_addr = addr;
				(void) stop_on_fault(FLTPAGE, &siginfo);
			}
			goto out;
		} else if (res == FC_PROT && addr < p->p_usrstack &&
		    (mmu.pt_nx != 0 && (errcode & PF_ERR_EXEC))) {
			report_stack_exec(p, addr);
		}

#ifdef OPTERON_ERRATUM_91
		/*
		 * Workaround for Opteron Erratum 91. Prefetches may generate a
		 * page fault (they're not supposed to do that!). If this
		 * occurs we simply return back to the instruction.
		 *
		 * We rely on copyin to properly fault in the page with r_pc.
		 */
		if (opteron_erratum_91 &&
		    addr != (caddr_t)rp->r_pc &&
		    instr_is_prefetch((caddr_t)rp->r_pc)) {
#ifdef DEBUG
			cmn_err(CE_WARN, "Opteron erratum 91 occurred: "
			    "prefetch at %p in pid %d generated a trap!",
			    (void *)rp->r_pc, p->p_pid);
#endif /* DEBUG */
			goto out;
		}
#endif /* OPTERON_ERRATUM_91 */

		if (tudebug)
			showregs(type, rp, addr);
		/*
		 * In the case where both pagefault and grow fail,
		 * set the code to the value provided by pagefault.
		 * We map all errors returned from pagefault() to SIGSEGV.
		 */
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_addr = addr;
		switch (FC_CODE(res)) {
		case FC_HWERR:
		case FC_NOSUPPORT:
			siginfo.si_signo = SIGBUS;
			siginfo.si_code = BUS_ADRERR;
			fault = FLTACCESS;
			break;
		case FC_ALIGN:
			siginfo.si_signo = SIGBUS;
			siginfo.si_code = BUS_ADRALN;
			fault = FLTACCESS;
			break;
		case FC_OBJERR:
			if ((siginfo.si_errno = FC_ERRNO(res)) != EINTR) {
				siginfo.si_signo = SIGBUS;
				siginfo.si_code = BUS_OBJERR;
				fault = FLTACCESS;
			}
			break;
		default:	/* FC_NOMAP or FC_PROT */
			siginfo.si_signo = SIGSEGV;
			siginfo.si_code =
			    (res == FC_NOMAP)? SEGV_MAPERR : SEGV_ACCERR;
			fault = FLTBOUNDS;
			break;
		}
		break;

	case T_ILLINST + USER:	/* invalid opcode fault */
		/*
		 * If the syscall instruction is disabled due to LDT usage, a
		 * user program that attempts to execute it will trigger a #ud
		 * trap. Check for that case here. If this occurs on a CPU which
		 * doesn't even support syscall, the result of all of this will
		 * be to emulate that particular instruction.
		 */
		if (p->p_ldt != NULL &&
		    ldt_rewrite_syscall(rp, p, X86FSET_ASYSC))
			goto out;

#ifdef __amd64
		/*
		 * Emulate the LAHF and SAHF instructions if needed.
		 * See the instr_is_lsahf function for details.
		 */
		if (p->p_model == DATAMODEL_LP64 &&
		    instr_is_lsahf((caddr_t)rp->r_pc, &instr)) {
			emulate_lsahf(rp, instr);
			goto out;
		}
#endif

		if (tudebug)
			showregs(type, rp, (caddr_t)0);
		siginfo.si_signo = SIGILL;
		siginfo.si_code = ILL_ILLOPC;
		siginfo.si_addr = (caddr_t)rp->r_pc;
		fault = FLTILL;
		break;

	case T_ZERODIV + USER:		/* integer divide by zero */
		if (tudebug && tudebugfpe)
			showregs(type, rp, (caddr_t)0);
		siginfo.si_signo = SIGFPE;
		siginfo.si_code = FPE_INTDIV;
		siginfo.si_addr = (caddr_t)rp->r_pc;
		fault = FLTIZDIV;
		break;

	case T_OVFLW + USER:	/* integer overflow */
		if (tudebug && tudebugfpe)
			showregs(type, rp, (caddr_t)0);
		siginfo.si_signo = SIGFPE;
		siginfo.si_code = FPE_INTOVF;
		siginfo.si_addr = (caddr_t)rp->r_pc;
		fault = FLTIOVF;
		break;

	case T_NOEXTFLT + USER:	/* math coprocessor not available */
		if (tudebug && tudebugfpe)
			showregs(type, rp, addr);
		if (fpnoextflt(rp)) {
			siginfo.si_signo = SIGILL;
			siginfo.si_code = ILL_ILLOPC;
			siginfo.si_addr = (caddr_t)rp->r_pc;
			fault = FLTILL;
		}
		break;

	case T_EXTOVRFLT:	/* extension overrun fault */
		/* check if we took a kernel trap on behalf of user */
		{
			extern void ndptrap_frstor(void);
			if (rp->r_pc != (uintptr_t)ndptrap_frstor) {
				sti(); /* T_EXTOVRFLT comes in via cmninttrap */
				(void) die(type, rp, addr, cpuid);
			}
			type |= USER;
		}
		/*FALLTHROUGH*/
	case T_EXTOVRFLT + USER:	/* extension overrun fault */
		if (tudebug && tudebugfpe)
			showregs(type, rp, addr);
		if (fpextovrflt(rp)) {
			siginfo.si_signo = SIGSEGV;
			siginfo.si_code = SEGV_MAPERR;
			siginfo.si_addr = (caddr_t)rp->r_pc;
			fault = FLTBOUNDS;
		}
		break;

	case T_EXTERRFLT:	/* x87 floating point exception pending */
		/* check if we took a kernel trap on behalf of user */
		{
			extern void ndptrap_frstor(void);
			if (rp->r_pc != (uintptr_t)ndptrap_frstor) {
				sti(); /* T_EXTERRFLT comes in via cmninttrap */
				(void) die(type, rp, addr, cpuid);
			}
			type |= USER;
		}
		/*FALLTHROUGH*/

	case T_EXTERRFLT + USER: /* x87 floating point exception pending */
		if (tudebug && tudebugfpe)
			showregs(type, rp, addr);
		if ((sicode = fpexterrflt(rp)) != 0) {
			siginfo.si_signo = SIGFPE;
			siginfo.si_code = sicode;
			siginfo.si_addr = (caddr_t)rp->r_pc;
			fault = FLTFPE;
		}
		break;

	case T_SIMDFPE + USER:		/* SSE and SSE2 exceptions */
		if (tudebug && tudebugsse)
			showregs(type, rp, addr);
		if (!is_x86_feature(x86_featureset, X86FSET_SSE) &&
		    !is_x86_feature(x86_featureset, X86FSET_SSE2)) {
			/*
			 * There are rumours that some user instructions
			 * on older CPUs can cause this trap to occur; in
			 * which case send a SIGILL instead of a SIGFPE.
			 */
			siginfo.si_signo = SIGILL;
			siginfo.si_code = ILL_ILLTRP;
			siginfo.si_addr = (caddr_t)rp->r_pc;
			siginfo.si_trapno = type & ~USER;
			fault = FLTILL;
		} else if ((sicode = fpsimderrflt(rp)) != 0) {
			siginfo.si_signo = SIGFPE;
			siginfo.si_code = sicode;
			siginfo.si_addr = (caddr_t)rp->r_pc;
			fault = FLTFPE;
		}

		sti();	/* The SIMD exception comes in via cmninttrap */
		break;

	case T_BPTFLT:	/* breakpoint trap */
		/*
		 * Kernel breakpoint traps should only happen when kmdb is
		 * active, and even then, it'll have interposed on the IDT, so
		 * control won't get here. If it does, we've hit a breakpoint
		 * without the debugger, which is very strange, and very
		 * fatal.
		 */
		if (tudebug && tudebugbpt)
			showregs(type, rp, (caddr_t)0);

		(void) die(type, rp, addr, cpuid);
		break;

	case T_SGLSTP: /* single step/hw breakpoint exception */

		/* Now evaluate how we got here */
		if (lwp != NULL && (lwp->lwp_pcb.pcb_drstat & DR_SINGLESTEP)) {
			/*
			 * i386 single-steps even through lcalls which
			 * change the privilege level. So we take a trap at
			 * the first instruction in privileged mode.
			 *
			 * Set a flag to indicate that upon completion of
			 * the system call, deal with the single-step trap.
			 *
			 * The same thing happens for sysenter, too.
			 */
			singlestep_twiddle = 0;
			if (rp->r_pc == (uintptr_t)sys_sysenter ||
			    rp->r_pc == (uintptr_t)brand_sys_sysenter) {
				singlestep_twiddle = 1;
#if defined(__amd64)
				/*
				 * Since we are already on the kernel's
				 * %gs, on 64-bit systems the sysenter case
				 * needs to adjust the pc to avoid
				 * executing the swapgs instruction at the
				 * top of the handler.
				 */
				if (rp->r_pc == (uintptr_t)sys_sysenter)
					rp->r_pc = (uintptr_t)
					    _sys_sysenter_post_swapgs;
				else
					rp->r_pc = (uintptr_t)
					    _brand_sys_sysenter_post_swapgs;
#endif
			}
#if defined(__i386)
			else if (rp->r_pc == (uintptr_t)sys_call ||
			    rp->r_pc == (uintptr_t)brand_sys_call) {
				singlestep_twiddle = 1;
			}
#endif
			else {
				/* not on sysenter/syscall; uregs available */
				if (tudebug && tudebugbpt)
					showregs(type, rp, (caddr_t)0);
			}
			if (singlestep_twiddle) {
				rp->r_ps &= ~PS_T; /* turn off trace */
				lwp->lwp_pcb.pcb_flags |= DEBUG_PENDING;
				ct->t_post_sys = 1;
				aston(curthread);
				goto cleanup;
			}
		}
		/* XXX - needs review on debugger interface? */
		if (boothowto & RB_DEBUG)
			debug_enter((char *)NULL);
		else
			(void) die(type, rp, addr, cpuid);
		break;

	case T_NMIFLT:	/* NMI interrupt */
		printf("Unexpected NMI in system mode\n");
		goto cleanup;

	case T_NMIFLT + USER:	/* NMI interrupt */
		printf("Unexpected NMI in user mode\n");
		break;

	case T_GPFLT:	/* general protection violation */
		/*
		 * Any #GP that occurs during an on_trap .. no_trap bracket
		 * with OT_DATA_ACCESS or OT_SEGMENT_ACCESS protection,
		 * or in a on_fault .. no_fault bracket, is forgiven
		 * and we trampoline. This protection is given regardless
		 * of whether we are 32/64 bit etc - if a distinction is
		 * required then define new on_trap protection types.
		 *
		 * On amd64, we can get a #gp from referencing addresses
		 * in the virtual address hole e.g. from a copyin or in
		 * update_sregs while updating user segment registers.
		 *
		 * On the 32-bit hypervisor we could also generate one in
		 * mfn_to_pfn by reaching around or into where the hypervisor
		 * lives which is protected by segmentation.
		 */

		/*
		 * If we're under on_trap() protection (see <sys/ontrap.h>),
		 * set ot_trap and trampoline back to the on_trap() call site
		 * for OT_DATA_ACCESS or OT_SEGMENT_ACCESS.
		 */
		if (ct->t_ontrap != NULL) {
			int ttype = ct->t_ontrap->ot_prot &
			    (OT_DATA_ACCESS | OT_SEGMENT_ACCESS);

			if (ttype != 0) {
				ct->t_ontrap->ot_trap |= ttype;
				if (tudebug)
					showregs(type, rp, (caddr_t)0);
				rp->r_pc = ct->t_ontrap->ot_trampoline;
				goto cleanup;
			}
		}

		/*
		 * If we're under lofault protection (copyin etc.),
		 * longjmp back to lofault with an EFAULT.
		 */
		if (ct->t_lofault) {
			/*
			 * Fault is not resolvable, so just return to lofault
			 */
			if (lodebug) {
				showregs(type, rp, addr);
				traceregs(rp);
			}
			rp->r_r0 = EFAULT;
			rp->r_pc = ct->t_lofault;
			goto cleanup;
		}

		/*
		 * We fall through to the next case, which repeats
		 * the OT_SEGMENT_ACCESS check we've already done,
		 * so we'll always fall through to the T_STKFLT case.
		 */
		/*FALLTHROUGH*/
	case T_SEGFLT:	/* segment not present fault */
		/*
		 * One example of this is #NP in update_sregs while
		 * attempting to update a user segment register
		 * that points to a descriptor that is marked not
		 * present.
		 */
		if (ct->t_ontrap != NULL &&
		    ct->t_ontrap->ot_prot & OT_SEGMENT_ACCESS) {
			ct->t_ontrap->ot_trap |= OT_SEGMENT_ACCESS;
			if (tudebug)
				showregs(type, rp, (caddr_t)0);
			rp->r_pc = ct->t_ontrap->ot_trampoline;
			goto cleanup;
		}
		/*FALLTHROUGH*/
	case T_STKFLT:	/* stack fault */
	case T_TSSFLT:	/* invalid TSS fault */
		if (tudebug)
			showregs(type, rp, (caddr_t)0);
		if (kern_gpfault(rp))
			(void) die(type, rp, addr, cpuid);
		goto cleanup;

	/*
	 * ONLY 32-bit PROCESSES can USE a PRIVATE LDT! 64-bit apps
	 * should have no need for them, so we put a stop to it here.
	 *
	 * So: not-present fault is ONLY valid for 32-bit processes with
	 * a private LDT trying to do a system call. Emulate it.
	 *
	 * #gp fault is ONLY valid for 32-bit processes also, which DO NOT
	 * have a private LDT, and are trying to do a system call. Emulate it.
	 */

	case T_SEGFLT + USER:	/* segment not present fault */
	case T_GPFLT + USER:	/* general protection violation */
#ifdef _SYSCALL32_IMPL
		if (p->p_model != DATAMODEL_NATIVE) {
#endif /* _SYSCALL32_IMPL */
		if (instr_is_lcall_syscall((caddr_t)rp->r_pc)) {
			if (type == T_SEGFLT + USER)
				ASSERT(p->p_ldt != NULL);

			if ((p->p_ldt == NULL && type == T_GPFLT + USER) ||
			    type == T_SEGFLT + USER) {

			/*
			 * The user attempted a system call via the obsolete
			 * call gate mechanism. Because the process doesn't have
			 * an LDT (i.e. the ldtr contains 0), a #gp results.
			 * Emulate the syscall here, just as we do above for a
			 * #np trap.
			 */

			/*
			 * Since this is a not-present trap, rp->r_pc points to
			 * the trapping lcall instruction. We need to bump it
			 * to the next insn so the app can continue on.
			 */
			rp->r_pc += LCALLSIZE;
			lwp->lwp_regs = rp;

			/*
			 * Normally the microstate of the LWP is forced back to
			 * LMS_USER by the syscall handlers. Emulate that
			 * behavior here.
			 */
			mstate = LMS_USER;

			dosyscall();
			goto out;
			}
		}
#ifdef _SYSCALL32_IMPL
		}
#endif /* _SYSCALL32_IMPL */
		/*
		 * If the current process is using a private LDT and the
		 * trapping instruction is sysenter, the sysenter instruction
		 * has been disabled on the CPU because it destroys segment
		 * registers. If this is the case, rewrite the instruction to
		 * be a safe system call and retry it. If this occurs on a CPU
		 * which doesn't even support sysenter, the result of all of
		 * this will be to emulate that particular instruction.
		 */
		if (p->p_ldt != NULL &&
		    ldt_rewrite_syscall(rp, p, X86FSET_SEP))
			goto out;

		/*FALLTHROUGH*/

	case T_BOUNDFLT + USER:	/* bound fault */
	case T_STKFLT + USER:	/* stack fault */
	case T_TSSFLT + USER:	/* invalid TSS fault */
		if (tudebug)
			showregs(type, rp, (caddr_t)0);
		siginfo.si_signo = SIGSEGV;
		siginfo.si_code = SEGV_MAPERR;
		siginfo.si_addr = (caddr_t)rp->r_pc;
		fault = FLTBOUNDS;
		break;

	case T_ALIGNMENT + USER:	/* user alignment error (486) */
		if (tudebug)
			showregs(type, rp, (caddr_t)0);
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_signo = SIGBUS;
		siginfo.si_code = BUS_ADRALN;
		siginfo.si_addr = (caddr_t)rp->r_pc;
		fault = FLTACCESS;
		break;

	case T_SGLSTP + USER: /* single step/hw breakpoint exception */
		if (tudebug && tudebugbpt)
			showregs(type, rp, (caddr_t)0);

		/* Was it single-stepping? */
		if (lwp->lwp_pcb.pcb_drstat & DR_SINGLESTEP) {
			pcb_t *pcb = &lwp->lwp_pcb;

			rp->r_ps &= ~PS_T;
			/*
			 * If both NORMAL_STEP and WATCH_STEP are in effect,
			 * give precedence to WATCH_STEP. If neither is set,
			 * user must have set the PS_T bit in %efl; treat this
			 * as NORMAL_STEP.
			 */
			if ((fault = undo_watch_step(&siginfo)) == 0 &&
			    ((pcb->pcb_flags & NORMAL_STEP) ||
			    !(pcb->pcb_flags & WATCH_STEP))) {
				siginfo.si_signo = SIGTRAP;
				siginfo.si_code = TRAP_TRACE;
				siginfo.si_addr = (caddr_t)rp->r_pc;
				fault = FLTTRACE;
			}
			pcb->pcb_flags &= ~(NORMAL_STEP|WATCH_STEP);
		}
		break;

	case T_BPTFLT + USER:	/* breakpoint trap */
		if (tudebug && tudebugbpt)
			showregs(type, rp, (caddr_t)0);
		/*
		 * int 3 (the breakpoint instruction) leaves the pc referring
		 * to the address one byte after the breakpointed address.
		 * If the P_PR_BPTADJ flag has been set via /proc, we adjust
		 * it back so it refers to the breakpointed address.
		 */
		if (p->p_proc_flag & P_PR_BPTADJ)
			rp->r_pc--;
		siginfo.si_signo = SIGTRAP;
		siginfo.si_code = TRAP_BRKPT;
		siginfo.si_addr = (caddr_t)rp->r_pc;
		fault = FLTBPT;
		break;

	case T_AST:
		/*
		 * This occurs only after the cs register has been made to
		 * look like a kernel selector, either through debugging or
		 * possibly by functions like setcontext(). The thread is
		 * about to cause a general protection fault at common_iret()
		 * in locore. We let that happen immediately instead of
		 * doing the T_AST processing.
		 */
		goto cleanup;

	case T_AST + USER:	/* profiling, resched, h/w error pseudo trap */
		if (lwp->lwp_pcb.pcb_flags & ASYNC_HWERR) {
			proc_t *p = ttoproc(curthread);
			extern void print_msg_hwerr(ctid_t ct_id, proc_t *p);

			lwp->lwp_pcb.pcb_flags &= ~ASYNC_HWERR;
			print_msg_hwerr(p->p_ct_process->conp_contract.ct_id,
			    p);
			contract_process_hwerr(p->p_ct_process, p);
			siginfo.si_signo = SIGKILL;
			siginfo.si_code = SI_NOINFO;
		} else if (lwp->lwp_pcb.pcb_flags & CPC_OVERFLOW) {
			lwp->lwp_pcb.pcb_flags &= ~CPC_OVERFLOW;
			if (kcpc_overflow_ast()) {
				/*
				 * Signal performance counter overflow
				 */
				if (tudebug)
					showregs(type, rp, (caddr_t)0);
				bzero(&siginfo, sizeof (siginfo));
				siginfo.si_signo = SIGEMT;
				siginfo.si_code = EMT_CPCOVF;
				siginfo.si_addr = (caddr_t)rp->r_pc;
				fault = FLTCPCOVF;
			}
		}

		break;
	}

	/*
	 * We can't get here from a system trap
	 */
	ASSERT(type & USER);

	if (fault) {
		/* We took a fault so abort single step. */
		lwp->lwp_pcb.pcb_flags &= ~(NORMAL_STEP|WATCH_STEP);
		/*
		 * Remember the fault and fault address
		 * for real-time (SIGPROF) profiling.
		 */
		lwp->lwp_lastfault = fault;
		lwp->lwp_lastfaddr = siginfo.si_addr;

		DTRACE_PROC2(fault, int, fault, ksiginfo_t *, &siginfo);

		/*
		 * If a debugger has declared this fault to be an
		 * event of interest, stop the lwp. Otherwise just
		 * deliver the associated signal.
		 */
		if (siginfo.si_signo != SIGKILL &&
		    prismember(&p->p_fltmask, fault) &&
		    stop_on_fault(fault, &siginfo) == 0)
			siginfo.si_signo = 0;
	}

	if (siginfo.si_signo)
		trapsig(&siginfo, (fault != FLTFPE && fault != FLTCPCOVF));

	if (lwp->lwp_oweupc)
		profil_tick(rp->r_pc);

	if (ct->t_astflag | ct->t_sig_check) {
		/*
		 * Turn off the AST flag before checking all the conditions that
		 * may have caused an AST. This flag is on whenever a signal or
		 * unusual condition should be handled after the next trap or
		 * syscall.
		 */
		astoff(ct);
		/*
		 * If a single-step trap occurred on a syscall (see above)
		 * recognize it now. Do this before checking for signals
		 * because deferred_singlestep_trap() may generate a SIGTRAP to
		 * the LWP or may otherwise mark the LWP to call issig(FORREAL).
		 */
		if (lwp->lwp_pcb.pcb_flags & DEBUG_PENDING)
			deferred_singlestep_trap((caddr_t)rp->r_pc);

		ct->t_sig_check = 0;

		mutex_enter(&p->p_lock);
		if (curthread->t_proc_flag & TP_CHANGEBIND) {
			timer_lwpbind();
			curthread->t_proc_flag &= ~TP_CHANGEBIND;
		}
		mutex_exit(&p->p_lock);

		/*
		 * For kaio requests that are on the per-process poll queue,
		 * aiop->aio_pollq (their AIO_POLL bit is set), the kernel
		 * should copy their result_t out to user memory. By copying
		 * out the result_t, the user can poll on memory waiting
		 * for the kaio request to complete.
		 */
		if (p->p_aio)
			aio_cleanup(0);
		/*
		 * If this LWP was asked to hold, call holdlwp(), which will
		 * stop. holdlwps() sets this up and calls pokelwps() which
		 * sets the AST flag.
		 *
		 * Also check TP_EXITLWP, since this is used by fresh new LWPs
		 * through lwp_rtt(). That flag is set if the lwp_create(2)
		 * syscall failed after creating the LWP.
		 */
		if (ISHOLD(p))
			holdlwp();

		/*
		 * All code that sets signals and makes ISSIG evaluate true must
		 * set t_astflag afterwards.
		 */
		if (ISSIG_PENDING(ct, lwp, p)) {
			if (issig(FORREAL))
				psig();
			ct->t_sig_check = 1;
		}

		if (ct->t_rprof != NULL) {
			realsigprof(0, 0, 0);
			ct->t_sig_check = 1;
		}

		/*
		 * /proc can't enable/disable the trace bit itself
		 * because that could race with the call gate used by
		 * system calls via "lcall". If that happened, an
		 * invalid EFLAGS would result. prstep()/prnostep()
		 * therefore schedule an AST for the purpose.
		 */
		if (lwp->lwp_pcb.pcb_flags & REQUEST_STEP) {
			lwp->lwp_pcb.pcb_flags &= ~REQUEST_STEP;
			rp->r_ps |= PS_T;
		}
		if (lwp->lwp_pcb.pcb_flags & REQUEST_NOSTEP) {
			lwp->lwp_pcb.pcb_flags &= ~REQUEST_NOSTEP;
			rp->r_ps &= ~PS_T;
		}
	}

out:	/* We can't get here from a system trap */
	ASSERT(type & USER);

	if (ISHOLD(p))
		holdlwp();

	/*
	 * Set state to LWP_USER here so preempt won't give us a kernel
	 * priority if it occurs after this point. Call CL_TRAPRET() to
	 * restore the user-level priority.
	 *
	 * It is important that no locks (other than spinlocks) be entered
	 * after this point before returning to user mode (unless lwp_state
	 * is set back to LWP_SYS).
	 */
	lwp->lwp_state = LWP_USER;

	if (ct->t_trapret) {
		ct->t_trapret = 0;
		thread_lock(ct);
		CL_TRAPRET(ct);
		thread_unlock(ct);
	}
	if (CPU->cpu_runrun || curthread->t_schedflag & TS_ANYWAITQ)
		preempt();
	prunstop();
	(void) new_mstate(ct, mstate);

	/* Kernel probe */
	TNF_PROBE_1(thread_state, "thread", /* CSTYLED */,
	    tnf_microstate, state, LMS_USER);

	return;

cleanup:	/* system traps end up here */
	ASSERT(!(type & USER));
}

/*
 * Patch non-zero to disable preemption of threads in the kernel.
 */
int IGNORE_KERNEL_PREEMPTION = 0;	/* XXX - delete this someday */

struct kpreempt_cnts {		/* kernel preemption statistics */
	int	kpc_idle;	/* executing idle thread */
	int	kpc_intr;	/* executing interrupt thread */
	int	kpc_clock;	/* executing clock thread */
	int	kpc_blocked;	/* thread has blocked preemption (t_preempt) */
	int	kpc_notonproc;	/* thread is surrendering processor */
	int	kpc_inswtch;	/* thread has ratified scheduling decision */
	int	kpc_prilevel;	/* processor interrupt level is too high */
	int	kpc_apreempt;	/* asynchronous preemption */
	int	kpc_spreempt;	/* synchronous preemption */
} kpreempt_cnts;

/*
 * Kernel preemption: forced rescheduling; preempt the running kernel thread.
 * The argument is the old PIL for an interrupt,
 * or the distinguished value KPREEMPT_SYNC.
 */
void
kpreempt(int asyncspl)
{
	kthread_t *ct = curthread;

	if (IGNORE_KERNEL_PREEMPTION) {
		aston(CPU->cpu_dispthread);
		return;
	}

	/*
	 * Check that conditions are right for kernel preemption
	 */
	do {
		if (ct->t_preempt) {
			/*
			 * Either this is a privileged thread (idle, panic,
			 * interrupt) or it will check for preemption itself
			 * when t_preempt is lowered.  We must specifically
			 * handle the case where the thread is in the middle
			 * of swtch (resume has been called) and has its
			 * t_preempt set [the idle thread, or a thread that
			 * is already in kpreempt], and then a high priority
			 * thread becomes available in the local dispatch
			 * queue.  In this case the resumed thread needs to
			 * take a trap so that it can call kpreempt; we
			 * arrange that by using siron().
			 * We detect this condition by noting that the idle
			 * thread is running in the midst of resume:
			 * curthread->t_pri == -1 &&
			 * CPU->cpu_dispthread != CPU->cpu_thread.
			 * This check is safe only at high pil: resume is
			 * called at high pil, and only resume_from_idle
			 * changes the pil.
			 */
			if (ct->t_pri < 0) {
				kpreempt_cnts.kpc_idle++;
				if (CPU->cpu_dispthread != CPU->cpu_thread)
					siron();
			} else if (ct->t_flag & T_INTR_THREAD) {
				kpreempt_cnts.kpc_intr++;
				if (ct->t_pil == CLOCK_LEVEL)
					kpreempt_cnts.kpc_clock++;
			} else {
				kpreempt_cnts.kpc_blocked++;
				if (CPU->cpu_dispthread != CPU->cpu_thread)
					siron();
			}
			aston(CPU->cpu_dispthread);
			return;
		}
		if (ct->t_state != TS_ONPROC ||
		    ct->t_disp_queue != CPU->cpu_disp) {
			/* this thread will be calling swtch() shortly */
			kpreempt_cnts.kpc_notonproc++;
			if (CPU->cpu_thread != CPU->cpu_dispthread) {
				/* already in swtch(), force another */
				kpreempt_cnts.kpc_inswtch++;
				siron();
			}
			return;
		}
		if (getpil() >= DISP_LEVEL) {
			/*
			 * We can't preempt this thread if it is at
			 * a PIL >= DISP_LEVEL since it may be holding
			 * a spin lock (like sched_lock).
			 */
			siron();	/* check back later */
			kpreempt_cnts.kpc_prilevel++;
			return;
		}
		if (!interrupts_enabled()) {
			/*
			 * Can't preempt while running with ints disabled
			 */
			kpreempt_cnts.kpc_prilevel++;
			return;
		}
		if (asyncspl != KPREEMPT_SYNC)
			kpreempt_cnts.kpc_apreempt++;
		else
			kpreempt_cnts.kpc_spreempt++;

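		/*
		 * Raise t_preempt so we don't recursively preempt ourselves
		 * inside preempt(), then loop in case another preemption
		 * request (cpu_kprunrun) arrived in the meantime.
		 */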
		ct->t_preempt++;
		preempt();
		ct->t_preempt--;
	} while (CPU->cpu_kprunrun);
}

/*
 * Print out debugging info.
 */
static void
showregs(uint_t type, struct regs *rp, caddr_t addr)
{
	int s;

	s = spl7();
	type &= ~USER;
	if (PTOU(curproc)->u_comm[0])
		printf("%s: ", PTOU(curproc)->u_comm);
	if (type < TRAP_TYPES)
		printf("#%s %s\n", trap_type_mnemonic[type], trap_type[type]);
	else
		switch (type) {
		case T_SYSCALL:
			printf("Syscall Trap:\n");
			break;
		case T_AST:
			printf("AST\n");
			break;
		default:
			printf("Bad Trap = %d\n", type);
			break;
		}
	if (type == T_PGFLT) {
		printf("Bad %s fault at addr=0x%lx\n",
		    USERMODE(rp->r_cs) ? "user": "kernel", (uintptr_t)addr);
	} else if (addr) {
		printf("addr=0x%lx\n", (uintptr_t)addr);
	}

	printf("pid=%d, pc=0x%lx, sp=0x%lx, eflags=0x%lx\n",
	    (ttoproc(curthread) && ttoproc(curthread)->p_pidp) ?
	    ttoproc(curthread)->p_pid : 0, rp->r_pc, rp->r_sp, rp->r_ps);

#if defined(__lint)
	/*
	 * this clause can be deleted when lint bug 4870403 is fixed
	 * (lint thinks that bit 32 is illegal in a %b format string)
	 */
	printf("cr0: %x cr4: %b\n",
	    (uint_t)getcr0(), (uint_t)getcr4(), FMT_CR4);
#else
	printf("cr0: %b cr4: %b\n",
	    (uint_t)getcr0(), FMT_CR0, (uint_t)getcr4(), FMT_CR4);
#endif /* __lint */

1714 printf("cr2: %lx", getcr2());
1715 #if !defined(__xpv)
1716 printf("cr3: %lx", getcr3());
#if defined(__amd64)
	printf("cr8: %lx\n", getcr8());
#endif
#endif
	printf("\n");

	dumpregs(rp);
	splx(s);
}

static void
dumpregs(struct regs *rp)
{
#if defined(__amd64)
	const char fmt[] = "\t%3s: %16lx %3s: %16lx %3s: %16lx\n";

	printf(fmt, "rdi", rp->r_rdi, "rsi", rp->r_rsi, "rdx", rp->r_rdx);
	printf(fmt, "rcx", rp->r_rcx, " r8", rp->r_r8, " r9", rp->r_r9);
	printf(fmt, "rax", rp->r_rax, "rbx", rp->r_rbx, "rbp", rp->r_rbp);
	printf(fmt, "r10", rp->r_r10, "r11", rp->r_r11, "r12", rp->r_r12);
	printf(fmt, "r13", rp->r_r13, "r14", rp->r_r14, "r15", rp->r_r15);

	printf(fmt, "fsb", rdmsr(MSR_AMD_FSBASE), "gsb", rdmsr(MSR_AMD_GSBASE),
	    " ds", rp->r_ds);
	printf(fmt, " es", rp->r_es, " fs", rp->r_fs, " gs", rp->r_gs);

	printf(fmt, "trp", rp->r_trapno, "err", rp->r_err, "rip", rp->r_rip);
	printf(fmt, " cs", rp->r_cs, "rfl", rp->r_rfl, "rsp", rp->r_rsp);

	printf("\t%3s: %16lx\n", " ss", rp->r_ss);

#elif defined(__i386)
	const char fmt[] = "\t%3s: %8lx %3s: %8lx %3s: %8lx %3s: %8lx\n";

	printf(fmt, " gs", rp->r_gs, " fs", rp->r_fs,
	    " es", rp->r_es, " ds", rp->r_ds);
	printf(fmt, "edi", rp->r_edi, "esi", rp->r_esi,
	    "ebp", rp->r_ebp, "esp", rp->r_esp);
	printf(fmt, "ebx", rp->r_ebx, "edx", rp->r_edx,
	    "ecx", rp->r_ecx, "eax", rp->r_eax);
	printf(fmt, "trp", rp->r_trapno, "err", rp->r_err,
	    "eip", rp->r_eip, " cs", rp->r_cs);
	printf("\t%3s: %8lx %3s: %8lx %3s: %8lx\n",
	    "efl", rp->r_efl, "usp", rp->r_uesp, " ss", rp->r_ss);

#endif	/* __i386 */
}

/*
 * Test to see if the instruction is iret on i386 or iretq on amd64.
 *
 * On the hypervisor we can only test for nopop_sys_rtt_syscall. If true
 * then we are in the context of hypervisor's failsafe handler because it
 * tried to iret and failed due to a bad selector. See xen_failsafe_callback.
 */
static int
instr_is_iret(caddr_t pc)
{

#if defined(__xpv)
	extern void nopop_sys_rtt_syscall(void);
	return ((pc == (caddr_t)nopop_sys_rtt_syscall) ? 1 : 0);

#else

#if defined(__amd64)
	static const uint8_t iret_insn[2] = { 0x48, 0xcf };	/* iretq */

#elif defined(__i386)
	static const uint8_t iret_insn[1] = { 0xcf };		/* iret */
#endif	/* __i386 */
	return (bcmp(pc, iret_insn, sizeof (iret_insn)) == 0);

#endif	/* __xpv */
}

#if defined(__i386)

/*
 * Test to see if the instruction is part of __SEGREGS_POP
 *
 * Note carefully the appallingly awful dependency between
 * the instruction sequence used in __SEGREGS_POP and these
 * instructions encoded here.
 */
static int
instr_is_segregs_pop(caddr_t pc)
{
	static const uint8_t movw_0_esp_gs[4] = { 0x8e, 0x6c, 0x24, 0x0 };
	static const uint8_t movw_4_esp_fs[4] = { 0x8e, 0x64, 0x24, 0x4 };
	static const uint8_t movw_8_esp_es[4] = { 0x8e, 0x44, 0x24, 0x8 };
	static const uint8_t movw_c_esp_ds[4] = { 0x8e, 0x5c, 0x24, 0xc };

	if (bcmp(pc, movw_0_esp_gs, sizeof (movw_0_esp_gs)) == 0 ||
	    bcmp(pc, movw_4_esp_fs, sizeof (movw_4_esp_fs)) == 0 ||
	    bcmp(pc, movw_8_esp_es, sizeof (movw_8_esp_es)) == 0 ||
	    bcmp(pc, movw_c_esp_ds, sizeof (movw_c_esp_ds)) == 0)
		return (1);

	return (0);
}

#endif	/* __i386 */

/*
 * Test to see if the instruction is part of _sys_rtt.
 *
 * Again on the hypervisor if we try to IRET to user land with a bad code
 * or stack selector we will get vectored through xen_failsafe_callback.
 * In which case we assume we got here via _sys_rtt since we only allow
 * IRET to user land to take place in _sys_rtt.
 */
static int
instr_is_sys_rtt(caddr_t pc)
{
	extern void _sys_rtt(), _sys_rtt_end();

	if ((uintptr_t)pc < (uintptr_t)_sys_rtt ||
	    (uintptr_t)pc > (uintptr_t)_sys_rtt_end)
		return (0);

	return (1);
}

/*
 * Handle #gp faults in kernel mode.
 *
 * One legitimate way this can happen is if we attempt to update segment
 * registers to naughty values on the way out of the kernel.
 *
 * This can happen in a couple of ways: someone - either accidentally or
 * on purpose - creates (setcontext(2), lwp_create(2)) or modifies
 * (signal(2)) a ucontext that contains silly segment register values.
 * Or someone - either accidentally or on purpose - modifies the prgregset_t
 * of a subject process via /proc to contain silly segment register values.
 *
 * (The unfortunate part is that we can end up discovering the bad segment
 * register value in the middle of an 'iret' after we've popped most of the
 * stack. So it becomes quite difficult to associate an accurate ucontext
 * with the lwp, because the act of taking the #gp trap overwrites most of
 * what we were going to send the lwp.)
 *
 * OTOH if it turns out that's -not- the problem, and we're -not- an lwp
 * trying to return to user mode and we get a #gp fault, then we need
 * to die() -- which will happen if we return non-zero from this routine.
 */
static int
kern_gpfault(struct regs *rp)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	struct regs tmpregs, *trp = NULL;
	caddr_t pc = (caddr_t)rp->r_pc;
	int v;
	uint32_t auditing = AU_AUDITING();

	/*
	 * If we're not an lwp, or (when running native) if the pc is not
	 * within the _sys_rtt range, then we should immediately be
	 * die()ing horribly.
	 */
1879 if (lwp == NULL || !instr_is_sys_rtt(pc))
1880 return (1);
1881
1882 /*
1883 * So at least we're in the right part of the kernel.
1884 *
1885 * Disassemble the instruction at the faulting pc.
1886 * Once we know what it is, we carefully reconstruct the stack
1887 * based on the order in which the stack is deconstructed in
1888 * _sys_rtt. Ew.
1889 */
1890 if (instr_is_iret(pc)) {
1891 /*
1892 * We took the #gp while trying to perform the IRET.
		 * This means that either %cs or %ss is bad.
		 * All we know for sure is that most of the general
		 * registers have been restored, as have the segment
		 * registers, and all we have left on the topmost
		 * part of the lwp's stack are the registers that
		 * the iretq was unable to consume.
		 *
		 * All the rest of the state was crushed by the #gp,
		 * which pushed -its- registers atop our old save area
		 * (because we had to decrement the stack pointer, sigh),
		 * so all we can do is try to reconstruct the crushed
		 * frame from the #gp trap frame itself.
1905 */
1906 trp = &tmpregs;
1907 trp->r_ss = lwptoregs(lwp)->r_ss;
1908 trp->r_sp = lwptoregs(lwp)->r_sp;
1909 trp->r_ps = lwptoregs(lwp)->r_ps;
1910 trp->r_cs = lwptoregs(lwp)->r_cs;
1911 trp->r_pc = lwptoregs(lwp)->r_pc;
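
		/*
		 * The copy below stops at offsetof(struct regs, r_pc),
		 * so it fills in the general registers, %trapno and
		 * %err from the #gp frame without disturbing the five
		 * iret-consumable fields rebuilt above.
		 */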
1912 bcopy(rp, trp, offsetof(struct regs, r_pc));
1913
		/*
		 * Validate the offsetof() arithmetic above: the bcopy()
		 * must have copied r_err but stopped short of r_pc.
		 */
1917 ASSERT(trp->r_pc == lwptoregs(lwp)->r_pc);
		ASSERT(trp->r_err == rp->r_err);
	}
1923
1924 #if defined(__amd64)
1925 if (trp == NULL && lwp->lwp_pcb.pcb_rupdate != 0) {
1926
1927 /*
1928 * This is the common case -- we're trying to load
1929 * a bad segment register value in the only section
1930 * of kernel code that ever loads segment registers.
1931 *
1932 * We don't need to do anything at this point because
1933 * the pcb contains all the pending segment register
1934 * state, and the regs are still intact because we
1935 * didn't adjust the stack pointer yet. Given the fidelity
1936 * of all this, we could conceivably send a signal
1937 * to the lwp, rather than core-ing.
1938 */
1939 trp = lwptoregs(lwp);
1940 ASSERT((caddr_t)trp == (caddr_t)rp->r_sp);
1941 }
1942
1943 #elif defined(__i386)
1944
1945 if (trp == NULL && instr_is_segregs_pop(pc))
1946 trp = lwptoregs(lwp);
1947
1948 #endif /* __i386 */
1949
1950 if (trp == NULL)
1951 return (1);
1952
1953 /*
1954 * If we get to here, we're reasonably confident that we've
1955 * correctly decoded what happened on the way out of the kernel.
1956 * Rewrite the lwp's registers so that we can create a core dump
	 * that (at least vaguely) represents the mcontext we were
1958 * being asked to restore when things went so terribly wrong.
1959 */
1960
1961 /*
1962 * Make sure that we have a meaningful %trapno and %err.
1963 */
1964 trp->r_trapno = rp->r_trapno;
1965 trp->r_err = rp->r_err;
1966
1967 if ((caddr_t)trp != (caddr_t)lwptoregs(lwp))
1968 bcopy(trp, lwptoregs(lwp), sizeof (*trp));
1969
1970
1971 mutex_enter(&p->p_lock);
1972 lwp->lwp_cursig = SIGSEGV;
1973 mutex_exit(&p->p_lock);
1974
1975 /*
1976 * Terminate all LWPs but don't discard them. If another lwp beat
1977 * us to the punch by calling exit(), evaporate now.
1978 */
1979 proc_is_exiting(p);
1980 if (exitlwps(1) != 0) {
1981 mutex_enter(&p->p_lock);
1982 lwp_exit();
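		/* lwp_exit() does not return */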
1983 }
1984
1985 if (auditing) /* audit core dump */
1986 audit_core_start(SIGSEGV);
1987 v = core(SIGSEGV, B_FALSE);
1988 if (auditing) /* audit core dump */
1989 audit_core_finish(v ? CLD_KILLED : CLD_DUMPED);
1990 exit(v ? CLD_KILLED : CLD_DUMPED, SIGSEGV);
1991 return (0);
1992 }
1993
1994 /*
1995 * dump_tss() - Display the TSS structure
1996 */
1997
1998 #if !defined(__xpv)
1999 #if defined(__amd64)
2000
2001 static void
2002 dump_tss(void)
2003 {
2004 const char tss_fmt[] = "tss.%s:\t0x%p\n"; /* Format string */
2005 tss_t *tss = CPU->cpu_tss;
2006
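	/*
	 * tss_rsp0-2 are the stacks loaded on inter-privilege
	 * transitions to rings 0-2; tss_ist1-7 are the Interrupt
	 * Stack Table entries that IDT descriptors may select.
	 */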
2007 printf(tss_fmt, "tss_rsp0", (void *)tss->tss_rsp0);
2008 printf(tss_fmt, "tss_rsp1", (void *)tss->tss_rsp1);
2009 printf(tss_fmt, "tss_rsp2", (void *)tss->tss_rsp2);
2010
2011 printf(tss_fmt, "tss_ist1", (void *)tss->tss_ist1);
2012 printf(tss_fmt, "tss_ist2", (void *)tss->tss_ist2);
2013 printf(tss_fmt, "tss_ist3", (void *)tss->tss_ist3);
2014 printf(tss_fmt, "tss_ist4", (void *)tss->tss_ist4);
2015 printf(tss_fmt, "tss_ist5", (void *)tss->tss_ist5);
2016 printf(tss_fmt, "tss_ist6", (void *)tss->tss_ist6);
2017 printf(tss_fmt, "tss_ist7", (void *)tss->tss_ist7);
2018 }
2019
2020 #elif defined(__i386)
2021
2022 static void
2023 dump_tss(void)
2024 {
2025 const char tss_fmt[] = "tss.%s:\t0x%p\n"; /* Format string */
2026 tss_t *tss = CPU->cpu_tss;
2027
2028 printf(tss_fmt, "tss_link", (void *)(uintptr_t)tss->tss_link);
2029 printf(tss_fmt, "tss_esp0", (void *)(uintptr_t)tss->tss_esp0);
2030 printf(tss_fmt, "tss_ss0", (void *)(uintptr_t)tss->tss_ss0);
2031 printf(tss_fmt, "tss_esp1", (void *)(uintptr_t)tss->tss_esp1);
2032 printf(tss_fmt, "tss_ss1", (void *)(uintptr_t)tss->tss_ss1);
2033 printf(tss_fmt, "tss_esp2", (void *)(uintptr_t)tss->tss_esp2);
2034 printf(tss_fmt, "tss_ss2", (void *)(uintptr_t)tss->tss_ss2);
2035 printf(tss_fmt, "tss_cr3", (void *)(uintptr_t)tss->tss_cr3);
2036 printf(tss_fmt, "tss_eip", (void *)(uintptr_t)tss->tss_eip);
2037 printf(tss_fmt, "tss_eflags", (void *)(uintptr_t)tss->tss_eflags);
2038 printf(tss_fmt, "tss_eax", (void *)(uintptr_t)tss->tss_eax);
2039 printf(tss_fmt, "tss_ebx", (void *)(uintptr_t)tss->tss_ebx);
2040 printf(tss_fmt, "tss_ecx", (void *)(uintptr_t)tss->tss_ecx);
2041 printf(tss_fmt, "tss_edx", (void *)(uintptr_t)tss->tss_edx);
2042 printf(tss_fmt, "tss_esp", (void *)(uintptr_t)tss->tss_esp);
2043 }
2044
2045 #endif /* __amd64 */
2046 #endif /* !__xpv */
2047
2048 #if defined(TRAPTRACE)
2049
2050 int ttrace_nrec = 10; /* number of records to dump out */
2051 int ttrace_dump_nregs = 0; /* dump out this many records with regs too */
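/*
 * Both are ordinary globals, so they can be tuned at run time (e.g. from
 * kmdb or via /etc/system) should a panic call for more or less detail.
 */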
2052
2053 /*
2054 * Dump out the last ttrace_nrec traptrace records on each CPU
2055 */
2056 static void
2057 dump_ttrace(void)
2058 {
2059 trap_trace_ctl_t *ttc;
2060 trap_trace_rec_t *rec;
2061 uintptr_t current;
2062 int i, j, k;
2063 int n = NCPU;
2064 #if defined(__amd64)
2065 const char banner[] =
2066 "\ncpu address timestamp "
2067 "type vc handler pc\n";
2068 const char fmt1[] = "%3d %016lx %12llx ";
2069 #elif defined(__i386)
2070 const char banner[] =
2071 "\ncpu address timestamp type vc handler pc\n";
2072 const char fmt1[] = "%3d %08lx %12llx ";
2073 #endif
2074 const char fmt2[] = "%4s %3x ";
2075 const char fmt3[] = "%8s ";
2076
2077 if (ttrace_nrec == 0)
2078 return;
2079
2080 printf(banner);
2081
2082 for (i = 0; i < n; i++) {
2083 ttc = &trap_trace_ctl[i];
2084 if (ttc->ttc_first == NULL)
2085 continue;
2086
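		/*
		 * ttc_next points at the slot that will be written next,
		 * so the most recent record sits just below it; walk
		 * backwards from there, wrapping from ttc_first back up
		 * to the last record below ttc_limit.
		 */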
2087 current = ttc->ttc_next - sizeof (trap_trace_rec_t);
2088 for (j = 0; j < ttrace_nrec; j++) {
2089 struct sysent *sys;
2090 struct autovec *vec;
2091 extern struct av_head autovect[];
2092 int type;
2093 ulong_t off;
2094 char *sym, *stype;
2095
2096 if (current < ttc->ttc_first)
2097 current =
2098 ttc->ttc_limit - sizeof (trap_trace_rec_t);
2099
			if (current == 0)
2101 continue;
2102
2103 rec = (trap_trace_rec_t *)current;
2104
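			/* a zero timestamp marks a never-written record */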
2105 if (rec->ttr_stamp == 0)
2106 break;
2107
2108 printf(fmt1, i, (uintptr_t)rec, rec->ttr_stamp);
2109
2110 switch (rec->ttr_marker) {
2111 case TT_SYSCALL:
2112 case TT_SYSENTER:
2113 case TT_SYSC:
2114 case TT_SYSC64:
2115 #if defined(__amd64)
2116 sys = &sysent32[rec->ttr_sysnum];
2117 switch (rec->ttr_marker) {
2118 case TT_SYSC64:
2119 sys = &sysent[rec->ttr_sysnum];
2120 /*FALLTHROUGH*/
2121 #elif defined(__i386)
2122 sys = &sysent[rec->ttr_sysnum];
2123 switch (rec->ttr_marker) {
2124 case TT_SYSC64:
2125 #endif
2126 case TT_SYSC:
2127 stype = "sysc"; /* syscall */
2128 break;
2129 case TT_SYSCALL:
2130 stype = "lcal"; /* lcall */
2131 break;
2132 case TT_SYSENTER:
2133 stype = "syse"; /* sysenter */
2134 break;
2135 default:
2136 break;
2137 }
			printf(fmt2, stype, rec->ttr_sysnum);
2139 if (sys != NULL) {
2140 sym = kobj_getsymname(
2141 (uintptr_t)sys->sy_callc,
2142 &off);
2143 if (sym != NULL)
2144 printf(fmt3, sym);
2145 else
2146 printf("%p ", sys->sy_callc);
2147 } else {
2148 printf(fmt3, "unknown");
2149 }
2150 break;
2151
2152 case TT_INTERRUPT:
2153 printf(fmt2, "intr", rec->ttr_vector);
2154 if (get_intr_handler != NULL)
2155 vec = (struct autovec *)
2156 (*get_intr_handler)
2157 (rec->ttr_cpuid, rec->ttr_vector);
2158 else
2159 vec =
2160 autovect[rec->ttr_vector].avh_link;
2161
2162 if (vec != NULL) {
2163 sym = kobj_getsymname(
2164 (uintptr_t)vec->av_vector, &off);
2165 if (sym != NULL)
2166 printf(fmt3, sym);
2167 else
2168 printf("%p ", vec->av_vector);
2169 } else {
				printf(fmt3, "unknown");
2171 }
2172 break;
2173
2174 case TT_TRAP:
2175 case TT_EVENT:
2176 type = rec->ttr_regs.r_trapno;
2177 printf(fmt2, "trap", type);
2178 if (type < TRAP_TYPES)
2179 printf(" #%s ",
2180 trap_type_mnemonic[type]);
2181 else
2182 switch (type) {
2183 case T_AST:
2184 printf(fmt3, "ast");
2185 break;
2186 default:
2187 printf(fmt3, "");
2188 break;
2189 }
2190 break;
2191
2192 default:
2193 break;
2194 }
2195
2196 sym = kobj_getsymname(rec->ttr_regs.r_pc, &off);
2197 if (sym != NULL)
2198 printf("%s+%lx\n", sym, off);
2199 else
2200 printf("%lx\n", rec->ttr_regs.r_pc);
2201
2202 if (ttrace_dump_nregs-- > 0) {
2203 int s;
2204
2205 if (rec->ttr_marker == TT_INTERRUPT)
2206 printf(
2207 "\t\tipl %x spl %x pri %x\n",
2208 rec->ttr_ipl,
2209 rec->ttr_spl,
2210 rec->ttr_pri);
2211
2212 dumpregs(&rec->ttr_regs);
2213
2214 printf("\t%3s: %p\n\n", " ct",
2215 (void *)rec->ttr_curthread);
2216
2217 /*
2218 * print out the pc stack that we recorded
2219 * at trap time (if any)
2220 */
2221 for (s = 0; s < rec->ttr_sdepth; s++) {
2222 uintptr_t fullpc;
2223
2224 if (s >= TTR_STACK_DEPTH) {
2225 printf("ttr_sdepth corrupt\n");
2226 break;
2227 }
2228
2229 fullpc = (uintptr_t)rec->ttr_stack[s];
2230
2231 sym = kobj_getsymname(fullpc, &off);
2232 if (sym != NULL)
2233 printf("-> %s+0x%lx()\n",
2234 sym, off);
2235 else
2236 printf("-> 0x%lx()\n", fullpc);
2237 }
2238 printf("\n");
2239 }
2240 current -= sizeof (trap_trace_rec_t);
2241 }
2242 }
2243 }
2244
2245 #endif /* TRAPTRACE */
2246
2247 void
2248 panic_showtrap(struct panic_trap_info *tip)
2249 {
2250 showregs(tip->trap_type, tip->trap_regs, tip->trap_addr);
2251
2252 #if defined(TRAPTRACE)
2253 dump_ttrace();
2254 #endif
2255
2256 #if !defined(__xpv)
2257 if (tip->trap_type == T_DBLFLT)
2258 dump_tss();
2259 #endif
2260 }
2261
2262 void
2263 panic_savetrap(panic_data_t *pdp, struct panic_trap_info *tip)
2264 {
2265 panic_saveregs(pdp, tip->trap_regs);
2266 }