/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*      Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T     */
/*        All Rights Reserved   */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/bitmap.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/cred.h>
#include <sys/user.h>
#include <sys/errno.h>
#include <sys/proc.h>
#include <sys/poll_impl.h> /* only needed for kludge in sigwaiting_send() */
#include <sys/signal.h>
#include <sys/siginfo.h>
#include <sys/fault.h>
#include <sys/ucontext.h>
#include <sys/procfs.h>
#include <sys/wait.h>
#include <sys/class.h>
#include <sys/mman.h>
#include <sys/procset.h>
#include <sys/kmem.h>
#include <sys/cpuvar.h>
#include <sys/prsystm.h>
#include <sys/debug.h>
#include <vm/as.h>
#include <c2/audit.h>
#include <sys/core.h>
#include <sys/schedctl.h>
#include <sys/contract/process_impl.h>
#include <sys/cyclic.h>
#include <sys/dtrace.h>
#include <sys/sdt.h>

const k_sigset_t nullsmask = {0, 0, 0};

const k_sigset_t fillset =      /* MUST be contiguous */
        {FILLSET0, FILLSET1, FILLSET2};

const k_sigset_t cantmask =
        {CANTMASK0, CANTMASK1, CANTMASK2};

const k_sigset_t cantreset =
        {(sigmask(SIGILL)|sigmask(SIGTRAP)|sigmask(SIGPWR)), 0, 0};

const k_sigset_t ignoredefault =
        {(sigmask(SIGCONT)|sigmask(SIGCLD)|sigmask(SIGPWR)
        |sigmask(SIGWINCH)|sigmask(SIGURG)|sigmask(SIGWAITING)),
        (sigmask(SIGLWP)|sigmask(SIGCANCEL)|sigmask(SIGFREEZE)
        |sigmask(SIGTHAW)|sigmask(SIGXRES)|sigmask(SIGJVM1)
        |sigmask(SIGJVM2)), 0};

const k_sigset_t stopdefault =
        {(sigmask(SIGSTOP)|sigmask(SIGTSTP)|sigmask(SIGTTOU)|sigmask(SIGTTIN)),
        0, 0};

const k_sigset_t coredefault =
        {(sigmask(SIGQUIT)|sigmask(SIGILL)|sigmask(SIGTRAP)|sigmask(SIGIOT)
        |sigmask(SIGEMT)|sigmask(SIGFPE)|sigmask(SIGBUS)|sigmask(SIGSEGV)
        |sigmask(SIGSYS)|sigmask(SIGXCPU)|sigmask(SIGXFSZ)), 0, 0};

const k_sigset_t holdvfork =
        {(sigmask(SIGTTOU)|sigmask(SIGTTIN)|sigmask(SIGTSTP)), 0, 0};
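
/*
 * A rough guide to the sets above (descriptive only; the authoritative
 * definitions are the FILLSET and CANTMASK macros and the lists here):
 * ignoredefault, stopdefault and coredefault group signals by their
 * default disposition (discard, stop the process, terminate with a
 * core dump); cantmask holds the signals that can never be blocked;
 * holdvfork holds the job-control stops that are withheld while a
 * process has the SVFORK flag set (see issig_justlooking() below).
 */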

static  int     isjobstop(int);
static  void    post_sigcld(proc_t *, sigqueue_t *);

/*
 * Internal variable counting the number of user thread stop requests posted.
 * It may not be accurate in some situations, such as when a virtually
 * stopped thread starts to run.
 */
static int num_utstop;
/*
 * Internal variables for broadcasting an event when all thread stop requests
 * have been processed.
 */
static kcondvar_t utstop_cv;

static kmutex_t thread_stop_lock;
void del_one_utstop(void);

/*
 * Send the specified signal to the specified process.
 */
void
psignal(proc_t *p, int sig)
{
        mutex_enter(&p->p_lock);
        sigtoproc(p, NULL, sig);
        mutex_exit(&p->p_lock);
}

/*
 * Send the specified signal to the specified thread.
 */
void
tsignal(kthread_t *t, int sig)
{
        proc_t *p = ttoproc(t);

        mutex_enter(&p->p_lock);
        sigtoproc(p, t, sig);
        mutex_exit(&p->p_lock);
}
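
/*
 * Illustrative use (a sketch, not taken from this file): a kernel
 * subsystem that has already looked up the target proc_t can deliver
 * a signal with a single call; p->p_lock is acquired and released
 * internally by psignal()/tsignal():
 *
 *      proc_t *p = ...;
 *      psignal(p, SIGHUP);
 *
 * tsignal() is the same operation directed at one particular lwp.
 */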

int
signal_is_blocked(kthread_t *t, int sig)
{
        return (sigismember(&t->t_hold, sig) ||
            (schedctl_sigblock(t) && !sigismember(&cantmask, sig)));
}
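
/*
 * Note: schedctl_sigblock(t) reflects the fast-path request, made by user
 * level through the shared schedctl page, to block all blockable signals
 * without a system call; the signals in cantmask (e.g. SIGKILL and SIGSTOP)
 * can never be blocked, hence the exclusion above.
 */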

/*
 * Return true if the signal can safely be discarded on generation.
 * That is, if there is no need for the signal on the receiving end.
 * The answer is true if the process is a zombie or
 * if all of these conditions are true:
 *      the signal is being ignored
 *      the process is single-threaded
 *      the signal is not being traced by /proc
 *      the signal is not blocked by the process
 *      the signal is not being accepted via sigwait()
 */
static int
sig_discardable(proc_t *p, int sig)
{
        kthread_t *t = p->p_tlist;

        return (t == NULL ||                    /* if zombie or ... */
            (sigismember(&p->p_ignore, sig) &&  /* signal is ignored */
            t->t_forw == t &&                   /* and single-threaded */
            !tracing(p, sig) &&                 /* and no /proc tracing */
            !signal_is_blocked(t, sig) &&       /* and signal not blocked */
            !sigismember(&t->t_sigwait, sig))); /* and not being accepted */
}

/*
 * Return true if this thread is going to eat this signal soon.
 * Note that, if the signal is SIGKILL, we force stopped threads to be
 * set running (to make SIGKILL be a sure kill), but only if the process
 * is not currently locked by /proc (the P_PR_LOCK flag).  Code in /proc
 * relies on the fact that a process will not change shape while P_PR_LOCK
 * is set (it drops and reacquires p->p_lock while leaving P_PR_LOCK set).
 * We wish that we could simply call prbarrier() below, in sigtoproc(), to
 * ensure that the process is not locked by /proc, but prbarrier() drops
 * and reacquires p->p_lock and dropping p->p_lock here would be damaging.
 */
int
eat_signal(kthread_t *t, int sig)
{
        int rval = 0;
        ASSERT(THREAD_LOCK_HELD(t));

        /*
         * Do not do anything if the target thread has the signal blocked.
         */
        if (!signal_is_blocked(t, sig)) {
                t->t_sig_check = 1;     /* have thread do an issig */
                if (ISWAKEABLE(t) || ISWAITING(t)) {
                        setrun_locked(t);
                        rval = 1;
                } else if (t->t_state == TS_STOPPED && sig == SIGKILL &&
                    !(ttoproc(t)->p_proc_flag & P_PR_LOCK)) {
                        ttoproc(t)->p_stopsig = 0;
                        t->t_dtrace_stop = 0;
                        t->t_schedflag |= TS_XSTART | TS_PSTART;
                        setrun_locked(t);
                } else if (t != curthread && t->t_state == TS_ONPROC) {
                        aston(t);       /* make it do issig promptly */
                        if (t->t_cpu != CPU)
                                poke_cpu(t->t_cpu->cpu_id);
                        rval = 1;
                } else if (t->t_state == TS_RUN) {
                        rval = 1;
                }
        }

        return (rval);
}
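
/*
 * A nonzero return value means this lwp will field the signal promptly;
 * sigtoproc() below uses that to stop scanning the lwp list, except for
 * SIGKILL, which every lwp must see.
 */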

/*
 * Post a signal.
 * If a non-null thread pointer is passed, then post the signal
 * to the thread/lwp, otherwise post the signal to the process.
 */
void
sigtoproc(proc_t *p, kthread_t *t, int sig)
{
        kthread_t *tt;
        int ext = !(curproc->p_flag & SSYS) &&
            (curproc->p_ct_process != p->p_ct_process);

        ASSERT(MUTEX_HELD(&p->p_lock));

        /* System processes don't get signals */
        if (sig <= 0 || sig >= NSIG || (p->p_flag & SSYS))
                return;

        /*
         * Regardless of origin or directedness,
         * SIGKILL kills all lwps in the process immediately
         * and jobcontrol signals affect all lwps in the process.
         */
        if (sig == SIGKILL) {
                p->p_flag |= SKILLED | (ext ? SEXTKILLED : 0);
                t = NULL;
        } else if (sig == SIGCONT) {
                /*
                 * The SSCONT flag will remain set until a stopping
                 * signal comes in (below).  This is harmless.
                 */
                p->p_flag |= SSCONT;
                sigdelq(p, NULL, SIGSTOP);
                sigdelq(p, NULL, SIGTSTP);
                sigdelq(p, NULL, SIGTTOU);
                sigdelq(p, NULL, SIGTTIN);
                sigdiffset(&p->p_sig, &stopdefault);
                sigdiffset(&p->p_extsig, &stopdefault);
                p->p_stopsig = 0;
                if ((tt = p->p_tlist) != NULL) {
                        do {
                                sigdelq(p, tt, SIGSTOP);
                                sigdelq(p, tt, SIGTSTP);
                                sigdelq(p, tt, SIGTTOU);
                                sigdelq(p, tt, SIGTTIN);
                                sigdiffset(&tt->t_sig, &stopdefault);
                                sigdiffset(&tt->t_extsig, &stopdefault);
                        } while ((tt = tt->t_forw) != p->p_tlist);
                }
                if ((tt = p->p_tlist) != NULL) {
                        do {
                                thread_lock(tt);
                                if (tt->t_state == TS_STOPPED &&
                                    tt->t_whystop == PR_JOBCONTROL) {
                                        tt->t_schedflag |= TS_XSTART;
                                        setrun_locked(tt);
                                }
                                thread_unlock(tt);
                        } while ((tt = tt->t_forw) != p->p_tlist);
                }
        } else if (sigismember(&stopdefault, sig)) {
                /*
                 * This test has a race condition which we can't fix:
                 * By the time the stopping signal is received by
                 * the target process/thread, the signal handler
                 * and/or the detached state might have changed.
                 */
                if (PTOU(p)->u_signal[sig-1] == SIG_DFL &&
                    (sig == SIGSTOP || !p->p_pgidp->pid_pgorphaned))
                        p->p_flag &= ~SSCONT;
                sigdelq(p, NULL, SIGCONT);
                sigdelset(&p->p_sig, SIGCONT);
                sigdelset(&p->p_extsig, SIGCONT);
                if ((tt = p->p_tlist) != NULL) {
                        do {
                                sigdelq(p, tt, SIGCONT);
                                sigdelset(&tt->t_sig, SIGCONT);
                                sigdelset(&tt->t_extsig, SIGCONT);
                        } while ((tt = tt->t_forw) != p->p_tlist);
                }
        }

        if (sig_discardable(p, sig)) {
                DTRACE_PROC3(signal__discard, kthread_t *, p->p_tlist,
                    proc_t *, p, int, sig);
                return;
        }

        if (t != NULL) {
                /*
                 * This is a directed signal, wake up the lwp.
                 */
                sigaddset(&t->t_sig, sig);
                if (ext)
                        sigaddset(&t->t_extsig, sig);
                thread_lock(t);
                (void) eat_signal(t, sig);
                thread_unlock(t);
                DTRACE_PROC2(signal__send, kthread_t *, t, int, sig);
        } else if ((tt = p->p_tlist) != NULL) {
                /*
                 * Make sure that some lwp that already exists
                 * in the process fields the signal soon.
                 * Wake up an interruptibly sleeping lwp if necessary.
                 * For SIGKILL make all of the lwps see the signal;
                 * This is needed to guarantee a sure kill for processes
                 * with a mix of realtime and non-realtime threads.
                 */
                int su = 0;

                sigaddset(&p->p_sig, sig);
                if (ext)
                        sigaddset(&p->p_extsig, sig);
                do {
                        thread_lock(tt);
                        if (eat_signal(tt, sig) && sig != SIGKILL) {
                                thread_unlock(tt);
                                break;
                        }
                        if (SUSPENDED(tt))
                                su++;
                        thread_unlock(tt);
                } while ((tt = tt->t_forw) != p->p_tlist);
                /*
                 * If the process is deadlocked, make somebody run and die.
                 */
                if (sig == SIGKILL && p->p_stat != SIDL &&
                    p->p_lwprcnt == 0 && p->p_lwpcnt == su &&
                    !(p->p_proc_flag & P_PR_LOCK)) {
                        thread_lock(tt);
                        p->p_lwprcnt++;
                        tt->t_schedflag |= TS_CSTART;
                        setrun_locked(tt);
                        thread_unlock(tt);
                }

                DTRACE_PROC2(signal__send, kthread_t *, tt, int, sig);
        }
}

static int
isjobstop(int sig)
{
        proc_t *p = ttoproc(curthread);

        ASSERT(MUTEX_HELD(&p->p_lock));

        if (PTOU(curproc)->u_signal[sig-1] == SIG_DFL &&
            sigismember(&stopdefault, sig)) {
                /*
                 * If SIGCONT has been posted since we promoted this signal
                 * from pending to current, then don't do a jobcontrol stop.
                 */
                if (!(p->p_flag & SSCONT) &&
                    (sig == SIGSTOP || !p->p_pgidp->pid_pgorphaned) &&
                    curthread != p->p_agenttp) {
                        sigqueue_t *sqp;

                        stop(PR_JOBCONTROL, sig);
                        mutex_exit(&p->p_lock);
                        sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
                        mutex_enter(&pidlock);
                        /*
                         * Only the first lwp to continue notifies the parent.
                         */
                        if (p->p_pidflag & CLDCONT)
                                siginfofree(sqp);
                        else {
                                p->p_pidflag |= CLDCONT;
                                p->p_wcode = CLD_CONTINUED;
                                p->p_wdata = SIGCONT;
                                sigcld(p, sqp);
                        }
                        mutex_exit(&pidlock);
                        mutex_enter(&p->p_lock);
                }
                return (1);
        }
        return (0);
}
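
/*
 * isjobstop() returns nonzero when the signal is a default-disposition
 * job-control signal: the stop (if any) is performed here, and the caller
 * (issig_forreal() below) then treats the signal as consumed rather than
 * promoting it to the current signal.
 */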

/*
 * Returns true if the current process has a signal to process, and
 * the signal is not held.  The signal to process is put in p_cursig.
 * This is asked at least once each time a process enters the system
 * (though this can usually be done without actually calling issig by
 * checking the pending signal masks).  A signal does not do anything
 * directly to a process; it sets a flag that asks the process to do
 * something to itself.
 *
 * The "why" argument indicates the allowable side-effects of the call:
 *
 * FORREAL:  Extract the next pending signal from p_sig into p_cursig;
 * stop the process if a stop has been requested or if a traced signal
 * is pending.
 *
 * JUSTLOOKING:  Don't stop the process, just indicate whether or not
 * a signal might be pending (FORREAL is needed to tell for sure).
 *
 * XXX: Changes to the logic in these routines should be propagated
 * to lm_sigispending().  See bug 1201594.
 */

static int issig_forreal(void);
static int issig_justlooking(void);

int
issig(int why)
{
        ASSERT(why == FORREAL || why == JUSTLOOKING);

        return ((why == FORREAL)? issig_forreal() : issig_justlooking());
}
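
/*
 * Typical caller pattern, for illustration only (a sketch, not code from
 * this file): the sleep primitives consult issig(JUSTLOOKING) while a
 * thread is on the sleep queue and do the definitive issig(FORREAL) check
 * later, so a driver written as
 *
 *      mutex_enter(&lock);
 *      while (!ready) {
 *              if (cv_wait_sig(&cv, &lock) == 0) {
 *                      mutex_exit(&lock);
 *                      return (EINTR);         (interrupted by a signal)
 *              }
 *      }
 *      mutex_exit(&lock);
 *
 * ultimately depends on the FORREAL/JUSTLOOKING distinction above.
 */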


static int
issig_justlooking(void)
{
        kthread_t *t = curthread;
        klwp_t *lwp = ttolwp(t);
        proc_t *p = ttoproc(t);
        k_sigset_t set;

        /*
         * This function answers the question:
         * "Is there any reason to call issig_forreal()?"
         *
         * We have to answer the question w/o grabbing any locks
         * because we are (most likely) being called after we
         * put ourselves on the sleep queue.
         */

        if (t->t_dtrace_stop | t->t_dtrace_sig)
                return (1);

        /*
         * Another piece of complexity in this process.  When single-stepping a
         * process, we don't want an intervening signal or TP_PAUSE request to
         * suspend the current thread.  Otherwise, the controlling process will
         * hang because we will be stopped with TS_PSTART set in t_schedflag.
         * We will trigger any remaining signals when we re-enter the kernel on
         * the single step trap.
         */
        if (lwp->lwp_pcb.pcb_flags & NORMAL_STEP)
                return (0);

        if ((lwp->lwp_asleep && MUSTRETURN(p, t)) ||
            (p->p_flag & (SEXITLWPS|SKILLED)) ||
            (lwp->lwp_nostop == 0 &&
            (p->p_stopsig | (p->p_flag & (SHOLDFORK1|SHOLDWATCH)) |
            (t->t_proc_flag &
            (TP_PRSTOP|TP_HOLDLWP|TP_CHKPT|TP_PAUSE)))) ||
            lwp->lwp_cursig)
                return (1);

        if (p->p_flag & SVFWAIT)
                return (0);
        set = p->p_sig;
        sigorset(&set, &t->t_sig);
        if (schedctl_sigblock(t))       /* all blockable signals blocked */
                sigandset(&set, &cantmask);
        else
                sigdiffset(&set, &t->t_hold);
        if (p->p_flag & SVFORK)
                sigdiffset(&set, &holdvfork);

        if (!sigisempty(&set)) {
                int sig;

                for (sig = 1; sig < NSIG; sig++) {
                        if (sigismember(&set, sig) &&
                            (tracing(p, sig) ||
                            sigismember(&t->t_sigwait, sig) ||
                            !sigismember(&p->p_ignore, sig))) {
                                /*
                                 * Don't promote a signal that will stop
                                 * the process when lwp_nostop is set.
                                 */
                                if (!lwp->lwp_nostop ||
                                    PTOU(p)->u_signal[sig-1] != SIG_DFL ||
                                    !sigismember(&stopdefault, sig))
                                        return (1);
                        }
                }
        }

        return (0);
}

static int
issig_forreal(void)
{
        int sig = 0, ext = 0;
        kthread_t *t = curthread;
        klwp_t *lwp = ttolwp(t);
        proc_t *p = ttoproc(t);
        int toproc = 0;
        int sigcld_found = 0;
        int nostop_break = 0;

        ASSERT(t->t_state == TS_ONPROC);

        mutex_enter(&p->p_lock);
        schedctl_finish_sigblock(t);

        if (t->t_dtrace_stop | t->t_dtrace_sig) {
                if (t->t_dtrace_stop) {
                        /*
                         * If DTrace's "stop" action has been invoked on us,
                         * set TP_PRSTOP.
                         */
                        t->t_proc_flag |= TP_PRSTOP;
                }

                if (t->t_dtrace_sig != 0) {
                        k_siginfo_t info;

                        /*
                         * Post the signal generated as the result of
                         * DTrace's "raise" action as a normal signal before
                         * the full-fledged signal checking begins.
                         */
                        bzero(&info, sizeof (info));
                        info.si_signo = t->t_dtrace_sig;
                        info.si_code = SI_DTRACE;

                        sigaddq(p, NULL, &info, KM_NOSLEEP);

                        t->t_dtrace_sig = 0;
                }
        }

        for (;;) {
                if (p->p_flag & (SEXITLWPS|SKILLED)) {
                        lwp->lwp_cursig = sig = SIGKILL;
                        lwp->lwp_extsig = ext = (p->p_flag & SEXTKILLED) != 0;
                        t->t_sig_check = 1;
                        break;
                }

                /*
                 * Another piece of complexity in this process.  When
                 * single-stepping a process, we don't want an intervening
                 * signal or TP_PAUSE request to suspend the current thread.
                 * Otherwise, the controlling process will hang because we will
                 * be stopped with TS_PSTART set in t_schedflag.  We will
                 * trigger any remaining signals when we re-enter the kernel on
                 * the single step trap.
                 */
                if (lwp->lwp_pcb.pcb_flags & NORMAL_STEP) {
                        sig = 0;
                        break;
                }

                /*
                 * Hold the lwp here for watchpoint manipulation.
                 */
                if ((t->t_proc_flag & TP_PAUSE) && !lwp->lwp_nostop) {
                        stop(PR_SUSPENDED, SUSPEND_PAUSE);
                        continue;
                }

                if (lwp->lwp_asleep && MUSTRETURN(p, t)) {
                        if ((sig = lwp->lwp_cursig) != 0) {
                                /*
                                 * Make sure we call ISSIG() in post_syscall()
                                 * to re-validate this current signal.
                                 */
                                t->t_sig_check = 1;
                        }
                        break;
                }

                /*
                 * If the request is PR_CHECKPOINT, ignore the rest of signals
                 * or requests.  Honor other stop requests or signals later.
                 * Go back to top of loop here to check if an exit or hold
                 * event has occurred while stopped.
                 */
                if ((t->t_proc_flag & TP_CHKPT) && !lwp->lwp_nostop) {
                        stop(PR_CHECKPOINT, 0);
                        continue;
                }

                /*
                 * Honor SHOLDFORK1, SHOLDWATCH, and TP_HOLDLWP before dealing
                 * with signals or /proc.  Another lwp is executing fork1(),
                 * or is undergoing watchpoint activity (remapping a page),
                 * or is executing lwp_suspend() on this lwp.
                 * Again, go back to top of loop to check if an exit
                 * or hold event has occurred while stopped.
                 */
                if (((p->p_flag & (SHOLDFORK1|SHOLDWATCH)) ||
                    (t->t_proc_flag & TP_HOLDLWP)) && !lwp->lwp_nostop) {
                        stop(PR_SUSPENDED, SUSPEND_NORMAL);
                        continue;
                }

                /*
                 * Honor requested stop before dealing with the
                 * current signal; a debugger may change it.
                 * Do not want to go back to loop here since this is a special
                 * stop that means: make incremental progress before the next
                 * stop. The danger is that returning to top of loop would most
                 * likely drop the thread right back here to stop soon after it
                 * was continued, violating the incremental progress request.
                 */
                if ((t->t_proc_flag & TP_PRSTOP) && !lwp->lwp_nostop)
                        stop(PR_REQUESTED, 0);

                /*
                 * If a debugger wants us to take a signal it will have
                 * left it in lwp->lwp_cursig.  If lwp_cursig has been cleared
                 * or if it's being ignored, we continue on looking for another
                 * signal.  Otherwise we return the specified signal, provided
                 * it's not a signal that causes a job control stop.
                 *
                 * When stopped on PR_JOBCONTROL, there is no current
                 * signal; we cancel lwp->lwp_cursig temporarily before
                 * calling isjobstop().  The current signal may be reset
                 * by a debugger while we are stopped in isjobstop().
                 *
                 * If the current thread is accepting the signal
                 * (via sigwait(), sigwaitinfo(), or sigtimedwait()),
                 * we allow the signal to be accepted, even if it is
                 * being ignored, and without causing a job control stop.
                 */
                if ((sig = lwp->lwp_cursig) != 0) {
                        ext = lwp->lwp_extsig;
                        lwp->lwp_cursig = 0;
                        lwp->lwp_extsig = 0;
                        if (sigismember(&t->t_sigwait, sig) ||
                            (!sigismember(&p->p_ignore, sig) &&
                            !isjobstop(sig))) {
                                if (p->p_flag & (SEXITLWPS|SKILLED)) {
                                        sig = SIGKILL;
                                        ext = (p->p_flag & SEXTKILLED) != 0;
                                }
                                lwp->lwp_cursig = (uchar_t)sig;
                                lwp->lwp_extsig = (uchar_t)ext;
                                break;
                        }
                        /*
                         * The signal is being ignored or it caused a
                         * job-control stop.  If another current signal
                         * has not been established, return the current
                         * siginfo, if any, to the memory manager.
                         */
                        if (lwp->lwp_cursig == 0 && lwp->lwp_curinfo != NULL) {
                                siginfofree(lwp->lwp_curinfo);
                                lwp->lwp_curinfo = NULL;
                        }
                        /*
                         * Loop around again in case we were stopped
                         * on a job control signal and a /proc stop
                         * request was posted or another current signal
                         * was established while we were stopped.
                         */
                        continue;
                }

                if (p->p_stopsig && !lwp->lwp_nostop &&
                    curthread != p->p_agenttp) {
                        /*
                         * Some lwp in the process has already stopped
                         * showing PR_JOBCONTROL.  This is a stop in
                         * sympathy with the other lwp, even if this
                         * lwp is blocking the stopping signal.
                         */
                        stop(PR_JOBCONTROL, p->p_stopsig);
                        continue;
                }

                /*
                 * Loop on the pending signals until we find a
                 * non-held signal that is traced or not ignored.
                 * First check the signals pending for the lwp,
                 * then the signals pending for the process as a whole.
                 */
                for (;;) {
                        if ((sig = fsig(&t->t_sig, t)) != 0) {
                                toproc = 0;
                                if (tracing(p, sig) ||
                                    sigismember(&t->t_sigwait, sig) ||
                                    !sigismember(&p->p_ignore, sig)) {
                                        if (sigismember(&t->t_extsig, sig))
                                                ext = 1;
                                        break;
                                }
                                sigdelset(&t->t_sig, sig);
                                sigdelset(&t->t_extsig, sig);
                                sigdelq(p, t, sig);
                        } else if ((sig = fsig(&p->p_sig, t)) != 0) {
                                if (sig == SIGCLD)
                                        sigcld_found = 1;
                                toproc = 1;
                                if (tracing(p, sig) ||
                                    sigismember(&t->t_sigwait, sig) ||
                                    !sigismember(&p->p_ignore, sig)) {
                                        if (sigismember(&p->p_extsig, sig))
                                                ext = 1;
                                        break;
                                }
                                sigdelset(&p->p_sig, sig);
                                sigdelset(&p->p_extsig, sig);
                                sigdelq(p, NULL, sig);
                        } else {
                                /* no signal was found */
                                break;
                        }
                }

                if (sig == 0) { /* no signal was found */
                        if (p->p_flag & (SEXITLWPS|SKILLED)) {
                                lwp->lwp_cursig = SIGKILL;
                                sig = SIGKILL;
                                ext = (p->p_flag & SEXTKILLED) != 0;
                        }
                        break;
                }

                /*
                 * If we have been informed not to stop (i.e., we are being
                 * called from within a network operation), then don't promote
                 * the signal at this time, just return the signal number.
                 * We will call issig() again later when it is safe.
                 *
                 * fsig() does not return a jobcontrol stopping signal
                 * with a default action of stopping the process if
                 * lwp_nostop is set, so we won't be causing a bogus
                 * EINTR by this action.  (Such a signal is eaten by
                 * isjobstop() when we loop around to do final checks.)
                 */
                if (lwp->lwp_nostop) {
                        nostop_break = 1;
                        break;
                }

                /*
                 * Promote the signal from pending to current.
                 *
                 * Note that sigdeq() will set lwp->lwp_curinfo to NULL
                 * if no siginfo_t exists for this signal.
                 */
                lwp->lwp_cursig = (uchar_t)sig;
                lwp->lwp_extsig = (uchar_t)ext;
                t->t_sig_check = 1;     /* so post_syscall will see signal */
                ASSERT(lwp->lwp_curinfo == NULL);
                sigdeq(p, toproc ? NULL : t, sig, &lwp->lwp_curinfo);

                if (tracing(p, sig))
                        stop(PR_SIGNALLED, sig);

                /*
                 * Loop around to check for requested stop before
                 * performing the usual current-signal actions.
                 */
        }

        mutex_exit(&p->p_lock);

        /*
         * If SIGCLD was dequeued from the process's signal queue,
         * search for other pending SIGCLD's from the list of children.
         */
        if (sigcld_found)
                sigcld_repost();

        if (sig != 0)
                (void) undo_watch_step(NULL);

        /*
         * If we have been blocked since p_lock was dropped above, then
         * this promoted signal might already have been handled while we
         * were on our way back from the sleep queue, so just ignore it.
         * If we have been informed not to stop, just return the signal
         * number.  Also see comments above.
         */
        if (!nostop_break) {
                sig = lwp->lwp_cursig;
        }

        return (sig != 0);
}

/*
 * Return true if the process is currently stopped showing PR_JOBCONTROL.
 * This is true only if all of the process's lwp's are so stopped.
 * If this is asked by one of the lwps in the process, exclude that lwp.
 */
int
jobstopped(proc_t *p)
{
        kthread_t *t;

        ASSERT(MUTEX_HELD(&p->p_lock));

        if ((t = p->p_tlist) == NULL)
                return (0);

        do {
                thread_lock(t);
                /* ignore current, zombie and suspended lwps in the test */
                if (!(t == curthread || t->t_state == TS_ZOMB ||
                    SUSPENDED(t)) &&
                    (t->t_state != TS_STOPPED ||
                    t->t_whystop != PR_JOBCONTROL)) {
                        thread_unlock(t);
                        return (0);
                }
                thread_unlock(t);
        } while ((t = t->t_forw) != p->p_tlist);

        return (1);
}

/*
 * Put ourself (curthread) into the stopped state and notify tracers.
 */
void
stop(int why, int what)
{
        kthread_t       *t = curthread;
        proc_t          *p = ttoproc(t);
        klwp_t          *lwp = ttolwp(t);
        kthread_t       *tx;
        lwpent_t        *lep;
        int             procstop;
        int             flags = TS_ALLSTART;
        hrtime_t        stoptime;

        /*
         * Can't stop a system process.
         */
        if (p == NULL || lwp == NULL || (p->p_flag & SSYS) || p->p_as == &kas)
                return;

        ASSERT(MUTEX_HELD(&p->p_lock));

        if (why != PR_SUSPENDED && why != PR_CHECKPOINT) {
                /*
                 * Don't stop an lwp with SIGKILL pending.
                 * Don't stop if the process or lwp is exiting.
                 */
                if (lwp->lwp_cursig == SIGKILL ||
                    sigismember(&t->t_sig, SIGKILL) ||
                    sigismember(&p->p_sig, SIGKILL) ||
                    (t->t_proc_flag & TP_LWPEXIT) ||
                    (p->p_flag & (SEXITLWPS|SKILLED))) {
                        p->p_stopsig = 0;
                        t->t_proc_flag &= ~(TP_PRSTOP|TP_PRVSTOP);
                        return;
                }
        }

        /*
         * Make sure we don't deadlock on a recursive call to prstop().
         * prstop() sets the lwp_nostop flag.
         */
        if (lwp->lwp_nostop)
                return;

        /*
         * Make sure the lwp is in an orderly state for inspection
         * by a debugger through /proc or for dumping via core().
         */
        schedctl_finish_sigblock(t);
        t->t_proc_flag |= TP_STOPPING;  /* must set before dropping p_lock */
        mutex_exit(&p->p_lock);
        stoptime = gethrtime();
        prstop(why, what);
        (void) undo_watch_step(NULL);
        mutex_enter(&p->p_lock);
        ASSERT(t->t_state == TS_ONPROC);

        switch (why) {
        case PR_CHECKPOINT:
                /*
                 * The situation may have changed since we dropped
                 * and reacquired p->p_lock. Double-check now
                 * whether we should stop or not.
                 */
                if (!(t->t_proc_flag & TP_CHKPT)) {
                        t->t_proc_flag &= ~TP_STOPPING;
                        return;
                }
                t->t_proc_flag &= ~TP_CHKPT;
                flags &= ~TS_RESUME;
                break;

        case PR_JOBCONTROL:
                ASSERT(what == SIGSTOP || what == SIGTSTP ||
                    what == SIGTTIN || what == SIGTTOU);
                flags &= ~TS_XSTART;
                break;

        case PR_SUSPENDED:
                ASSERT(what == SUSPEND_NORMAL || what == SUSPEND_PAUSE);
                /*
                 * The situation may have changed since we dropped
                 * and reacquired p->p_lock.  Double-check now
                 * whether we should stop or not.
                 */
                if (what == SUSPEND_PAUSE) {
                        if (!(t->t_proc_flag & TP_PAUSE)) {
                                t->t_proc_flag &= ~TP_STOPPING;
                                return;
                        }
                        flags &= ~TS_UNPAUSE;
                } else {
                        if (!((t->t_proc_flag & TP_HOLDLWP) ||
                            (p->p_flag & (SHOLDFORK|SHOLDFORK1|SHOLDWATCH)))) {
                                t->t_proc_flag &= ~TP_STOPPING;
                                return;
                        }
                        /*
                         * If SHOLDFORK is in effect and we are stopping
                         * while asleep (not at the top of the stack),
                         * we return now to allow the hold to take effect
                         * when we reach the top of the kernel stack.
                         */
                        if (lwp->lwp_asleep && (p->p_flag & SHOLDFORK)) {
                                t->t_proc_flag &= ~TP_STOPPING;
                                return;
                        }
                        flags &= ~TS_CSTART;
                }
                break;

        default:        /* /proc stop */
                flags &= ~TS_PSTART;
                /*
                 * Do synchronous stop unless the async-stop flag is set.
                 * If why is PR_REQUESTED and t->t_dtrace_stop flag is set,
                 * then no debugger is present and we also do synchronous stop.
                 */
                if ((why != PR_REQUESTED || t->t_dtrace_stop) &&
                    !(p->p_proc_flag & P_PR_ASYNC)) {
                        int notify;

                        for (tx = t->t_forw; tx != t; tx = tx->t_forw) {
                                notify = 0;
                                thread_lock(tx);
                                if (ISTOPPED(tx) ||
                                    (tx->t_proc_flag & TP_PRSTOP)) {
                                        thread_unlock(tx);
                                        continue;
                                }
                                tx->t_proc_flag |= TP_PRSTOP;
                                tx->t_sig_check = 1;
                                if (tx->t_state == TS_SLEEP &&
                                    (tx->t_flag & T_WAKEABLE)) {
                                        /*
                                         * Don't actually wake it up if it's
                                         * in one of the lwp_*() syscalls.
                                         * Mark it virtually stopped and
                                         * notify /proc waiters (below).
                                         */
                                        if (tx->t_wchan0 == NULL)
                                                setrun_locked(tx);
                                        else {
                                                tx->t_proc_flag |= TP_PRVSTOP;
                                                tx->t_stoptime = stoptime;
                                                notify = 1;
                                        }
                                }

                                /* Move waiting thread to run queue */
                                if (ISWAITING(tx))
                                        setrun_locked(tx);

                                /*
                                 * force the thread into the kernel
                                 * if it is not already there.
                                 */
                                if (tx->t_state == TS_ONPROC &&
                                    tx->t_cpu != CPU)
                                        poke_cpu(tx->t_cpu->cpu_id);
                                thread_unlock(tx);
                                lep = p->p_lwpdir[tx->t_dslot].ld_entry;
                                if (notify && lep->le_trace)
                                        prnotify(lep->le_trace);
                        }
                        /*
                         * We do this just in case one of the threads we asked
                         * to stop is in holdlwps() (called from cfork()) or
                         * lwp_suspend().
                         */
                        cv_broadcast(&p->p_holdlwps);
                }
                break;
        }

        t->t_stoptime = stoptime;

        if (why == PR_JOBCONTROL || (why == PR_SUSPENDED && p->p_stopsig)) {
                /*
                 * Determine if the whole process is jobstopped.
                 */
                if (jobstopped(p)) {
                        sigqueue_t *sqp;
                        int sig;

                        if ((sig = p->p_stopsig) == 0)
                                p->p_stopsig = (uchar_t)(sig = what);
                        mutex_exit(&p->p_lock);
                        sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
                        mutex_enter(&pidlock);
                        /*
                         * The last lwp to stop notifies the parent.
                         * Turn off the CLDCONT flag now so the first
                         * lwp to continue knows what to do.
                         */
                        p->p_pidflag &= ~CLDCONT;
                        p->p_wcode = CLD_STOPPED;
                        p->p_wdata = sig;
                        sigcld(p, sqp);
                        /*
                         * Grab p->p_lock before releasing pidlock so the
                         * parent and the child don't have a race condition.
                         */
                        mutex_enter(&p->p_lock);
                        mutex_exit(&pidlock);
                        p->p_stopsig = 0;
                } else if (why == PR_JOBCONTROL && p->p_stopsig == 0) {
                        /*
                         * Set p->p_stopsig and wake up sleeping lwps
                         * so they will stop in sympathy with this lwp.
                         */
                        p->p_stopsig = (uchar_t)what;
                        pokelwps(p);
                        /*
                         * We do this just in case one of the threads we asked
                         * to stop is in holdlwps() (called from cfork()) or
                         * lwp_suspend().
                         */
                        cv_broadcast(&p->p_holdlwps);
                }
        }

        if (why != PR_JOBCONTROL && why != PR_CHECKPOINT) {
                /*
                 * Do process-level notification when all lwps are
                 * either stopped on events of interest to /proc
                 * or are stopped showing PR_SUSPENDED or are zombies.
                 */
                procstop = 1;
                for (tx = t->t_forw; procstop && tx != t; tx = tx->t_forw) {
                        if (VSTOPPED(tx))
                                continue;
                        thread_lock(tx);
                        switch (tx->t_state) {
                        case TS_ZOMB:
                                break;
                        case TS_STOPPED:
                                /* neither ISTOPPED nor SUSPENDED? */
                                if ((tx->t_schedflag &
                                    (TS_CSTART | TS_UNPAUSE | TS_PSTART)) ==
                                    (TS_CSTART | TS_UNPAUSE | TS_PSTART))
                                        procstop = 0;
                                break;
                        case TS_SLEEP:
                                /* not paused for watchpoints? */
                                if (!(tx->t_flag & T_WAKEABLE) ||
                                    tx->t_wchan0 == NULL ||
                                    !(tx->t_proc_flag & TP_PAUSE))
                                        procstop = 0;
                                break;
                        default:
                                procstop = 0;
                                break;
                        }
                        thread_unlock(tx);
                }
                if (procstop) {
                        /* there must not be any remapped watched pages now */
                        ASSERT(p->p_mapcnt == 0);
                        if (p->p_proc_flag & P_PR_PTRACE) {
                                /* ptrace() compatibility */
                                mutex_exit(&p->p_lock);
                                mutex_enter(&pidlock);
                                p->p_wcode = CLD_TRAPPED;
                                p->p_wdata = (why == PR_SIGNALLED)?
                                    what : SIGTRAP;
                                cv_broadcast(&p->p_parent->p_cv);
                                /*
                                 * Grab p->p_lock before releasing pidlock so
                                 * parent and child don't have a race condition.
                                 */
                                mutex_enter(&p->p_lock);
                                mutex_exit(&pidlock);
                        }
                        if (p->p_trace)                 /* /proc */
                                prnotify(p->p_trace);
                        cv_broadcast(&pr_pid_cv[p->p_slot]); /* pauselwps() */
                        cv_broadcast(&p->p_holdlwps);   /* holdwatch() */
                }
                if (why != PR_SUSPENDED) {
                        lep = p->p_lwpdir[t->t_dslot].ld_entry;
                        if (lep->le_trace)              /* /proc */
                                prnotify(lep->le_trace);
                        /*
                         * Special notification for creation of the agent lwp.
                         */
                        if (t == p->p_agenttp &&
                            (t->t_proc_flag & TP_PRSTOP) &&
                            p->p_trace)
                                prnotify(p->p_trace);
                        /*
                         * The situation may have changed since we dropped
                         * and reacquired p->p_lock. Double-check now
                         * whether we should stop or not.
                         */
                        if (!(t->t_proc_flag & TP_STOPPING)) {
                                if (t->t_proc_flag & TP_PRSTOP)
                                        t->t_proc_flag |= TP_STOPPING;
                        }
                        t->t_proc_flag &= ~(TP_PRSTOP|TP_PRVSTOP);
                        prnostep(lwp);
                }
        }

        if (why == PR_SUSPENDED) {

                /*
                 * We always broadcast in the case of SUSPEND_PAUSE.  This is
                 * because checks for TP_PAUSE take precedence over checks for
                 * SHOLDWATCH.  If a thread is trying to stop because of
                 * SUSPEND_PAUSE and tries to do a holdwatch(), it will be
                 * waiting for the rest of the threads to enter a stopped state.
                 * If we are stopping for a SUSPEND_PAUSE, we may be the last
                 * lwp and not know it, so broadcast just in case.
                 */
                if (what == SUSPEND_PAUSE ||
                    --p->p_lwprcnt == 0 || (t->t_proc_flag & TP_HOLDLWP))
                        cv_broadcast(&p->p_holdlwps);

        }

        /*
         * Need to do this here (rather than after the thread is officially
         * stopped) because we can't call mutex_enter from a stopped thread.
         */
        if (why == PR_CHECKPOINT)
                del_one_utstop();

        thread_lock(t);
        ASSERT((t->t_schedflag & TS_ALLSTART) == 0);
        t->t_schedflag |= flags;
        t->t_whystop = (short)why;
        t->t_whatstop = (short)what;
        CL_STOP(t, why, what);
        (void) new_mstate(t, LMS_STOPPED);
        thread_stop(t);                 /* set stop state and drop lock */

        if (why != PR_SUSPENDED && why != PR_CHECKPOINT) {
                /*
                 * We may have gotten a SIGKILL or a SIGCONT when
                 * we released p->p_lock; make one last check.
                 * Also check for a /proc run-on-last-close.
                 */
                if (sigismember(&t->t_sig, SIGKILL) ||
                    sigismember(&p->p_sig, SIGKILL) ||
                    (t->t_proc_flag & TP_LWPEXIT) ||
                    (p->p_flag & (SEXITLWPS|SKILLED))) {
                        p->p_stopsig = 0;
                        thread_lock(t);
                        t->t_schedflag |= TS_XSTART | TS_PSTART;
                        setrun_locked(t);
                        thread_unlock_nopreempt(t);
                } else if (why == PR_JOBCONTROL) {
                        if (p->p_flag & SSCONT) {
                                /*
                                 * This resulted from a SIGCONT posted
                                 * while we were not holding p->p_lock.
                                 */
                                p->p_stopsig = 0;
                                thread_lock(t);
                                t->t_schedflag |= TS_XSTART;
                                setrun_locked(t);
                                thread_unlock_nopreempt(t);
                        }
                } else if (!(t->t_proc_flag & TP_STOPPING)) {
                        /*
                         * This resulted from a /proc run-on-last-close.
                         */
                        thread_lock(t);
                        t->t_schedflag |= TS_PSTART;
                        setrun_locked(t);
                        thread_unlock_nopreempt(t);
                }
        }

        t->t_proc_flag &= ~TP_STOPPING;
        mutex_exit(&p->p_lock);

        swtch();
        setallwatch();  /* reestablish any watchpoints set while stopped */
        mutex_enter(&p->p_lock);
        prbarrier(p);   /* barrier against /proc locking */
}

/* Interface for resetting user thread stop count. */
void
utstop_init(void)
{
        mutex_enter(&thread_stop_lock);
        num_utstop = 0;
        mutex_exit(&thread_stop_lock);
}

/* Interface for registering a user thread stop request. */
void
add_one_utstop(void)
{
        mutex_enter(&thread_stop_lock);
        num_utstop++;
        mutex_exit(&thread_stop_lock);
}

/* Interface for cancelling a user thread stop request */
void
del_one_utstop(void)
{
        mutex_enter(&thread_stop_lock);
        num_utstop--;
        if (num_utstop == 0)
                cv_broadcast(&utstop_cv);
        mutex_exit(&thread_stop_lock);
}

/* Interface to wait for all user threads to be stopped */
void
utstop_timedwait(clock_t ticks)
{
        mutex_enter(&thread_stop_lock);
        if (num_utstop > 0)
                (void) cv_reltimedwait(&utstop_cv, &thread_stop_lock, ticks,
                    TR_CLOCK_TICK);
        mutex_exit(&thread_stop_lock);
}
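
/*
 * Sketch of the intended protocol for the utstop interfaces above (the
 * caller is assumed to be the checkpoint/resume code; that assumption is
 * not visible in this file): the coordinator calls utstop_init(), then
 * add_one_utstop() once per lwp it asks to stop; each such lwp calls
 * del_one_utstop() from stop() when it reaches PR_CHECKPOINT, and the
 * coordinator waits for the count to drain with utstop_timedwait().
 */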

/*
 * Perform the action specified by the current signal.
 * The usual sequence is:
 *      if (issig())
 *              psig();
 * The signal bit has already been cleared by issig(),
 * the current signal number has been stored in lwp_cursig,
 * and the current siginfo is now referenced by lwp_curinfo.
 */
void
psig(void)
{
        kthread_t *t = curthread;
        proc_t *p = ttoproc(t);
        klwp_t *lwp = ttolwp(t);
        void (*func)();
        int sig, rc, code, ext;
        pid_t pid = -1;
        id_t ctid = 0;
        zoneid_t zoneid = -1;
        sigqueue_t *sqp = NULL;
        uint32_t auditing = AU_AUDITING();

        mutex_enter(&p->p_lock);
        schedctl_finish_sigblock(t);
        code = CLD_KILLED;

        if (p->p_flag & SEXITLWPS) {
                lwp_exit();
                return;                 /* not reached */
        }
        sig = lwp->lwp_cursig;
        ext = lwp->lwp_extsig;

        ASSERT(sig < NSIG);

        /*
         * Re-check lwp_cursig after we acquire p_lock.  Since p_lock was
         * dropped between issig() and psig(), a debugger may have cleared
         * lwp_cursig via /proc in the intervening window.
         */
        if (sig == 0) {
                if (lwp->lwp_curinfo) {
                        siginfofree(lwp->lwp_curinfo);
                        lwp->lwp_curinfo = NULL;
                }
                if (t->t_flag & T_TOMASK) {     /* sigsuspend or pollsys */
                        t->t_flag &= ~T_TOMASK;
                        t->t_hold = lwp->lwp_sigoldmask;
                }
                mutex_exit(&p->p_lock);
                return;
        }
        func = PTOU(curproc)->u_signal[sig-1];

        /*
         * The signal disposition could have changed since we promoted
         * this signal from pending to current (we dropped p->p_lock).
         * This can happen only in a multi-threaded process.
         */
        if (sigismember(&p->p_ignore, sig) ||
            (func == SIG_DFL && sigismember(&stopdefault, sig))) {
                lwp->lwp_cursig = 0;
                lwp->lwp_extsig = 0;
                if (lwp->lwp_curinfo) {
                        siginfofree(lwp->lwp_curinfo);
                        lwp->lwp_curinfo = NULL;
                }
                if (t->t_flag & T_TOMASK) {     /* sigsuspend or pollsys */
                        t->t_flag &= ~T_TOMASK;
                        t->t_hold = lwp->lwp_sigoldmask;
                }
                mutex_exit(&p->p_lock);
                return;
        }

        /*
         * We check lwp_curinfo first since pr_setsig can actually
         * stuff a sigqueue_t there for SIGKILL.
         */
        if (lwp->lwp_curinfo) {
                sqp = lwp->lwp_curinfo;
        } else if (sig == SIGKILL && p->p_killsqp) {
                sqp = p->p_killsqp;
        }

        if (sqp != NULL) {
                if (SI_FROMUSER(&sqp->sq_info)) {
                        pid = sqp->sq_info.si_pid;
                        ctid = sqp->sq_info.si_ctid;
                        zoneid = sqp->sq_info.si_zoneid;
                }
                /*
                 * If we have a sigqueue_t, its sq_external value
                 * trumps the lwp_extsig value.  It is theoretically
                 * possible to make lwp_extsig reflect reality, but it
                 * would unnecessarily complicate things elsewhere.
                 */
                ext = sqp->sq_external;
        }

        if (func == SIG_DFL) {
                mutex_exit(&p->p_lock);
1356                 DTRACE_PROC3(signal__handle, int, sig, k_siginfo_t *,
1357                     NULL, void (*)(void), func);
1358         } else {
1359                 k_siginfo_t *sip = NULL;
1360 
1361                 /*
1362                  * If DTrace user-land tracing is active, give DTrace a
1363                  * chance to defer the signal until after tracing is
1364                  * complete.
1365                  */
1366                 if (t->t_dtrace_on && dtrace_safe_defer_signal()) {
1367                         mutex_exit(&p->p_lock);
1368                         return;
1369                 }
1370 
                /*
                 * Save the siginfo pointer here, in case the
                 * signal's reset bit is on.
                 *
                 * The presence of a current signal prevents paging
                 * from succeeding over a network.  We copy the current
                 * signal information to the side and cancel the current
                 * signal so that sendsig() will succeed.
                 */
1380                 if (sigismember(&p->p_siginfo, sig)) {
1381                         sip = &lwp->lwp_siginfo;
1382                         if (sqp) {
1383                                 bcopy(&sqp->sq_info, sip, sizeof (*sip));
1384                                 /*
1385                                  * If we were interrupted out of a system call
1386                                  * due to pthread_cancel(), inform libc.
1387                                  */
1388                                 if (sig == SIGCANCEL &&
1389                                     sip->si_code == SI_LWP &&
1390                                     t->t_sysnum != 0)
1391                                         schedctl_cancel_eintr();
1392                         } else if (sig == SIGPROF && sip->si_signo == SIGPROF &&
1393                             t->t_rprof != NULL && t->t_rprof->rp_anystate) {
1394                                 /* EMPTY */;
1395                         } else {
1396                                 bzero(sip, sizeof (*sip));
1397                                 sip->si_signo = sig;
1398                                 sip->si_code = SI_NOINFO;
1399                         }
1400                 }
1401 
1402                 if (t->t_flag & T_TOMASK)
1403                         t->t_flag &= ~T_TOMASK;
1404                 else
1405                         lwp->lwp_sigoldmask = t->t_hold;
1406                 sigorset(&t->t_hold, &PTOU(curproc)->u_sigmask[sig-1]);
1407                 if (!sigismember(&PTOU(curproc)->u_signodefer, sig))
1408                         sigaddset(&t->t_hold, sig);
1409                 if (sigismember(&PTOU(curproc)->u_sigresethand, sig))
1410                         setsigact(sig, SIG_DFL, &nullsmask, 0);
1411 
1412                 DTRACE_PROC3(signal__handle, int, sig, k_siginfo_t *,
1413                     sip, void (*)(void), func);
1414 
1415                 lwp->lwp_cursig = 0;
1416                 lwp->lwp_extsig = 0;
1417                 if (lwp->lwp_curinfo) {
1418                         /* p->p_killsqp is freed by freeproc */
1419                         siginfofree(lwp->lwp_curinfo);
1420                         lwp->lwp_curinfo = NULL;
1421                 }
1422                 mutex_exit(&p->p_lock);
1423                 lwp->lwp_ru.nsignals++;
1424 
1425                 if (p->p_model == DATAMODEL_NATIVE)
1426                         rc = sendsig(sig, sip, func);
1427 #ifdef _SYSCALL32_IMPL
1428                 else
1429                         rc = sendsig32(sig, sip, func);
1430 #endif  /* _SYSCALL32_IMPL */
1431                 if (rc)
1432                         return;
1433                 sig = lwp->lwp_cursig = SIGSEGV;
1434                 ext = 0;        /* lwp_extsig was set above */
1435                 pid = -1;
1436                 ctid = 0;
1437         }
1438 
1439         if (sigismember(&coredefault, sig)) {
1440                 /*
1441                  * Terminate all LWPs but don't discard them.
1442                  * If another lwp beat us to the punch by calling exit(),
1443                  * evaporate now.
1444                  */
1445                 proc_is_exiting(p);
1446                 if (exitlwps(1) != 0) {
1447                         mutex_enter(&p->p_lock);
1448                         lwp_exit();
1449                 }
1450                 /* if we got a SIGKILL from anywhere, no core dump */
1451                 if (p->p_flag & SKILLED) {
1452                         sig = SIGKILL;
1453                         ext = (p->p_flag & SEXTKILLED) != 0;
1454                 } else {
1455                         if (auditing)           /* audit core dump */
1456                                 audit_core_start(sig);
1457                         if (core(sig, ext) == 0)
1458                                 code = CLD_DUMPED;
1459                         if (auditing)           /* audit core dump */
1460                                 audit_core_finish(code);
1461                 }
1462         }
1463 
        /*
         * If the signal came from outside this process contract,
         * generate a contract event before exiting.
         */
        if (ext)
                contract_process_sig(p->p_ct_process, p, sig, pid, ctid,
                    zoneid);
1477 
1478         exit(code, sig);
1479 }
1480 
1481 /*
1482  * Find next unheld signal in ssp for thread t.
1483  */
1484 int
1485 fsig(k_sigset_t *ssp, kthread_t *t)
1486 {
1487         proc_t *p = ttoproc(t);
1488         user_t *up = PTOU(p);
1489         int i;
1490         k_sigset_t temp;
1491 
1492         ASSERT(MUTEX_HELD(&p->p_lock));
1493 
1494         /*
1495          * Don't promote any signals for the parent of a vfork()d
1496          * child that hasn't yet released the parent's memory.
1497          */
1498         if (p->p_flag & SVFWAIT)
1499                 return (0);
1500 
1501         temp = *ssp;
1502         sigdiffset(&temp, &t->t_hold);
1503 
1504         /*
1505          * Don't promote stopping signals (except SIGSTOP) for a child
1506          * of vfork() that hasn't yet released the parent's memory.
1507          */
1508         if (p->p_flag & SVFORK)
1509                 sigdiffset(&temp, &holdvfork);
1510 
1511         /*
1512          * Don't promote a signal that will stop
1513          * the process when lwp_nostop is set.
1514          */
1515         if (ttolwp(t)->lwp_nostop) {
1516                 sigdelset(&temp, SIGSTOP);
1517                 if (!p->p_pgidp->pid_pgorphaned) {
1518                         if (up->u_signal[SIGTSTP-1] == SIG_DFL)
1519                                 sigdelset(&temp, SIGTSTP);
1520                         if (up->u_signal[SIGTTIN-1] == SIG_DFL)
1521                                 sigdelset(&temp, SIGTTIN);
1522                         if (up->u_signal[SIGTTOU-1] == SIG_DFL)
1523                                 sigdelset(&temp, SIGTTOU);
1524                 }
1525         }
1526 
1527         /*
1528          * Choose SIGKILL and SIGPROF before all other pending signals.
1529          * The rest are promoted in signal number order.
1530          */
1531         if (sigismember(&temp, SIGKILL))
1532                 return (SIGKILL);
1533         if (sigismember(&temp, SIGPROF))
1534                 return (SIGPROF);
1535 
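        /*
         * Each word of __sigbits covers NBBY * sizeof (temp.__sigbits[0])
         * (i.e. 32) signal numbers and lowbit() returns the 1-based index
         * of the lowest bit set.  For example, a pending SIGHUP (signal 1,
         * bit 0 of word 0) yields 0 * 32 + 1 == 1.
         */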
1536         for (i = 0; i < sizeof (temp) / sizeof (temp.__sigbits[0]); i++) {
1537                 if (temp.__sigbits[i])
1538                         return ((i * NBBY * sizeof (temp.__sigbits[0])) +
1539                             lowbit(temp.__sigbits[i]));
1540         }
1541 
1542         return (0);
1543 }
1544 
1545 void
1546 setsigact(int sig, void (*disp)(), const k_sigset_t *mask, int flags)
1547 {
1548         proc_t *p = ttoproc(curthread);
1549         kthread_t *t;
1550 
1551         ASSERT(MUTEX_HELD(&p->p_lock));
1552 
1553         PTOU(curproc)->u_signal[sig - 1] = disp;
1554 
1555         /*
1556          * Honor the SA_SIGINFO flag if the signal is being caught.
1557          * Force the SA_SIGINFO flag if the signal is not being caught.
1558          * This is necessary to make sigqueue() and sigwaitinfo() work
1559          * properly together when the signal is set to default or is
1560          * being temporarily ignored.
1561          */
1562         if ((flags & SA_SIGINFO) || disp == SIG_DFL || disp == SIG_IGN)
1563                 sigaddset(&p->p_siginfo, sig);
1564         else
1565                 sigdelset(&p->p_siginfo, sig);
1566 
1567         if (disp != SIG_DFL && disp != SIG_IGN) {
1568                 sigdelset(&p->p_ignore, sig);
1569                 PTOU(curproc)->u_sigmask[sig - 1] = *mask;
1570                 if (!sigismember(&cantreset, sig)) {
1571                         if (flags & SA_RESETHAND)
1572                                 sigaddset(&PTOU(curproc)->u_sigresethand, sig);
1573                         else
1574                                 sigdelset(&PTOU(curproc)->u_sigresethand, sig);
1575                 }
1576                 if (flags & SA_NODEFER)
1577                         sigaddset(&PTOU(curproc)->u_signodefer, sig);
1578                 else
1579                         sigdelset(&PTOU(curproc)->u_signodefer, sig);
1580                 if (flags & SA_RESTART)
1581                         sigaddset(&PTOU(curproc)->u_sigrestart, sig);
1582                 else
1583                         sigdelset(&PTOU(curproc)->u_sigrestart, sig);
1584                 if (flags & SA_ONSTACK)
1585                         sigaddset(&PTOU(curproc)->u_sigonstack, sig);
1586                 else
1587                         sigdelset(&PTOU(curproc)->u_sigonstack, sig);
1588         } else if (disp == SIG_IGN ||
1589             (disp == SIG_DFL && sigismember(&ignoredefault, sig))) {
1590                 /*
1591                  * Setting the signal action to SIG_IGN results in the
1592                  * discarding of all pending signals of that signal number.
1593                  * Setting the signal action to SIG_DFL does the same *only*
1594                  * if the signal's default behavior is to be ignored.
1595                  */
1596                 sigaddset(&p->p_ignore, sig);
1597                 sigdelset(&p->p_sig, sig);
1598                 sigdelset(&p->p_extsig, sig);
1599                 sigdelq(p, NULL, sig);
1600                 t = p->p_tlist;
1601                 do {
1602                         sigdelset(&t->t_sig, sig);
1603                         sigdelset(&t->t_extsig, sig);
1604                         sigdelq(p, t, sig);
1605                 } while ((t = t->t_forw) != p->p_tlist);
1606         } else {
1607                 /*
1608                  * The signal action is being set to SIG_DFL and the default
1609                  * behavior is to do something: make sure it is not ignored.
1610                  */
1611                 sigdelset(&p->p_ignore, sig);
1612         }
1613 
1614         if (sig == SIGCLD) {
1615                 if (flags & SA_NOCLDWAIT)
1616                         p->p_flag |= SNOWAIT;
1617                 else
1618                         p->p_flag &= ~SNOWAIT;
1619 
1620                 if (flags & SA_NOCLDSTOP)
1621                         p->p_flag &= ~SJCTL;
1622                 else
1623                         p->p_flag |= SJCTL;
1624 
1625                 if ((p->p_flag & SNOWAIT) || disp == SIG_IGN) {
1626                         proc_t *cp, *tp;
1627 
1628                         mutex_exit(&p->p_lock);
1629                         mutex_enter(&pidlock);
1630                         for (cp = p->p_child; cp != NULL; cp = tp) {
1631                                 tp = cp->p_sibling;
1632                                 if (cp->p_stat == SZOMB &&
1633                                     !(cp->p_pidflag & CLDWAITPID))
1634                                         freeproc(cp);
1635                         }
1636                         mutex_exit(&pidlock);
1637                         mutex_enter(&p->p_lock);
1638                 }
1639         }
1640 }
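
/*
 * Illustrative sketch (comment only): the sigaction() system call
 * ultimately reduces to a call of roughly the following form, made with
 * p->p_lock held, to install a catcher that restarts interrupted system
 * calls and receives siginfo ('handler' is a hypothetical user-supplied
 * function):
 *
 *      k_sigset_t mask;
 *
 *      sigemptyset(&mask);
 *      sigaddset(&mask, SIGUSR2);      (also block SIGUSR2 in the handler)
 *      setsigact(SIGUSR1, handler, &mask, SA_RESTART | SA_SIGINFO);
 */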
1641 
1642 /*
1643  * Set all signal actions not already set to SIG_DFL or SIG_IGN to SIG_DFL.
1644  * Called from exec_common() for a process undergoing execve()
1645  * and from cfork() for a newly-created child of vfork().
1646  * In the vfork() case, 'p' is not the current process.
1647  * In both cases, there is only one thread in the process.
1648  */
1649 void
1650 sigdefault(proc_t *p)
1651 {
1652         kthread_t *t = p->p_tlist;
1653         struct user *up = PTOU(p);
1654         int sig;
1655 
1656         ASSERT(MUTEX_HELD(&p->p_lock));
1657 
1658         for (sig = 1; sig < NSIG; sig++) {
1659                 if (up->u_signal[sig - 1] != SIG_DFL &&
1660                     up->u_signal[sig - 1] != SIG_IGN) {
1661                         up->u_signal[sig - 1] = SIG_DFL;
1662                         sigemptyset(&up->u_sigmask[sig - 1]);
1663                         if (sigismember(&ignoredefault, sig)) {
1664                                 sigdelq(p, NULL, sig);
1665                                 sigdelq(p, t, sig);
1666                         }
1667                         if (sig == SIGCLD)
1668                                 p->p_flag &= ~(SNOWAIT|SJCTL);
1669                 }
1670         }
1671         sigorset(&p->p_ignore, &ignoredefault);
1672         sigfillset(&p->p_siginfo);
1673         sigdiffset(&p->p_siginfo, &cantmask);
1674         sigdiffset(&p->p_sig, &ignoredefault);
1675         sigdiffset(&p->p_extsig, &ignoredefault);
1676         sigdiffset(&t->t_sig, &ignoredefault);
1677         sigdiffset(&t->t_extsig, &ignoredefault);
1678 }
1679 
1680 void
1681 sigcld(proc_t *cp, sigqueue_t *sqp)
1682 {
1683         proc_t *pp = cp->p_parent;
1684 
1685         ASSERT(MUTEX_HELD(&pidlock));
1686 
1687         switch (cp->p_wcode) {
1688         case CLD_EXITED:
1689         case CLD_DUMPED:
1690         case CLD_KILLED:
1691                 ASSERT(cp->p_stat == SZOMB);
1692                 /*
1693                  * The broadcast on p_srwchan_cv is a kludge to
                 * wake up a possible thread in uadmin(A_SHUTDOWN).
1695                  */
1696                 cv_broadcast(&cp->p_srwchan_cv);
1697 
1698                 /*
1699                  * Add to newstate list of the parent
1700                  */
1701                 add_ns(pp, cp);
1702 
1703                 cv_broadcast(&pp->p_cv);
1704                 if ((pp->p_flag & SNOWAIT) ||
1705                     PTOU(pp)->u_signal[SIGCLD - 1] == SIG_IGN) {
1706                         if (!(cp->p_pidflag & CLDWAITPID))
1707                                 freeproc(cp);
1708                 } else if (!(cp->p_pidflag & CLDNOSIGCHLD)) {
1709                         post_sigcld(cp, sqp);
1710                         sqp = NULL;
1711                 }
1712                 break;
1713 
1714         case CLD_STOPPED:
1715         case CLD_CONTINUED:
1716                 cv_broadcast(&pp->p_cv);
1717                 if (pp->p_flag & SJCTL) {
1718                         post_sigcld(cp, sqp);
1719                         sqp = NULL;
1720                 }
1721                 break;
1722         }
1723 
1724         if (sqp)
1725                 siginfofree(sqp);
1726 }
1727 
1728 /*
1729  * Common code called from sigcld() and from
1730  * waitid() and issig_forreal() via sigcld_repost().
1731  * Give the parent process a SIGCLD if it does not have one pending,
1732  * else mark the child process so a SIGCLD can be posted later.
1733  */
1734 static void
1735 post_sigcld(proc_t *cp, sigqueue_t *sqp)
1736 {
1737         proc_t *pp = cp->p_parent;
1738         k_siginfo_t info;
1739 
1740         ASSERT(MUTEX_HELD(&pidlock));
1741         mutex_enter(&pp->p_lock);
1742 
1743         /*
1744          * If a SIGCLD is pending, then just mark the child process
1745          * so that its SIGCLD will be posted later, when the first
1746          * SIGCLD is taken off the queue or when the parent is ready
1747          * to receive it or accept it, if ever.
1748          */
1749         if (sigismember(&pp->p_sig, SIGCLD)) {
1750                 cp->p_pidflag |= CLDPEND;
1751         } else {
1752                 cp->p_pidflag &= ~CLDPEND;
1753                 if (sqp == NULL) {
1754                         /*
1755                          * This can only happen when the parent is init.
1756                          * (See call to sigcld(q, NULL) in exit().)
1757                          * Use KM_NOSLEEP to avoid deadlock.
1758                          */
1759                         ASSERT(pp == proc_init);
1760                         winfo(cp, &info, 0);
1761                         sigaddq(pp, NULL, &info, KM_NOSLEEP);
1762                 } else {
1763                         winfo(cp, &sqp->sq_info, 0);
1764                         sigaddqa(pp, NULL, sqp);
1765                         sqp = NULL;
1766                 }
1767         }
1768 
1769         mutex_exit(&pp->p_lock);
1770 
1771         if (sqp)
1772                 siginfofree(sqp);
1773 }
1774 
1775 /*
1776  * Search for a child that has a pending SIGCLD for us, the parent.
1777  * The queue of SIGCLD signals is implied by the list of children.
1778  * We post the SIGCLD signals one at a time so they don't get lost.
1779  * When one is dequeued, another is enqueued, until there are no more.
1780  */
1781 void
1782 sigcld_repost()
1783 {
1784         proc_t *pp = curproc;
1785         proc_t *cp;
1786         sigqueue_t *sqp;
1787 
1788         sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
1789         mutex_enter(&pidlock);
1790         for (cp = pp->p_child; cp; cp = cp->p_sibling) {
1791                 if (cp->p_pidflag & CLDPEND) {
1792                         post_sigcld(cp, sqp);
1793                         mutex_exit(&pidlock);
1794                         return;
1795                 }
1796         }
1797         mutex_exit(&pidlock);
1798         kmem_free(sqp, sizeof (sigqueue_t));
1799 }
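
/*
 * For example (illustrative only): if children A and B both exit while
 * the parent already has a SIGCLD pending, post_sigcld() marks them
 * CLDPEND instead of posting.  Each time the parent takes a SIGCLD off the
 * queue or reaps a child, sigcld_repost() finds the next CLDPEND child and
 * posts its SIGCLD, so no notification is lost.
 */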
1800 
/*
 * Account for a sigqueue_t taken from a pre-allocated pool: bump the
 * pool's sent count, then post it via sigaddqa() (SN_SEND) or discard it.
 */
1804 void
1805 sigqsend(int cmd, proc_t *p, kthread_t *t, sigqueue_t *sigqp)
1806 {
1807         sigqhdr_t *sqh;
1808 
1809         sqh = (sigqhdr_t *)sigqp->sq_backptr;
1810         ASSERT(sqh);
1811 
1812         mutex_enter(&sqh->sqb_lock);
1813         sqh->sqb_sent++;
1814         mutex_exit(&sqh->sqb_lock);
1815 
1816         if (cmd == SN_SEND)
1817                 sigaddqa(p, t, sigqp);
1818         else
1819                 siginfofree(sigqp);
1820 }
1821 
1822 int
1823 sigsendproc(proc_t *p, sigsend_t *pv)
1824 {
1825         struct cred *cr;
1826         proc_t *myprocp = curproc;
1827 
1828         ASSERT(MUTEX_HELD(&pidlock));
1829 
1830         if (p->p_pid == 1 && pv->sig && sigismember(&cantmask, pv->sig))
1831                 return (EPERM);
1832 
1833         cr = CRED();
1834 
1835         if (pv->checkperm == 0 ||
1836             (pv->sig == SIGCONT && p->p_sessp == myprocp->p_sessp) ||
1837             prochasprocperm(p, myprocp, cr)) {
1838                 pv->perm++;
1839                 if (pv->sig) {
1840                         /* Make sure we should be setting si_pid and friends */
1841                         ASSERT(pv->sicode <= 0);
1842                         if (SI_CANQUEUE(pv->sicode)) {
1843                                 sigqueue_t *sqp;
1844 
1845                                 mutex_enter(&myprocp->p_lock);
1846                                 sqp = sigqalloc(myprocp->p_sigqhdr);
1847                                 mutex_exit(&myprocp->p_lock);
1848                                 if (sqp == NULL)
1849                                         return (EAGAIN);
1850                                 sqp->sq_info.si_signo = pv->sig;
1851                                 sqp->sq_info.si_code = pv->sicode;
1852                                 sqp->sq_info.si_pid = myprocp->p_pid;
1853                                 sqp->sq_info.si_ctid = PRCTID(myprocp);
1854                                 sqp->sq_info.si_zoneid = getzoneid();
1855                                 sqp->sq_info.si_uid = crgetruid(cr);
1856                                 sqp->sq_info.si_value = pv->value;
1857                                 mutex_enter(&p->p_lock);
1858                                 sigqsend(SN_SEND, p, NULL, sqp);
1859                                 mutex_exit(&p->p_lock);
1860                         } else {
1861                                 k_siginfo_t info;
1862                                 bzero(&info, sizeof (info));
1863                                 info.si_signo = pv->sig;
1864                                 info.si_code = pv->sicode;
1865                                 info.si_pid = myprocp->p_pid;
1866                                 info.si_ctid = PRCTID(myprocp);
1867                                 info.si_zoneid = getzoneid();
1868                                 info.si_uid = crgetruid(cr);
1869                                 mutex_enter(&p->p_lock);
1870                                 /*
1871                                  * XXX: Should be KM_SLEEP but
1872                                  * we have to avoid deadlock.
1873                                  */
1874                                 sigaddq(p, NULL, &info, KM_NOSLEEP);
1875                                 mutex_exit(&p->p_lock);
1876                         }
1877                 }
1878         }
1879 
1880         return (0);
1881 }
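
/*
 * Illustrative sketch (comment only): a user-level sigqueue(3C) call of
 * the form
 *
 *      union sigval sv;
 *      sv.sival_int = 42;
 *      (void) sigqueue(pid, SIGUSR1, sv);
 *
 * is expected to arrive here with pv->sicode == SI_QUEUE, so that
 * SI_CANQUEUE() is true and the value travels in a sigqueue_t taken from
 * the sender's pre-allocated pool via sigqalloc().  A plain kill(2), by
 * contrast, arrives with SI_USER and takes the non-queueing sigaddq() path.
 */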
1882 
1883 int
1884 sigsendset(procset_t *psp, sigsend_t *pv)
1885 {
1886         int error;
1887 
1888         error = dotoprocs(psp, sigsendproc, (char *)pv);
1889         if (error == 0 && pv->perm == 0)
1890                 return (EPERM);
1891 
1892         return (error);
1893 }
1894 
1895 /*
1896  * Dequeue a queued siginfo structure.
1897  * If a non-null thread pointer is passed then dequeue from
1898  * the thread queue, otherwise dequeue from the process queue.
1899  */
1900 void
1901 sigdeq(proc_t *p, kthread_t *t, int sig, sigqueue_t **qpp)
1902 {
1903         sigqueue_t **psqp, *sqp;
1904 
1905         ASSERT(MUTEX_HELD(&p->p_lock));
1906 
1907         *qpp = NULL;
1908 
1909         if (t != NULL) {
1910                 sigdelset(&t->t_sig, sig);
1911                 sigdelset(&t->t_extsig, sig);
1912                 psqp = &t->t_sigqueue;
1913         } else {
1914                 sigdelset(&p->p_sig, sig);
1915                 sigdelset(&p->p_extsig, sig);
1916                 psqp = &p->p_sigqueue;
1917         }
1918 
1919         for (;;) {
1920                 if ((sqp = *psqp) == NULL)
1921                         return;
1922                 if (sqp->sq_info.si_signo == sig)
1923                         break;
1924                 else
1925                         psqp = &sqp->sq_next;
1926         }
1927         *qpp = sqp;
1928         *psqp = sqp->sq_next;
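        /*
         * If another queued entry for the same signal remains, re-assert
         * the pending bit so the signal is not lost.
         */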
1929         for (sqp = *psqp; sqp; sqp = sqp->sq_next) {
1930                 if (sqp->sq_info.si_signo == sig) {
1931                         if (t != (kthread_t *)NULL) {
1932                                 sigaddset(&t->t_sig, sig);
1933                                 t->t_sig_check = 1;
1934                         } else {
1935                                 sigaddset(&p->p_sig, sig);
1936                                 set_proc_ast(p);
1937                         }
1938                         break;
1939                 }
1940         }
1941 }
1942 
1943 /*
1944  * Delete a queued SIGCLD siginfo structure matching the k_siginfo_t argument.
1945  */
1946 void
1947 sigcld_delete(k_siginfo_t *ip)
1948 {
1949         proc_t *p = curproc;
1950         int another_sigcld = 0;
1951         sigqueue_t **psqp, *sqp;
1952 
1953         ASSERT(ip->si_signo == SIGCLD);
1954 
1955         mutex_enter(&p->p_lock);
1956 
1957         if (!sigismember(&p->p_sig, SIGCLD)) {
1958                 mutex_exit(&p->p_lock);
1959                 return;
1960         }
1961 
1962         psqp = &p->p_sigqueue;
1963         for (;;) {
1964                 if ((sqp = *psqp) == NULL) {
1965                         mutex_exit(&p->p_lock);
1966                         return;
1967                 }
1968                 if (sqp->sq_info.si_signo == SIGCLD) {
1969                         if (sqp->sq_info.si_pid == ip->si_pid &&
1970                             sqp->sq_info.si_code == ip->si_code &&
1971                             sqp->sq_info.si_status == ip->si_status)
1972                                 break;
1973                         another_sigcld = 1;
1974                 }
1975                 psqp = &sqp->sq_next;
1976         }
1977         *psqp = sqp->sq_next;
1978 
1979         siginfofree(sqp);
1980 
1981         for (sqp = *psqp; !another_sigcld && sqp; sqp = sqp->sq_next) {
1982                 if (sqp->sq_info.si_signo == SIGCLD)
1983                         another_sigcld = 1;
1984         }
1985 
1986         if (!another_sigcld) {
1987                 sigdelset(&p->p_sig, SIGCLD);
1988                 sigdelset(&p->p_extsig, SIGCLD);
1989         }
1990 
1991         mutex_exit(&p->p_lock);
1992 }
1993 
1994 /*
1995  * Delete queued siginfo structures.
1996  * If a non-null thread pointer is passed then delete from
1997  * the thread queue, otherwise delete from the process queue.
1998  */
1999 void
2000 sigdelq(proc_t *p, kthread_t *t, int sig)
2001 {
2002         sigqueue_t **psqp, *sqp;
2003 
2004         /*
2005          * We must be holding p->p_lock unless the process is
2006          * being reaped or has failed to get started on fork.
2007          */
2008         ASSERT(MUTEX_HELD(&p->p_lock) ||
2009             p->p_stat == SIDL || p->p_stat == SZOMB);
2010 
2011         if (t != (kthread_t *)NULL)
2012                 psqp = &t->t_sigqueue;
2013         else
2014                 psqp = &p->p_sigqueue;
2015 
2016         while (*psqp) {
2017                 sqp = *psqp;
2018                 if (sig == 0 || sqp->sq_info.si_signo == sig) {
2019                         *psqp = sqp->sq_next;
2020                         siginfofree(sqp);
2021                 } else
2022                         psqp = &sqp->sq_next;
2023         }
2024 }
2025 
2026 /*
2027  * Insert a siginfo structure into a queue.
2028  * If a non-null thread pointer is passed then add to the thread queue,
2029  * otherwise add to the process queue.
2030  *
2031  * The function sigaddqins() is called with sigqueue already allocated.
2032  * It is called from sigaddqa() and sigaddq() below.
2033  *
2034  * The value of si_code implicitly indicates whether sigp is to be
2035  * explicitly queued, or to be queued to depth one.
2036  */
2037 static void
2038 sigaddqins(proc_t *p, kthread_t *t, sigqueue_t *sigqp)
2039 {
2040         sigqueue_t **psqp;
2041         int sig = sigqp->sq_info.si_signo;
2042 
2043         sigqp->sq_external = (curproc != &p0) &&
2044             (curproc->p_ct_process != p->p_ct_process);
2045 
2046         /*
2047          * issig_forreal() doesn't bother dequeueing signals if SKILLED
         * is set, and even if it did, we would want to avoid the situation
2049          * (which would be unique to SIGKILL) where one thread dequeued
2050          * the sigqueue_t and another executed psig().  So we create a
2051          * separate stash for SIGKILL's sigqueue_t.  Because a second
2052          * SIGKILL can set SEXTKILLED, we overwrite the existing entry
2053          * if (and only if) it was non-extracontractual.
2054          */
2055         if (sig == SIGKILL) {
2056                 if (p->p_killsqp == NULL || !p->p_killsqp->sq_external) {
2057                         if (p->p_killsqp != NULL)
2058                                 siginfofree(p->p_killsqp);
2059                         p->p_killsqp = sigqp;
2060                         sigqp->sq_next = NULL;
2061                 } else {
2062                         siginfofree(sigqp);
2063                 }
2064                 return;
2065         }
2066 
2067         ASSERT(sig >= 1 && sig < NSIG);
2068         if (t != NULL)  /* directed to a thread */
2069                 psqp = &t->t_sigqueue;
2070         else            /* directed to a process */
2071                 psqp = &p->p_sigqueue;
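        /*
         * Codes that may queue (SI_QUEUE, SI_TIMER, etc.) are appended at
         * the tail when the process has requested siginfo for this signal;
         * anything else is queued to depth one, i.e. discarded if an entry
         * for the same signal is already present.
         */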
2072         if (SI_CANQUEUE(sigqp->sq_info.si_code) &&
2073             sigismember(&p->p_siginfo, sig)) {
2074                 for (; *psqp != NULL; psqp = &(*psqp)->sq_next)
                        ;
2076         } else {
2077                 for (; *psqp != NULL; psqp = &(*psqp)->sq_next) {
2078                         if ((*psqp)->sq_info.si_signo == sig) {
2079                                 siginfofree(sigqp);
2080                                 return;
2081                         }
2082                 }
2083         }
2084         *psqp = sigqp;
2085         sigqp->sq_next = NULL;
2086 }
2087 
/*
 * The function sigaddqa() is called with the sigqueue_t already allocated.
 * If the signal is discardable, the sigqueue_t is freed, but sigtoproc()
 * is still called to preserve SIGKILL and signal-generation semantics.
 * It is called from sigqueue() and other places.
 */
2093 void
2094 sigaddqa(proc_t *p, kthread_t *t, sigqueue_t *sigqp)
2095 {
2096         int sig = sigqp->sq_info.si_signo;
2097 
2098         ASSERT(MUTEX_HELD(&p->p_lock));
2099         ASSERT(sig >= 1 && sig < NSIG);
2100 
2101         if (sig_discardable(p, sig))
2102                 siginfofree(sigqp);
2103         else
2104                 sigaddqins(p, t, sigqp);
2105 
2106         sigtoproc(p, t, sig);
2107 }
2108 
2109 /*
2110  * Allocate the sigqueue_t structure and call sigaddqins().
2111  */
2112 void
2113 sigaddq(proc_t *p, kthread_t *t, k_siginfo_t *infop, int km_flags)
2114 {
2115         sigqueue_t *sqp;
2116         int sig = infop->si_signo;
2117 
2118         ASSERT(MUTEX_HELD(&p->p_lock));
2119         ASSERT(sig >= 1 && sig < NSIG);
2120 
2121         /*
2122          * If the signal will be discarded by sigtoproc() or
2123          * if the process isn't requesting siginfo and it isn't
         * blocking the signal (it *could* change its mind while
2125          * the signal is pending) then don't bother creating one.
2126          */
2127         if (!sig_discardable(p, sig) &&
2128             (sigismember(&p->p_siginfo, sig) ||
2129             (curproc->p_ct_process != p->p_ct_process) ||
2130             (sig == SIGCLD && SI_FROMKERNEL(infop))) &&
2131             ((sqp = kmem_alloc(sizeof (sigqueue_t), km_flags)) != NULL)) {
2132                 bcopy(infop, &sqp->sq_info, sizeof (k_siginfo_t));
2133                 sqp->sq_func = NULL;
2134                 sqp->sq_next = NULL;
2135                 sigaddqins(p, t, sqp);
2136         }
2137         sigtoproc(p, t, sig);
2138 }
2139 
2140 /*
2141  * Handle stop-on-fault processing for the debugger.  Returns 0
2142  * if the fault is cleared during the stop, nonzero if it isn't.
2143  */
2144 int
2145 stop_on_fault(uint_t fault, k_siginfo_t *sip)
2146 {
2147         proc_t *p = ttoproc(curthread);
2148         klwp_t *lwp = ttolwp(curthread);
2149 
2150         ASSERT(prismember(&p->p_fltmask, fault));
2151 
2152         /*
2153          * Record current fault and siginfo structure so debugger can
2154          * find it.
2155          */
2156         mutex_enter(&p->p_lock);
2157         lwp->lwp_curflt = (uchar_t)fault;
2158         lwp->lwp_siginfo = *sip;
2159 
2160         stop(PR_FAULTED, fault);
2161 
2162         fault = lwp->lwp_curflt;
2163         lwp->lwp_curflt = 0;
2164         mutex_exit(&p->p_lock);
2165         return (fault);
2166 }
2167 
2168 void
2169 sigorset(k_sigset_t *s1, const k_sigset_t *s2)
2170 {
2171         s1->__sigbits[0] |= s2->__sigbits[0];
2172         s1->__sigbits[1] |= s2->__sigbits[1];
2173         s1->__sigbits[2] |= s2->__sigbits[2];
2174 }
2175 
2176 void
2177 sigandset(k_sigset_t *s1, const k_sigset_t *s2)
2178 {
2179         s1->__sigbits[0] &= s2->__sigbits[0];
2180         s1->__sigbits[1] &= s2->__sigbits[1];
2181         s1->__sigbits[2] &= s2->__sigbits[2];
2182 }
2183 
2184 void
2185 sigdiffset(k_sigset_t *s1, const k_sigset_t *s2)
2186 {
2187         s1->__sigbits[0] &= ~(s2->__sigbits[0]);
2188         s1->__sigbits[1] &= ~(s2->__sigbits[1]);
2189         s1->__sigbits[2] &= ~(s2->__sigbits[2]);
2190 }
2191 
2192 /*
2193  * Return non-zero if curthread->t_sig_check should be set to 1, that is,
2194  * if there are any signals the thread might take on return from the kernel.
 * If k_sigset_t's were a single word, we would do:
2196  *      return (((p->p_sig | t->t_sig) & ~t->t_hold) & fillset);
2197  */
2198 int
2199 sigcheck(proc_t *p, kthread_t *t)
2200 {
2201         sc_shared_t *tdp = t->t_schedctl;
2202 
2203         /*
2204          * If signals are blocked via the schedctl interface
2205          * then we only check for the unmaskable signals.
2206          * The unmaskable signal numbers should all be contained
2207          * in __sigbits[0] and we assume this for speed.
2208          */
2209 #if (CANTMASK1 == 0 && CANTMASK2 == 0)
2210         if (tdp != NULL && tdp->sc_sigblock)
2211                 return ((p->p_sig.__sigbits[0] | t->t_sig.__sigbits[0]) &
2212                     CANTMASK0);
2213 #else
2214 #error "fix me: CANTMASK1 and CANTMASK2 are not zero"
2215 #endif
2216 
2217 /* see uts/common/sys/signal.h for why this must be true */
2218 #if ((MAXSIG > (2 * 32)) && (MAXSIG <= (3 * 32)))
2219         return (((p->p_sig.__sigbits[0] | t->t_sig.__sigbits[0]) &
2220             ~t->t_hold.__sigbits[0]) |
2221             ((p->p_sig.__sigbits[1] | t->t_sig.__sigbits[1]) &
2222             ~t->t_hold.__sigbits[1]) |
2223             (((p->p_sig.__sigbits[2] | t->t_sig.__sigbits[2]) &
2224             ~t->t_hold.__sigbits[2]) & FILLSET2));
2225 #else
2226 #error "fix me: MAXSIG out of bounds"
2227 #endif
2228 }
2229 
2230 void
2231 sigintr(k_sigset_t *smask, int intable)
2232 {
2233         proc_t *p;
2234         int owned;
2235         k_sigset_t lmask;               /* local copy of cantmask */
2236         klwp_t *lwp = ttolwp(curthread);
2237 
        /*
         * Mask out all signals except SIGHUP, SIGINT, SIGQUIT and SIGTERM,
         * preserving the existing masks.  This function supports the
         * "intr" nfs and ufs mount option.
         */
2243 
2244         /*
2245          * don't do kernel threads
2246          */
2247         if (lwp == NULL)
2248                 return;
2249 
2250         /*
2251          * get access to signal mask
2252          */
2253         p = ttoproc(curthread);
2254         owned = mutex_owned(&p->p_lock); /* this is filthy */
2255         if (!owned)
2256                 mutex_enter(&p->p_lock);
2257 
2258         /*
2259          * remember the current mask
2260          */
2261         schedctl_finish_sigblock(curthread);
2262         *smask = curthread->t_hold;
2263 
2264         /*
2265          * mask out all signals
2266          */
2267         sigfillset(&curthread->t_hold);
2268 
2269         /*
2270          * Unmask the non-maskable signals (e.g., KILL), as long as
2271          * they aren't already masked (which could happen at exit).
2272          * The first sigdiffset sets lmask to (cantmask & ~curhold).  The
2273          * second sets the current hold mask to (~0 & ~lmask), which reduces
2274          * to (~cantmask | curhold).
2275          */
2276         lmask = cantmask;
2277         sigdiffset(&lmask, smask);
2278         sigdiffset(&curthread->t_hold, &lmask);
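
        /*
         * Concretely, a caller that entered with an empty hold mask now
         * holds every signal except the unmaskable ones; the sigdelset()
         * calls below then re-open HUP, INT (if intable), QUIT and TERM.
         */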
2279 
        /*
         * Re-enable HUP, QUIT, and TERM iff they were originally enabled.
         * Re-enable INT if it was originally enabled and the NFS mount
         * option "nointr" is not set.
         */
2285         if (!sigismember(smask, SIGHUP))
2286                 sigdelset(&curthread->t_hold, SIGHUP);
2287         if (!sigismember(smask, SIGINT) && intable)
2288                 sigdelset(&curthread->t_hold, SIGINT);
2289         if (!sigismember(smask, SIGQUIT))
2290                 sigdelset(&curthread->t_hold, SIGQUIT);
2291         if (!sigismember(smask, SIGTERM))
2292                 sigdelset(&curthread->t_hold, SIGTERM);
2293 
2294         /*
2295          * release access to signal mask
2296          */
2297         if (!owned)
2298                 mutex_exit(&p->p_lock);
2299 
2300         /*
2301          * Indicate that this lwp is not to be stopped.
2302          */
2303         lwp->lwp_nostop++;
2304 
2305 }
2306 
2307 void
2308 sigunintr(k_sigset_t *smask)
2309 {
2310         proc_t *p;
2311         int owned;
2312         klwp_t *lwp = ttolwp(curthread);
2313 
2314         /*
2315          * Reset previous mask (See sigintr() above)
2316          */
2317         if (lwp != NULL) {
2318                 lwp->lwp_nostop--;   /* restore lwp stoppability */
2319                 p = ttoproc(curthread);
2320                 owned = mutex_owned(&p->p_lock); /* this is filthy */
2321                 if (!owned)
2322                         mutex_enter(&p->p_lock);
2323                 curthread->t_hold = *smask;
2324                 /* so unmasked signals will be seen */
2325                 curthread->t_sig_check = 1;
2326                 if (!owned)
2327                         mutex_exit(&p->p_lock);
2328         }
2329 }
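
/*
 * Illustrative sketch (comment only): file system code typically brackets
 * an interruptible wait with sigintr()/sigunintr(), roughly as follows
 * (the condition variable, lock and nointr flag are hypothetical):
 *
 *      k_sigset_t smask;
 *
 *      sigintr(&smask, nointr == 0);   (hold all but HUP/INT/QUIT/TERM)
 *      if (cv_wait_sig(&cv, &lock) == 0)
 *              error = EINTR;          (interrupted by a signal)
 *      sigunintr(&smask);              (restore the caller's hold mask)
 */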
2330 
2331 void
2332 sigreplace(k_sigset_t *newmask, k_sigset_t *oldmask)
2333 {
2334         proc_t  *p;
2335         int owned;
2336         /*
2337          * Save current signal mask in oldmask, then
2338          * set it to newmask.
2339          */
2340         if (ttolwp(curthread) != NULL) {
2341                 p = ttoproc(curthread);
2342                 owned = mutex_owned(&p->p_lock); /* this is filthy */
2343                 if (!owned)
2344                         mutex_enter(&p->p_lock);
2345                 schedctl_finish_sigblock(curthread);
2346                 if (oldmask != NULL)
2347                         *oldmask = curthread->t_hold;
2348                 curthread->t_hold = *newmask;
2349                 curthread->t_sig_check = 1;
2350                 if (!owned)
2351                         mutex_exit(&p->p_lock);
2352         }
2353 }
2354 
2355 /*
2356  * Return true if the signal number is in range
2357  * and the signal code specifies signal queueing.
2358  */
2359 int
2360 sigwillqueue(int sig, int code)
2361 {
2362         if (sig >= 0 && sig < NSIG) {
2363                 switch (code) {
2364                 case SI_QUEUE:
2365                 case SI_TIMER:
2366                 case SI_ASYNCIO:
2367                 case SI_MESGQ:
2368                         return (1);
2369                 }
2370         }
2371         return (0);
2372 }
2373 
2374 #ifndef UCHAR_MAX
2375 #define UCHAR_MAX       255
2376 #endif
2377 
2378 /*
2379  * The entire pool (with maxcount entries) is pre-allocated at
2380  * the first sigqueue/signotify call.
2381  */
2382 sigqhdr_t *
2383 sigqhdralloc(size_t size, uint_t maxcount)
2384 {
2385         size_t i;
2386         sigqueue_t *sq, *next;
2387         sigqhdr_t *sqh;
2388 
2389         i = (maxcount * size) + sizeof (sigqhdr_t);
2390         ASSERT(maxcount <= UCHAR_MAX && i <= USHRT_MAX);
2391         sqh = kmem_alloc(i, KM_SLEEP);
2392         sqh->sqb_count = (uchar_t)maxcount;
2393         sqh->sqb_maxcount = (uchar_t)maxcount;
2394         sqh->sqb_size = (ushort_t)i;
2395         sqh->sqb_pexited = 0;
2396         sqh->sqb_sent = 0;
2397         sqh->sqb_free = sq = (sigqueue_t *)(sqh + 1);
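        /*
         * For example (illustrative only), building a pool with maxcount
         * of 32 and size == sizeof (sigqueue_t) yields one allocation of
         * 32 * sizeof (sigqueue_t) + sizeof (sigqhdr_t) bytes; the loop
         * below threads the entries that follow the header onto the
         * sqb_free list.
         */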
2398         for (i = maxcount - 1; i != 0; i--) {
2399                 next = (sigqueue_t *)((uintptr_t)sq + size);
2400                 sq->sq_next = next;
2401                 sq = next;
2402         }
2403         sq->sq_next = NULL;
2404         cv_init(&sqh->sqb_cv, NULL, CV_DEFAULT, NULL);
2405         mutex_init(&sqh->sqb_lock, NULL, MUTEX_DEFAULT, NULL);
2406         return (sqh);
2407 }
2408 
2409 static void sigqrel(sigqueue_t *);
2410 
/*
 * Allocate a sigqueue/signotify structure from the per-process
 * pre-allocated pool.
 */
2415 sigqueue_t *
2416 sigqalloc(sigqhdr_t *sqh)
2417 {
2418         sigqueue_t *sq = NULL;
2419 
2420         ASSERT(MUTEX_HELD(&curproc->p_lock));
2421 
2422         if (sqh != NULL) {
2423                 mutex_enter(&sqh->sqb_lock);
2424                 if (sqh->sqb_count > 0) {
2425                         sqh->sqb_count--;
2426                         sq = sqh->sqb_free;
2427                         sqh->sqb_free = sq->sq_next;
2428                         mutex_exit(&sqh->sqb_lock);
2429                         bzero(&sq->sq_info, sizeof (k_siginfo_t));
2430                         sq->sq_backptr = sqh;
2431                         sq->sq_func = sigqrel;
2432                         sq->sq_next = NULL;
2433                         sq->sq_external = 0;
2434                 } else {
2435                         mutex_exit(&sqh->sqb_lock);
2436                 }
2437         }
2438         return (sq);
2439 }
2440 
2441 /*
2442  * Return a sigqueue structure back to the pre-allocated pool.
2443  */
2444 static void
2445 sigqrel(sigqueue_t *sq)
2446 {
2447         sigqhdr_t *sqh;
2448 
2449         /* make sure that p_lock of the affected process is held */
2450 
2451         sqh = (sigqhdr_t *)sq->sq_backptr;
2452         mutex_enter(&sqh->sqb_lock);
2453         if (sqh->sqb_pexited && sqh->sqb_sent == 1) {
2454                 mutex_exit(&sqh->sqb_lock);
2455                 cv_destroy(&sqh->sqb_cv);
2456                 mutex_destroy(&sqh->sqb_lock);
2457                 kmem_free(sqh, sqh->sqb_size);
2458         } else {
2459                 sqh->sqb_count++;
2460                 sqh->sqb_sent--;
2461                 sq->sq_next = sqh->sqb_free;
2462                 sq->sq_backptr = NULL;
2463                 sqh->sqb_free = sq;
2464                 cv_signal(&sqh->sqb_cv);
2465                 mutex_exit(&sqh->sqb_lock);
2466         }
2467 }
2468 
2469 /*
 * Free the pre-allocated sigqueue headers of the sigqueue pool
 * and the signotify pool, if possible.
2472  * Called only by the owning process during exec() and exit().
2473  */
2474 void
2475 sigqfree(proc_t *p)
2476 {
2477         ASSERT(MUTEX_HELD(&p->p_lock));
2478 
2479         if (p->p_sigqhdr != NULL) {  /* sigqueue pool */
2480                 sigqhdrfree(p->p_sigqhdr);
2481                 p->p_sigqhdr = NULL;
2482         }
2483         if (p->p_signhdr != NULL) {  /* signotify pool */
2484                 sigqhdrfree(p->p_signhdr);
2485                 p->p_signhdr = NULL;
2486         }
2487 }
2488 
2489 /*
2490  * Free up the pre-allocated header and sigq pool if possible.
2491  */
2492 void
2493 sigqhdrfree(sigqhdr_t *sqh)
2494 {
2495         mutex_enter(&sqh->sqb_lock);
2496         if (sqh->sqb_sent == 0) {
2497                 mutex_exit(&sqh->sqb_lock);
2498                 cv_destroy(&sqh->sqb_cv);
2499                 mutex_destroy(&sqh->sqb_lock);
2500                 kmem_free(sqh, sqh->sqb_size);
2501         } else {
2502                 sqh->sqb_pexited = 1;
2503                 mutex_exit(&sqh->sqb_lock);
2504         }
2505 }
2506 
2507 /*
2508  * Free up a single sigqueue structure.
2509  * No other code should free a sigqueue directly.
2510  */
2511 void
2512 siginfofree(sigqueue_t *sqp)
2513 {
2514         if (sqp != NULL) {
2515                 if (sqp->sq_func != NULL)
2516                         (sqp->sq_func)(sqp);
2517                 else
2518                         kmem_free(sqp, sizeof (sigqueue_t));
2519         }
2520 }
2521 
2522 /*
2523  * Generate a synchronous signal caused by a hardware
2524  * condition encountered by an lwp.  Called from trap().
2525  */
2526 void
2527 trapsig(k_siginfo_t *ip, int restartable)
2528 {
2529         proc_t *p = ttoproc(curthread);
2530         int sig = ip->si_signo;
2531         sigqueue_t *sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
2532 
2533         ASSERT(sig > 0 && sig < NSIG);
2534 
2535         if (curthread->t_dtrace_on)
2536                 dtrace_safe_synchronous_signal();
2537 
2538         mutex_enter(&p->p_lock);
2539         schedctl_finish_sigblock(curthread);
2540         /*
2541          * Avoid a possible infinite loop if the lwp is holding the
2542          * signal generated by a trap of a restartable instruction or
2543          * if the signal so generated is being ignored by the process.
2544          */
2545         if (restartable &&
2546             (sigismember(&curthread->t_hold, sig) ||
2547             p->p_user.u_signal[sig-1] == SIG_IGN)) {
2548                 sigdelset(&curthread->t_hold, sig);
2549                 p->p_user.u_signal[sig-1] = SIG_DFL;
2550                 sigdelset(&p->p_ignore, sig);
2551         }
2552         bcopy(ip, &sqp->sq_info, sizeof (k_siginfo_t));
2553         sigaddqa(p, curthread, sqp);
2554         mutex_exit(&p->p_lock);
2555 }
2556 
2557 /*
2558  * Dispatch the real time profiling signal in the traditional way,
2559  * honoring all of the /proc tracing mechanism built into issig().
2560  */
2561 static void
2562 realsigprof_slow(int sysnum, int nsysarg, int error)
2563 {
2564         kthread_t *t = curthread;
2565         proc_t *p = ttoproc(t);
2566         klwp_t *lwp = ttolwp(t);
2567         k_siginfo_t *sip = &lwp->lwp_siginfo;
2568         void (*func)();
2569 
2570         mutex_enter(&p->p_lock);
2571         func = PTOU(p)->u_signal[SIGPROF - 1];
2572         if (p->p_rprof_cyclic == CYCLIC_NONE ||
2573             func == SIG_DFL || func == SIG_IGN) {
2574                 bzero(t->t_rprof, sizeof (*t->t_rprof));
2575                 mutex_exit(&p->p_lock);
2576                 return;
2577         }
2578         if (sigismember(&t->t_hold, SIGPROF)) {
2579                 mutex_exit(&p->p_lock);
2580                 return;
2581         }
2582         sip->si_signo = SIGPROF;
2583         sip->si_code = PROF_SIG;
2584         sip->si_errno = error;
2585         hrt2ts(gethrtime(), &sip->si_tstamp);
2586         sip->si_syscall = sysnum;
2587         sip->si_nsysarg = nsysarg;
2588         sip->si_fault = lwp->lwp_lastfault;
2589         sip->si_faddr = lwp->lwp_lastfaddr;
2590         lwp->lwp_lastfault = 0;
2591         lwp->lwp_lastfaddr = NULL;
2592         sigtoproc(p, t, SIGPROF);
2593         mutex_exit(&p->p_lock);
2594         ASSERT(lwp->lwp_cursig == 0);
2595         if (issig(FORREAL))
2596                 psig();
2597         sip->si_signo = 0;
2598         bzero(t->t_rprof, sizeof (*t->t_rprof));
2599 }
2600 
2601 /*
2602  * We are not tracing the SIGPROF signal, or doing any other unnatural
2603  * acts, like watchpoints, so dispatch the real time profiling signal
2604  * directly, bypassing all of the overhead built into issig().
2605  */
2606 static void
2607 realsigprof_fast(int sysnum, int nsysarg, int error)
2608 {
2609         kthread_t *t = curthread;
2610         proc_t *p = ttoproc(t);
2611         klwp_t *lwp = ttolwp(t);
2612         k_siginfo_t *sip = &lwp->lwp_siginfo;
2613         void (*func)();
2614         int rc;
2615         int code;
2616 
2617         /*
2618          * We don't need to acquire p->p_lock here;
2619          * we are manipulating thread-private data.
2620          */
2621         func = PTOU(p)->u_signal[SIGPROF - 1];
2622         if (p->p_rprof_cyclic == CYCLIC_NONE ||
2623             func == SIG_DFL || func == SIG_IGN) {
2624                 bzero(t->t_rprof, sizeof (*t->t_rprof));
2625                 return;
2626         }
2627         if (lwp->lwp_cursig != 0 ||
2628             lwp->lwp_curinfo != NULL ||
2629             sigismember(&t->t_hold, SIGPROF)) {
2630                 return;
2631         }
2632         sip->si_signo = SIGPROF;
2633         sip->si_code = PROF_SIG;
2634         sip->si_errno = error;
2635         hrt2ts(gethrtime(), &sip->si_tstamp);
2636         sip->si_syscall = sysnum;
2637         sip->si_nsysarg = nsysarg;
2638         sip->si_fault = lwp->lwp_lastfault;
2639         sip->si_faddr = lwp->lwp_lastfaddr;
2640         lwp->lwp_lastfault = 0;
2641         lwp->lwp_lastfaddr = NULL;
2642         if (t->t_flag & T_TOMASK)
2643                 t->t_flag &= ~T_TOMASK;
2644         else
2645                 lwp->lwp_sigoldmask = t->t_hold;
2646         sigorset(&t->t_hold, &PTOU(p)->u_sigmask[SIGPROF - 1]);
2647         if (!sigismember(&PTOU(p)->u_signodefer, SIGPROF))
2648                 sigaddset(&t->t_hold, SIGPROF);
2649         lwp->lwp_extsig = 0;
2650         lwp->lwp_ru.nsignals++;
2651         if (p->p_model == DATAMODEL_NATIVE)
2652                 rc = sendsig(SIGPROF, sip, func);
2653 #ifdef _SYSCALL32_IMPL
2654         else
2655                 rc = sendsig32(SIGPROF, sip, func);
2656 #endif  /* _SYSCALL32_IMPL */
2657         sip->si_signo = 0;
2658         bzero(t->t_rprof, sizeof (*t->t_rprof));
2659         if (rc == 0) {
2660                 /*
2661                  * sendsig() failed; we must dump core with a SIGSEGV.
2662                  * See psig().  This code is copied from there.
2663                  */
2664                 lwp->lwp_cursig = SIGSEGV;
2665                 code = CLD_KILLED;
2666                 proc_is_exiting(p);
2667                 if (exitlwps(1) != 0) {
2668                         mutex_enter(&p->p_lock);
2669                         lwp_exit();
2670                 }
2671                 if (audit_active == C2AUDIT_LOADED)
2672                         audit_core_start(SIGSEGV);
2673                 if (core(SIGSEGV, 0) == 0)
2674                         code = CLD_DUMPED;
2675                 if (audit_active == C2AUDIT_LOADED)
2676                         audit_core_finish(code);
2677                 exit(code, SIGSEGV);
2678         }
2679 }
2680 
2681 /*
2682  * Arrange for the real time profiling signal to be dispatched.
2683  */
2684 void
2685 realsigprof(int sysnum, int nsysarg, int error)
2686 {
2687         kthread_t *t = curthread;
2688         proc_t *p = ttoproc(t);
2689 
2690         if (t->t_rprof->rp_anystate == 0)
2691                 return;
2692 
2693         schedctl_finish_sigblock(t);
2694 
2695         /* test for any activity that requires p->p_lock */
2696         if (tracing(p, SIGPROF) || pr_watch_active(p) ||
2697             sigismember(&PTOU(p)->u_sigresethand, SIGPROF)) {
2698                 /* do it the classic slow way */
2699                 realsigprof_slow(sysnum, nsysarg, error);
2700         } else {
2701                 /* do it the cheating-a-little fast way */
2702                 realsigprof_fast(sysnum, nsysarg, error);
2703         }
2704 }
2705 
2706 #ifdef _SYSCALL32_IMPL
2707 
2708 /*
2709  * It's tricky to transmit a sigval between 32-bit and 64-bit
 * processes, since in the 64-bit world, a pointer and an integer
2711  * are different sizes.  Since we're constrained by the standards
2712  * world not to change the types, and it's unclear how useful it is
2713  * to send pointers between address spaces this way, we preserve
2714  * the 'int' interpretation for 32-bit processes interoperating
2715  * with 64-bit processes.  The full semantics (pointers or integers)
2716  * are available for N-bit processes interoperating with N-bit
2717  * processes.
2718  */
2719 void
2720 siginfo_kto32(const k_siginfo_t *src, siginfo32_t *dest)
2721 {
2722         bzero(dest, sizeof (*dest));
2723 
2724         /*
2725          * The absolute minimum content is si_signo and si_code.
2726          */
2727         dest->si_signo = src->si_signo;
2728         if ((dest->si_code = src->si_code) == SI_NOINFO)
2729                 return;
2730 
2731         /*
2732          * A siginfo generated by user level is structured
2733          * differently from one generated by the kernel.
2734          */
2735         if (SI_FROMUSER(src)) {
2736                 dest->si_pid = src->si_pid;
2737                 dest->si_ctid = src->si_ctid;
2738                 dest->si_zoneid = src->si_zoneid;
2739                 dest->si_uid = src->si_uid;
2740                 if (SI_CANQUEUE(src->si_code))
2741                         dest->si_value.sival_int =
2742                             (int32_t)src->si_value.sival_int;
2743                 return;
2744         }
2745 
2746         dest->si_errno = src->si_errno;
2747 
2748         switch (src->si_signo) {
2749         default:
2750                 dest->si_pid = src->si_pid;
2751                 dest->si_ctid = src->si_ctid;
2752                 dest->si_zoneid = src->si_zoneid;
2753                 dest->si_uid = src->si_uid;
2754                 dest->si_value.sival_int = (int32_t)src->si_value.sival_int;
2755                 break;
2756         case SIGCLD:
2757                 dest->si_pid = src->si_pid;
2758                 dest->si_ctid = src->si_ctid;
2759                 dest->si_zoneid = src->si_zoneid;
2760                 dest->si_status = src->si_status;
2761                 dest->si_stime = src->si_stime;
2762                 dest->si_utime = src->si_utime;
2763                 break;
2764         case SIGSEGV:
2765         case SIGBUS:
2766         case SIGILL:
2767         case SIGTRAP:
2768         case SIGFPE:
2769         case SIGEMT:
2770                 dest->si_addr = (caddr32_t)(uintptr_t)src->si_addr;
2771                 dest->si_trapno = src->si_trapno;
2772                 dest->si_pc = (caddr32_t)(uintptr_t)src->si_pc;
2773                 break;
2774         case SIGPOLL:
2775         case SIGXFSZ:
2776                 dest->si_fd = src->si_fd;
2777                 dest->si_band = src->si_band;
2778                 break;
2779         case SIGPROF:
2780                 dest->si_faddr = (caddr32_t)(uintptr_t)src->si_faddr;
2781                 dest->si_tstamp.tv_sec = src->si_tstamp.tv_sec;
2782                 dest->si_tstamp.tv_nsec = src->si_tstamp.tv_nsec;
2783                 dest->si_syscall = src->si_syscall;
2784                 dest->si_nsysarg = src->si_nsysarg;
2785                 dest->si_fault = src->si_fault;
2786                 break;
2787         }
2788 }
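
/*
 * Illustrative (non-kernel) sketch of the interoperability rule described
 * in the block comment above: when a sigval crosses a 32-bit/64-bit
 * boundary, only the sival_int interpretation is preserved by
 * siginfo_kto32() and siginfo_32tok(); pointer values have no portable
 * meaning across data models.  The handler below is hypothetical example
 * code, not part of this file:
 *
 *	32-bit sender:
 *		union sigval sv;
 *		sv.sival_int = 42;
 *		(void) sigqueue(receiver_pid, SIGUSR1, sv);
 *
 *	64-bit receiver (SA_SIGINFO handler for SIGUSR1):
 *		static void
 *		handler(int sig, siginfo_t *sip, void *ucp)
 *		{
 *			int v = sip->si_value.sival_int;   (42 arrives intact)
 *			...sip->si_value.sival_ptr is not meaningful here...
 *		}
 *
 * Full pointer semantics apply only when sender and receiver share the
 * same data model.
 */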
2789 
2790 void
2791 siginfo_32tok(const siginfo32_t *src, k_siginfo_t *dest)
2792 {
2793         bzero(dest, sizeof (*dest));
2794 
2795         /*
2796          * The absolute minimum content is si_signo and si_code.
2797          */
2798         dest->si_signo = src->si_signo;
2799         if ((dest->si_code = src->si_code) == SI_NOINFO)
2800                 return;
2801 
2802         /*
2803          * A siginfo generated by user level is structured
2804          * differently from one generated by the kernel.
2805          */
2806         if (SI_FROMUSER(src)) {
2807                 dest->si_pid = src->si_pid;
2808                 dest->si_ctid = src->si_ctid;
2809                 dest->si_zoneid = src->si_zoneid;
2810                 dest->si_uid = src->si_uid;
2811                 if (SI_CANQUEUE(src->si_code))
2812                         dest->si_value.sival_int =
2813                             (int)src->si_value.sival_int;
2814                 return;
2815         }
2816 
2817         dest->si_errno = src->si_errno;
2818 
2819         switch (src->si_signo) {
2820         default:
2821                 dest->si_pid = src->si_pid;
2822                 dest->si_ctid = src->si_ctid;
2823                 dest->si_zoneid = src->si_zoneid;
2824                 dest->si_uid = src->si_uid;
2825                 dest->si_value.sival_int = (int)src->si_value.sival_int;
2826                 break;
2827         case SIGCLD:
2828                 dest->si_pid = src->si_pid;
2829                 dest->si_ctid = src->si_ctid;
2830                 dest->si_zoneid = src->si_zoneid;
2831                 dest->si_status = src->si_status;
2832                 dest->si_stime = src->si_stime;
2833                 dest->si_utime = src->si_utime;
2834                 break;
2835         case SIGSEGV:
2836         case SIGBUS:
2837         case SIGILL:
2838         case SIGTRAP:
2839         case SIGFPE:
2840         case SIGEMT:
2841                 dest->si_addr = (void *)(uintptr_t)src->si_addr;
2842                 dest->si_trapno = src->si_trapno;
2843                 dest->si_pc = (void *)(uintptr_t)src->si_pc;
2844                 break;
2845         case SIGPOLL:
2846         case SIGXFSZ:
2847                 dest->si_fd = src->si_fd;
2848                 dest->si_band = src->si_band;
2849                 break;
2850         case SIGPROF:
2851                 dest->si_faddr = (void *)(uintptr_t)src->si_faddr;
2852                 dest->si_tstamp.tv_sec = src->si_tstamp.tv_sec;
2853                 dest->si_tstamp.tv_nsec = src->si_tstamp.tv_nsec;
2854                 dest->si_syscall = src->si_syscall;
2855                 dest->si_nsysarg = src->si_nsysarg;
2856                 dest->si_fault = src->si_fault;
2857                 break;
2858         }
2859 }
2860 
2861 #endif /* _SYSCALL32_IMPL */