/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc.  All rights reserved.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/signal.h>
#include <sys/stack.h>
#include <sys/pcb.h>
#include <sys/user.h>
#include <sys/systm.h>
#include <sys/sysinfo.h>
#include <sys/errno.h>
#include <sys/cmn_err.h>
#include <sys/cred.h>
#include <sys/resource.h>
#include <sys/task.h>
#include <sys/project.h>
#include <sys/proc.h>
#include <sys/debug.h>
#include <sys/disp.h>
#include <sys/class.h>
#include <vm/seg_kmem.h>
#include <vm/seg_kp.h>
#include <sys/machlock.h>
#include <sys/kmem.h>
#include <sys/varargs.h>
#include <sys/turnstile.h>
#include <sys/poll.h>
#include <sys/vtrace.h>
#include <sys/callb.h>
#include <c2/audit.h>
#include <sys/tnf.h>
#include <sys/sobject.h>
#include <sys/cpupart.h>
#include <sys/pset.h>
#include <sys/door.h>
#include <sys/spl.h>
#include <sys/copyops.h>
#include <sys/rctl.h>
#include <sys/brand.h>
#include <sys/pool.h>
#include <sys/zone.h>
#include <sys/tsol/label.h>
#include <sys/tsol/tndb.h>
#include <sys/cpc_impl.h>
#include <sys/sdt.h>
#include <sys/reboot.h>
#include <sys/kdi.h>
#include <sys/schedctl.h>
#include <sys/waitq.h>
#include <sys/cpucaps.h>
#include <sys/kiconv.h>

struct kmem_cache *thread_cache;	/* cache of free threads */
struct kmem_cache *lwp_cache;		/* cache of free lwps */
struct kmem_cache *turnstile_cache;	/* cache of free turnstiles */

/*
 * allthreads is only for use by kmem_readers.  All kernel loops can use
 * the current thread as a start/end point.
 */
kthread_t *allthreads = &t0;	/* circular list of all threads */

static kcondvar_t reaper_cv;		/* synchronization var */
kthread_t	*thread_deathrow;	/* circular list of reapable threads */
kthread_t	*lwp_deathrow;		/* circular list of reapable threads */
kmutex_t	reaplock;		/* protects lwp and thread deathrows */
int	thread_reapcnt = 0;		/* number of threads on deathrow */
int	lwp_reapcnt = 0;		/* number of lwps on deathrow */
int	reaplimit = 16;			/* delay reaping until reaplimit */

thread_free_lock_t	*thread_free_lock;
					/* protects tick thread from reaper */

extern int nthread;

/* System Scheduling classes. */
id_t	syscid;				/* system scheduling class ID */
id_t	sysdccid = CLASS_UNUSED;	/* reset when SDC loads */

void	*segkp_thread;			/* cookie for segkp pool */

int lwp_cache_sz = 32;
int t_cache_sz = 8;
static kt_did_t next_t_id = 1;

/* Default mode for thread binding to CPUs and processor sets */
int default_binding_mode = TB_ALLHARD;

/*
 * Min/Max stack sizes for stack size parameters
 */
#define	MAX_STKSIZE	(32 * DEFAULTSTKSZ)
#define	MIN_STKSIZE	DEFAULTSTKSZ

/*
 * default_stksize overrides lwp_default_stksize if it is set.
 */
int	default_stksize;
int	lwp_default_stksize;

static zone_key_t zone_thread_key;

unsigned int kmem_stackinfo;		/* stackinfo feature on-off */
kmem_stkinfo_t *kmem_stkinfo_log;	/* stackinfo circular log */
static kmutex_t kmem_stkinfo_lock;	/* protects kmem_stkinfo_log */

/*
 * forward declarations for internal thread specific data (tsd)
 */
static void *tsd_realloc(void *, size_t, size_t);

void thread_reaper(void);

/* forward declarations for stackinfo feature */
static void stkinfo_begin(kthread_t *);
static void stkinfo_end(kthread_t *);
static size_t stkinfo_percent(caddr_t, caddr_t, caddr_t);

/*ARGSUSED*/
static int
turnstile_constructor(void *buf, void *cdrarg, int kmflags)
{
	bzero(buf, sizeof (turnstile_t));
	return (0);
}

/*ARGSUSED*/
static void
turnstile_destructor(void *buf, void *cdrarg)
{
	turnstile_t *ts = buf;

	ASSERT(ts->ts_free == NULL);
	ASSERT(ts->ts_waiters == 0);
	ASSERT(ts->ts_inheritor == NULL);
	ASSERT(ts->ts_sleepq[0].sq_first == NULL);
	ASSERT(ts->ts_sleepq[1].sq_first == NULL);
}

void
thread_init(void)
{
	kthread_t *tp;
	extern char sys_name[];
	extern void idle();
	struct cpu *cpu = CPU;
	int i;
	kmutex_t *lp;

	mutex_init(&reaplock, NULL, MUTEX_SPIN, (void *)ipltospl(DISP_LEVEL));
	thread_free_lock =
	    kmem_alloc(sizeof (thread_free_lock_t) * THREAD_FREE_NUM, KM_SLEEP);
	for (i = 0; i < THREAD_FREE_NUM; i++) {
		lp = &thread_free_lock[i].tf_lock;
		mutex_init(lp, NULL, MUTEX_DEFAULT, NULL);
	}

#if defined(__i386) || defined(__amd64)
	thread_cache = kmem_cache_create("thread_cache", sizeof (kthread_t),
	    PTR24_ALIGN, NULL, NULL, NULL, NULL, NULL, 0);

	/*
	 * "struct _klwp" includes a "struct pcb", which includes a
	 * "struct fpu", which needs to be 64-byte aligned on amd64
	 * (and even on i386) for xsave/xrstor.
	 */
	lwp_cache = kmem_cache_create("lwp_cache", sizeof (klwp_t),
	    64, NULL, NULL, NULL, NULL, NULL, 0);
#else
	/*
	 * Allocate thread structures from static_arena.  This prevents
	 * issues where a thread tries to relocate its own thread
	 * structure and touches it after the mapping has been suspended.
	 */
	thread_cache = kmem_cache_create("thread_cache", sizeof (kthread_t),
	    PTR24_ALIGN, NULL, NULL, NULL, NULL, static_arena, 0);

	lwp_stk_cache_init();

	lwp_cache = kmem_cache_create("lwp_cache", sizeof (klwp_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);
#endif

	turnstile_cache = kmem_cache_create("turnstile_cache",
	    sizeof (turnstile_t), 0,
	    turnstile_constructor, turnstile_destructor, NULL, NULL, NULL, 0);

	label_init();
	cred_init();

	/*
	 * Initialize various resource management facilities.
	 */
	rctl_init();
	cpucaps_init();
	/*
	 * Zone_init() should be called before project_init() so that the
	 * project ID for the first project is initialized correctly.
	 */
	zone_init();
	project_init();
	brand_init();
	kiconv_init();
	task_init();
	tcache_init();
	pool_init();

	curthread->t_ts = kmem_cache_alloc(turnstile_cache, KM_SLEEP);

	/*
	 * Originally, we had two parameters to set default stack
	 * size: one for lwp's (lwp_default_stksize), and one for
	 * kernel-only threads (DEFAULTSTKSZ, a.k.a. _defaultstksz).
	 * Now we have a third parameter that overrides both if it is
	 * set to a legal stack size, called default_stksize.
	 */

	if (default_stksize == 0) {
		default_stksize = DEFAULTSTKSZ;
	} else if (default_stksize % PAGESIZE != 0 ||
	    default_stksize > MAX_STKSIZE ||
	    default_stksize < MIN_STKSIZE) {
		cmn_err(CE_WARN, "Illegal stack size. Using %d",
		    (int)DEFAULTSTKSZ);
		default_stksize = DEFAULTSTKSZ;
	} else {
		lwp_default_stksize = default_stksize;
	}

	if (lwp_default_stksize == 0) {
		lwp_default_stksize = default_stksize;
	} else if (lwp_default_stksize % PAGESIZE != 0 ||
	    lwp_default_stksize > MAX_STKSIZE ||
	    lwp_default_stksize < MIN_STKSIZE) {
		cmn_err(CE_WARN, "Illegal stack size. Using %d",
		    default_stksize);
		lwp_default_stksize = default_stksize;
	}

	segkp_lwp = segkp_cache_init(segkp, lwp_cache_sz,
	    lwp_default_stksize,
	    (KPD_NOWAIT | KPD_HASREDZONE | KPD_LOCKED));

	segkp_thread = segkp_cache_init(segkp, t_cache_sz,
	    default_stksize, KPD_HASREDZONE | KPD_LOCKED | KPD_NO_ANON);

	(void) getcid(sys_name, &syscid);
	curthread->t_cid = syscid;	/* current thread is t0 */

	/*
	 * Set up the first CPU's idle thread.
	 * It runs whenever the CPU has nothing worthwhile to do.
	 */
	tp = thread_create(NULL, 0, idle, NULL, 0, &p0, TS_STOPPED, -1);
	cpu->cpu_idle_thread = tp;
	tp->t_preempt = 1;
	tp->t_disp_queue = cpu->cpu_disp;
	ASSERT(tp->t_disp_queue != NULL);
	tp->t_bound_cpu = cpu;
	tp->t_affinitycnt = 1;

	/*
	 * Registering a thread in the callback table is usually
	 * done in the initialization code of the thread.  In this
	 * case, we do it right after thread creation to avoid
	 * blocking the idle thread while it registers itself.  It also
	 * avoids the possibility of reregistration in case a CPU
	 * restarts its idle thread.
	 */
	CALLB_CPR_INIT_SAFE(tp, "idle");

	/*
	 * Create the thread_reaper daemon.  From this point on, exited
	 * threads will get reaped.
	 */
	(void) thread_create(NULL, 0, (void (*)())thread_reaper,
	    NULL, 0, &p0, TS_RUN, minclsyspri);

	/*
	 * Finish initializing the kernel memory allocator now that
	 * thread_create() is available.
	 */
	kmem_thread_init();

	if (boothowto & RB_DEBUG)
		kdi_dvec_thravail();
}

/*
 * Create a thread.
 *
 * thread_create() blocks for memory if necessary.  It never fails.
 *
 * If stk is NULL, the thread is created at the base of the stack
 * and cannot be swapped.
 */
kthread_t *
thread_create(
	caddr_t	stk,
	size_t	stksize,
	void	(*proc)(),
	void	*arg,
	size_t	len,
	proc_t	*pp,
	int	state,
	pri_t	pri)
{
	kthread_t *t;
	extern struct classfuncs sys_classfuncs;
	turnstile_t *ts;

	/*
	 * Every thread keeps a turnstile around in case it needs to block.
	 * The only reason the turnstile is not simply part of the thread
	 * structure is that we may have to break the association whenever
	 * more than one thread blocks on a given synchronization object.
	 * From a memory-management standpoint, turnstiles are like the
	 * "attached mblks" that hang off dblks in the streams allocator.
	 */
	ts = kmem_cache_alloc(turnstile_cache, KM_SLEEP);

	if (stk == NULL) {
		/*
		 * alloc both thread and stack in segkp chunk
		 */

		if (stksize < default_stksize)
			stksize = default_stksize;

		if (stksize == default_stksize) {
			stk = (caddr_t)segkp_cache_get(segkp_thread);
		} else {
			stksize = roundup(stksize, PAGESIZE);
			stk = (caddr_t)segkp_get(segkp, stksize,
			    (KPD_HASREDZONE | KPD_NO_ANON | KPD_LOCKED));
		}

		ASSERT(stk != NULL);

		/*
		 * The machine-dependent mutex code may require that
		 * thread pointers (since they may be used for mutex owner
		 * fields) have certain alignment requirements.
		 * PTR24_ALIGN is the size of the alignment quanta.
		 * XXX - assumes stack grows toward low addresses.
		 */
		if (stksize <= sizeof (kthread_t) + PTR24_ALIGN)
			cmn_err(CE_PANIC, "thread_create: proposed stack size"
			    " too small to hold thread.");
#ifdef STACK_GROWTH_DOWN
		stksize -= SA(sizeof (kthread_t) + PTR24_ALIGN - 1);
		stksize &= -PTR24_ALIGN;	/* make thread aligned */
		t = (kthread_t *)(stk + stksize);
		bzero(t, sizeof (kthread_t));
		if (audit_active)
			audit_thread_create(t);
		t->t_stk = stk + stksize;
		t->t_stkbase = stk;
#else	/* stack grows to larger addresses */
		stksize -= SA(sizeof (kthread_t));
		t = (kthread_t *)(stk);
		bzero(t, sizeof (kthread_t));
		t->t_stk = stk + sizeof (kthread_t);
		t->t_stkbase = stk + stksize + sizeof (kthread_t);
#endif	/* STACK_GROWTH_DOWN */
		t->t_flag |= T_TALLOCSTK;
		t->t_swap = stk;
	} else {
		t = kmem_cache_alloc(thread_cache, KM_SLEEP);
		bzero(t, sizeof (kthread_t));
		ASSERT(((uintptr_t)t & (PTR24_ALIGN - 1)) == 0);
		if (audit_active)
			audit_thread_create(t);
		/*
		 * Initialize t_stk to the kernel stack pointer to use
		 * upon entry to the kernel
		 */
#ifdef STACK_GROWTH_DOWN
		t->t_stk = stk + stksize;
		t->t_stkbase = stk;
#else
		t->t_stk = stk;			/* 3b2-like */
		t->t_stkbase = stk + stksize;
#endif	/* STACK_GROWTH_DOWN */
	}

	if (kmem_stackinfo != 0) {
		stkinfo_begin(t);
	}

	t->t_ts = ts;

	/*
	 * p_cred could be NULL if thread_create is called before cred_init
	 * is called in main.
	 */
	mutex_enter(&pp->p_crlock);
	if (pp->p_cred)
		crhold(t->t_cred = pp->p_cred);
	mutex_exit(&pp->p_crlock);
	t->t_start = gethrestime_sec();
	t->t_startpc = proc;
	t->t_procp = pp;
	t->t_clfuncs = &sys_classfuncs.thread;
	t->t_cid = syscid;
	t->t_pri = pri;
	t->t_schedflag = 0;
	t->t_bind_cpu = PBIND_NONE;
	t->t_bindflag = (uchar_t)default_binding_mode;
	t->t_bind_pset = PS_NONE;
	t->t_plockp = &pp->p_lock;
	t->t_copyops = NULL;
	t->t_taskq = NULL;
	t->t_anttime = 0;
	t->t_hatdepth = 0;

	t->t_dtrace_vtime = 1;	/* assure vtimestamp is always non-zero */

	CPU_STATS_ADDQ(CPU, sys, nthreads, 1);
#ifndef NPROBE
	/* Kernel probe */
	tnf_thread_create(t);
#endif /* NPROBE */
	LOCK_INIT_CLEAR(&t->t_lock);

	/*
	 * Callers who give us a NULL proc must do their own
	 * stack initialization, e.g.
	 * lwp_create().
	 */
	if (proc != NULL) {
		t->t_stk = thread_stk_init(t->t_stk);
		thread_load(t, proc, arg, len);
	}

	/*
	 * Put a hold on project0.  If this thread is actually in a
	 * different project, then t_proj will be changed later in
	 * lwp_create().  All kernel-only threads must be in project 0.
	 */
	t->t_proj = project_hold(proj0p);

	lgrp_affinity_init(&t->t_lgrp_affinity);

	mutex_enter(&pidlock);
	nthread++;
	t->t_did = next_t_id++;
	t->t_prev = curthread->t_prev;
	t->t_next = curthread;

	/*
	 * Add the thread to the list of all threads, and initialize
	 * its t_cpu pointer.  We need to block preemption since
	 * cpu_offline walks the thread list looking for threads
	 * with t_cpu pointing to the CPU being offlined.  We want
	 * to make sure that the list is consistent and that if t_cpu
	 * is set, the thread is on the list.
	 */
	kpreempt_disable();
	curthread->t_prev->t_next = t;
	curthread->t_prev = t;

	/*
	 * Threads should never have a NULL t_cpu pointer so assign it
	 * here.  If the thread is being created with state TS_RUN a
	 * better CPU may be chosen when it is placed on the run queue.
	 *
	 * We need to keep kernel preemption disabled when setting all
	 * three fields to keep them in sync.  Also, always create in
	 * the default partition since that's where kernel threads go
	 * (if this isn't a kernel thread, t_cpupart will be changed
	 * in lwp_create before setting the thread runnable).
	 */
	t->t_cpupart = &cp_default;

	/*
	 * For now, affiliate this thread with the root lgroup.
	 * Since the kernel does not (presently) allocate its memory
	 * in a locality aware fashion, the root is an appropriate home.
	 * If this thread is later associated with an lwp, it will have
	 * its lgroup re-assigned at that time.
	 */
	lgrp_move_thread(t, &cp_default.cp_lgrploads[LGRP_ROOTID], 1);

	/*
	 * Inherit the current cpu.  If this cpu isn't part of the chosen
	 * lgroup, a new cpu will be chosen by cpu_choose when the thread
	 * is ready to run.
	 */
	if (CPU->cpu_part == &cp_default)
		t->t_cpu = CPU;
	else
		t->t_cpu = disp_lowpri_cpu(cp_default.cp_cpulist, t->t_lpl,
		    t->t_pri, NULL);

	t->t_disp_queue = t->t_cpu->cpu_disp;
	kpreempt_enable();

	/*
	 * Initialize thread state and the dispatcher lock pointer.
	 * Need to hold onto pidlock to block allthreads walkers until
	 * the state is set.
	 */
	switch (state) {
	case TS_RUN:
		curthread->t_oldspl = splhigh();	/* get dispatcher spl */
		THREAD_SET_STATE(t, TS_STOPPED, &transition_lock);
		CL_SETRUN(t);
		thread_unlock(t);
		break;

	case TS_ONPROC:
		THREAD_ONPROC(t, t->t_cpu);
		break;

	case TS_FREE:
		/*
		 * Free state will be used for intr threads.
		 * The interrupt routine must set the thread dispatcher
		 * lock pointer (t_lockp) if starting on a CPU
		 * other than the current one.
		 */
		THREAD_FREEINTR(t, CPU);
		break;

	case TS_STOPPED:
		THREAD_SET_STATE(t, TS_STOPPED, &stop_lock);
		break;

	default:			/* TS_SLEEP, TS_ZOMB or TS_TRANS */
		cmn_err(CE_PANIC, "thread_create: invalid state %d", state);
	}
	mutex_exit(&pidlock);
	return (t);
}
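
/*
 * Illustrative example: a typical caller creates a kernel-only daemon
 * thread at a system priority and lets thread_create() allocate the stack.
 * The worker function my_daemon() is hypothetical; everything else is the
 * interface defined above.
 *
 *	static void
 *	my_daemon(void)
 *	{
 *		...
 *		thread_exit();
 *	}
 *
 *	kthread_t *t = thread_create(NULL, 0, my_daemon, NULL, 0, &p0,
 *	    TS_RUN, minclsyspri);
 *
 * Because stk is NULL, the stack comes from segkp and the kthread_t is
 * carved out of the same allocation (T_TALLOCSTK).
 */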

/*
 * Move thread to project0 and take care of project reference counters.
 */
void
thread_rele(kthread_t *t)
{
	kproject_t *kpj;

	thread_lock(t);

	ASSERT(t == curthread || t->t_state == TS_FREE || t->t_procp == &p0);
	kpj = ttoproj(t);
	t->t_proj = proj0p;

	thread_unlock(t);

	if (kpj != proj0p) {
		project_rele(kpj);
		(void) project_hold(proj0p);
	}
}

void
thread_exit(void)
{
	kthread_t *t = curthread;

	if ((t->t_proc_flag & TP_ZTHREAD) != 0)
		cmn_err(CE_PANIC, "thread_exit: zthread_exit() not called");

	tsd_exit();		/* Clean up this thread's TSD */

	kcpc_passivate();	/* clean up performance counter state */

	/*
	 * No kernel thread should have called poll() without arranging
	 * for pollcleanup() to be called here.
	 */
	ASSERT(t->t_pollstate == NULL);
	ASSERT(t->t_schedctl == NULL);
	if (t->t_door)
		door_slam();	/* in case thread did an upcall */

#ifndef NPROBE
	/* Kernel probe */
	if (t->t_tnf_tpdp)
		tnf_thread_exit();
#endif /* NPROBE */

	thread_rele(t);
	t->t_preempt++;

	/*
	 * remove thread from the all threads list so that
	 * death-row can use the same pointers.
	 */
	mutex_enter(&pidlock);
	t->t_next->t_prev = t->t_prev;
	t->t_prev->t_next = t->t_next;
	ASSERT(allthreads != t);	/* t0 never exits */
	cv_broadcast(&t->t_joincv);	/* wake up anyone in thread_join */
	mutex_exit(&pidlock);

	if (t->t_ctx != NULL)
		exitctx(t);
	if (t->t_procp->p_pctx != NULL)
		exitpctx(t->t_procp);

	if (kmem_stackinfo != 0) {
		stkinfo_end(t);
	}

	t->t_state = TS_ZOMB;	/* set zombie thread */

	swtch_from_zombie();	/* give up the CPU */
	/* NOTREACHED */
}

/*
 * Check to see if the specified thread is active (defined as being on
 * the thread list).  This is certainly a slow way to do this; if there's
 * ever a reason to speed it up, we could maintain a hash table of active
 * threads indexed by their t_did.
 */
static kthread_t *
did_to_thread(kt_did_t tid)
{
	kthread_t *t;

	ASSERT(MUTEX_HELD(&pidlock));
	for (t = curthread->t_next; t != curthread; t = t->t_next) {
		if (t->t_did == tid)
			break;
	}
	if (t->t_did == tid)
		return (t);
	else
		return (NULL);
}

/*
 * Wait for specified thread to exit.  Returns immediately if the thread
 * could not be found, meaning that it has either already exited or never
 * existed.
 */
void
thread_join(kt_did_t tid)
{
	kthread_t *t;

	ASSERT(tid != curthread->t_did);
	ASSERT(tid != t0.t_did);

	mutex_enter(&pidlock);
	/*
	 * Make sure we check that the thread is on the thread list
	 * before blocking on it; otherwise we could end up blocking on
	 * a cv that's already been freed.  In other words, don't cache
	 * the thread pointer across calls to cv_wait.
	 *
	 * The choice of loop invariant means that whenever a thread
	 * is taken off the allthreads list, a cv_broadcast must be
	 * performed on that thread's t_joincv to wake up any waiters.
	 * The broadcast doesn't have to happen right away, but it
	 * shouldn't be postponed indefinitely (e.g., by doing it in
	 * thread_free, which may only be executed when the deathrow
	 * queue is processed).
	 */
	while (t = did_to_thread(tid))
		cv_wait(&t->t_joincv, &pidlock);
	mutex_exit(&pidlock);
}

void
thread_free_prevent(kthread_t *t)
{
	kmutex_t *lp;

	lp = &thread_free_lock[THREAD_FREE_HASH(t)].tf_lock;
	mutex_enter(lp);
}

void
thread_free_allow(kthread_t *t)
{
	kmutex_t *lp;

	lp = &thread_free_lock[THREAD_FREE_HASH(t)].tf_lock;
	mutex_exit(lp);
}

static void
thread_free_barrier(kthread_t *t)
{
	kmutex_t *lp;

	lp = &thread_free_lock[THREAD_FREE_HASH(t)].tf_lock;
	mutex_enter(lp);
	mutex_exit(lp);
}

void
thread_free(kthread_t *t)
{
	boolean_t allocstk = (t->t_flag & T_TALLOCSTK);
	klwp_t *lwp = t->t_lwp;
	caddr_t swap = t->t_swap;

	ASSERT(t != &t0 && t->t_state == TS_FREE);
	ASSERT(t->t_door == NULL);
	ASSERT(t->t_schedctl == NULL);
	ASSERT(t->t_pollstate == NULL);

	t->t_pri = 0;
	t->t_pc = 0;
	t->t_sp = 0;
	t->t_wchan0 = NULL;
	t->t_wchan = NULL;
	if (t->t_cred != NULL) {
		crfree(t->t_cred);
		t->t_cred = 0;
	}
	if (t->t_pdmsg) {
		kmem_free(t->t_pdmsg, strlen(t->t_pdmsg) + 1);
		t->t_pdmsg = NULL;
	}
	if (audit_active)
		audit_thread_free(t);
#ifndef NPROBE
	if (t->t_tnf_tpdp)
		tnf_thread_free(t);
#endif /* NPROBE */
	if (t->t_cldata) {
		CL_EXITCLASS(t->t_cid, (caddr_t *)t->t_cldata);
	}
	if (t->t_rprof != NULL) {
		kmem_free(t->t_rprof, sizeof (*t->t_rprof));
		t->t_rprof = NULL;
	}
	t->t_lockp = NULL;	/* nothing should try to lock this thread now */
	if (lwp)
		lwp_freeregs(lwp, 0);
	if (t->t_ctx)
		freectx(t, 0);
	t->t_stk = NULL;
	if (lwp)
		lwp_stk_fini(lwp);
	lock_clear(&t->t_lock);

	if (t->t_ts->ts_waiters > 0)
		panic("thread_free: turnstile still active");

	kmem_cache_free(turnstile_cache, t->t_ts);

	free_afd(&t->t_activefd);

	/*
	 * Barrier for the tick accounting code.  The tick accounting code
	 * holds this lock to keep the thread from going away while it's
	 * looking at it.
	 */
	thread_free_barrier(t);

	ASSERT(ttoproj(t) == proj0p);
	project_rele(ttoproj(t));

	lgrp_affinity_free(&t->t_lgrp_affinity);

	mutex_enter(&pidlock);
	nthread--;
	mutex_exit(&pidlock);

	/*
	 * Free thread, lwp and stack.  This needs to be done carefully, since
	 * if T_TALLOCSTK is set, the thread is part of the stack.
	 */
	t->t_lwp = NULL;
	t->t_swap = NULL;

	if (swap) {
		segkp_release(segkp, swap);
	}
	if (lwp) {
		kmem_cache_free(lwp_cache, lwp);
	}
	if (!allocstk) {
		kmem_cache_free(thread_cache, t);
	}
}
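
/*
 * Illustrative example: code such as the tick accounting mentioned above can
 * pin a thread structure against thread_free() by bracketing its access with
 * the pair of functions defined above.  examine_thread() is hypothetical.
 *
 *	thread_free_prevent(t);
 *	examine_thread(t);		(t cannot be freed in this window)
 *	thread_free_allow(t);
 *
 * thread_free() blocks in thread_free_barrier() until any such holder of the
 * per-hash-bucket lock has dropped it.
 */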

/*
 * Removes threads associated with the given zone from a deathrow queue.
 * tp is a pointer to the head of the deathrow queue, and countp is a
 * pointer to the current deathrow count.  Returns a linked list of
 * threads removed from the list.
 */
static kthread_t *
thread_zone_cleanup(kthread_t **tp, int *countp, zoneid_t zoneid)
{
	kthread_t *tmp, *list = NULL;
	cred_t *cr;

	ASSERT(MUTEX_HELD(&reaplock));
	while (*tp != NULL) {
		if ((cr = (*tp)->t_cred) != NULL && crgetzoneid(cr) == zoneid) {
			tmp = *tp;
			*tp = tmp->t_forw;
			tmp->t_forw = list;
			list = tmp;
			(*countp)--;
		} else {
			tp = &(*tp)->t_forw;
		}
	}
	return (list);
}

static void
thread_reap_list(kthread_t *t)
{
	kthread_t *next;

	while (t != NULL) {
		next = t->t_forw;
		thread_free(t);
		t = next;
	}
}

/* ARGSUSED */
static void
thread_zone_destroy(zoneid_t zoneid, void *unused)
{
	kthread_t *t, *l;

	mutex_enter(&reaplock);
	/*
	 * Pull threads and lwps associated with zone off deathrow lists.
	 */
	t = thread_zone_cleanup(&thread_deathrow, &thread_reapcnt, zoneid);
	l = thread_zone_cleanup(&lwp_deathrow, &lwp_reapcnt, zoneid);
	mutex_exit(&reaplock);

	/*
	 * Guard against race condition in mutex_owner_running:
	 *	thread=owner(mutex)
	 *	<interrupt>
	 *	thread exits mutex
	 *	thread exits
	 *	thread reaped
	 *	thread struct freed
	 * cpu = thread->t_cpu <- BAD POINTER DEREFERENCE.
	 * A cross call to all cpus will cause the interrupt handler
	 * to reset the PC if it is in mutex_owner_running, refreshing
	 * stale thread pointers.
	 */
	mutex_sync();   /* sync with mutex code */

	/*
	 * Reap threads
	 */
	thread_reap_list(t);

	/*
	 * Reap lwps
	 */
	thread_reap_list(l);
}

/*
 * cleanup zombie threads that are on deathrow.
 */
void
thread_reaper()
{
	kthread_t *t, *l;
	callb_cpr_t cprinfo;

	/*
	 * Register callback to clean up threads when zone is destroyed.
	 */
	zone_key_create(&zone_thread_key, NULL, NULL, thread_zone_destroy);

	CALLB_CPR_INIT(&cprinfo, &reaplock, callb_generic_cpr, "t_reaper");
	for (;;) {
		mutex_enter(&reaplock);
		while (thread_deathrow == NULL && lwp_deathrow == NULL) {
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(&reaper_cv, &reaplock);
			CALLB_CPR_SAFE_END(&cprinfo, &reaplock);
		}
		/*
		 * mutex_sync() needs to be called when reaping, but
		 * not too often.  We limit reaping rate to once
		 * per second.  Reaplimit is max rate at which threads can
		 * be freed. Does not impact thread destruction/creation.
		 */
		t = thread_deathrow;
		l = lwp_deathrow;
		thread_deathrow = NULL;
		lwp_deathrow = NULL;
		thread_reapcnt = 0;
		lwp_reapcnt = 0;
		mutex_exit(&reaplock);

		/*
		 * Guard against race condition in mutex_owner_running:
		 *	thread=owner(mutex)
		 *	<interrupt>
		 *	thread exits mutex
		 *	thread exits
		 *	thread reaped
		 *	thread struct freed
		 * cpu = thread->t_cpu <- BAD POINTER DEREFERENCE.
		 * A cross call to all cpus will cause the interrupt handler
		 * to reset the PC if it is in mutex_owner_running, refreshing
		 * stale thread pointers.
		 */
		mutex_sync();   /* sync with mutex code */
		/*
		 * Reap threads
		 */
		thread_reap_list(t);

		/*
		 * Reap lwps
		 */
		thread_reap_list(l);
		delay(hz);
	}
}

/*
 * This is called by lwp_create(), etc., to put an lwp_deathrow thread onto
 * thread_deathrow.  The thread's state has already been changed to TS_FREE
 * to indicate that it is reapable; the caller already holds the reaplock,
 * and the lwp has already been freed.
 */
void
reapq_move_lq_to_tq(kthread_t *t)
{
	ASSERT(t->t_state == TS_FREE);
	ASSERT(MUTEX_HELD(&reaplock));
	t->t_forw = thread_deathrow;
	thread_deathrow = t;
	thread_reapcnt++;
	if (lwp_reapcnt + thread_reapcnt > reaplimit)
		cv_signal(&reaper_cv);	/* wake the reaper */
}

/*
 * This is called by resume() to put a zombie thread onto deathrow.
 * The thread's state is changed to TS_FREE to indicate that it is reapable.
 * This is called from the idle thread so it must not block - just spin.
 */
void
reapq_add(kthread_t *t)
{
	mutex_enter(&reaplock);

	/*
	 * lwp_deathrow contains threads with lwp linkage and
	 * swappable thread stacks which have the default stacksize.
	 * These threads' lwps and stacks may be reused by lwp_create().
	 *
	 * Anything else goes on thread_deathrow, where it will eventually
	 * be thread_free()d.
	 */
	if (t->t_flag & T_LWPREUSE) {
		ASSERT(ttolwp(t) != NULL);
		t->t_forw = lwp_deathrow;
		lwp_deathrow = t;
		lwp_reapcnt++;
	} else {
		t->t_forw = thread_deathrow;
		thread_deathrow = t;
		thread_reapcnt++;
	}
	if (lwp_reapcnt + thread_reapcnt > reaplimit)
		cv_signal(&reaper_cv);	/* wake the reaper */
	t->t_state = TS_FREE;
	lock_clear(&t->t_lock);

	/*
	 * Before we return, we need to grab and drop the thread lock for
	 * the dead thread.  At this point, the current thread is the idle
	 * thread, and the dead thread's CPU lock points to the current
	 * CPU -- and we must grab and drop the lock to synchronize with
	 * a racing thread walking a blocking chain that the zombie thread
	 * was recently in.  By this point, that blocking chain is (by
	 * definition) stale:  the dead thread is not holding any locks, and
	 * is therefore not in any blocking chains -- but if we do not regrab
	 * our lock before freeing the dead thread's data structures, the
	 * thread walking the (stale) blocking chain will die on memory
	 * corruption when it attempts to drop the dead thread's lock.  We
	 * only need do this once because there is no way for the dead thread
	 * to ever again be on a blocking chain:  once we have grabbed and
	 * dropped the thread lock, we are guaranteed that anyone that could
	 * have seen this thread in a blocking chain can no longer see it.
	 */
	thread_lock(t);
	thread_unlock(t);

	mutex_exit(&reaplock);
}

/*
 * Install thread context ops for the current thread.
 */
void
installctx(
	kthread_t *t,
	void	*arg,
	void	(*save)(void *),
	void	(*restore)(void *),
	void	(*fork)(void *, void *),
	void	(*lwp_create)(void *, void *),
	void	(*exit)(void *),
	void	(*free)(void *, int))
{
	struct ctxop *ctx;

	ctx = kmem_alloc(sizeof (struct ctxop), KM_SLEEP);
	ctx->save_op = save;
	ctx->restore_op = restore;
	ctx->fork_op = fork;
	ctx->lwp_create_op = lwp_create;
	ctx->exit_op = exit;
	ctx->free_op = free;
	ctx->arg = arg;
	ctx->next = t->t_ctx;
	t->t_ctx = ctx;
}
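
/*
 * Illustrative example: a subsystem that keeps per-thread hardware state can
 * hook context switches by installing save/restore ops on curthread, and must
 * remove them with exactly the same argument tuple.  my_save(), my_restore()
 * and my_state are hypothetical.
 *
 *	installctx(curthread, my_state, my_save, my_restore,
 *	    NULL, NULL, NULL, NULL);
 *	...
 *	(void) removectx(curthread, my_state, my_save, my_restore,
 *	    NULL, NULL, NULL, NULL);
 *
 * removectx() (below) matches on all six ops and the arg, so the remove call
 * must mirror the install call exactly.
 */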

/*
 * Remove the thread context ops from a thread.
 */
int
removectx(
	kthread_t *t,
	void	*arg,
	void	(*save)(void *),
	void	(*restore)(void *),
	void	(*fork)(void *, void *),
	void	(*lwp_create)(void *, void *),
	void	(*exit)(void *),
	void	(*free)(void *, int))
{
	struct ctxop *ctx, *prev_ctx;

	/*
	 * The incoming kthread_t (which is the thread for which the
	 * context ops will be removed) should be one of the following:
	 *
	 * a) the current thread,
	 *
	 * b) a thread of a process that's being forked (SIDL),
	 *
	 * c) a thread that belongs to the same process as the current
	 *    thread and for which the current thread is the agent thread,
	 *
	 * d) a thread that is TS_STOPPED which is indicative of it
	 *    being (if curthread is not an agent) a thread being created
	 *    as part of an lwp creation.
	 */
	ASSERT(t == curthread || ttoproc(t)->p_stat == SIDL ||
	    ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);

	/*
	 * Serialize modifications to t->t_ctx to prevent the agent thread
	 * and the target thread from racing with each other during lwp exit.
	 */
	mutex_enter(&t->t_ctx_lock);
	prev_ctx = NULL;
	kpreempt_disable();
	for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next) {
		if (ctx->save_op == save && ctx->restore_op == restore &&
		    ctx->fork_op == fork && ctx->lwp_create_op == lwp_create &&
		    ctx->exit_op == exit && ctx->free_op == free &&
		    ctx->arg == arg) {
			if (prev_ctx)
				prev_ctx->next = ctx->next;
			else
				t->t_ctx = ctx->next;
			mutex_exit(&t->t_ctx_lock);
			if (ctx->free_op != NULL)
				(ctx->free_op)(ctx->arg, 0);
			kmem_free(ctx, sizeof (struct ctxop));
			kpreempt_enable();
			return (1);
		}
		prev_ctx = ctx;
	}
	mutex_exit(&t->t_ctx_lock);
	kpreempt_enable();

	return (0);
}

void
savectx(kthread_t *t)
{
	struct ctxop *ctx;

	ASSERT(t == curthread);
	for (ctx = t->t_ctx; ctx != 0; ctx = ctx->next)
		if (ctx->save_op != NULL)
			(ctx->save_op)(ctx->arg);
}

void
restorectx(kthread_t *t)
{
	struct ctxop *ctx;

	ASSERT(t == curthread);
	for (ctx = t->t_ctx; ctx != 0; ctx = ctx->next)
		if (ctx->restore_op != NULL)
			(ctx->restore_op)(ctx->arg);
}

void
forkctx(kthread_t *t, kthread_t *ct)
{
	struct ctxop *ctx;

	for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
		if (ctx->fork_op != NULL)
			(ctx->fork_op)(t, ct);
}

/*
 * Note that this operator is only invoked via the _lwp_create
 * system call.  The system may have other reasons to create lwps,
 * e.g. the agent lwp or the doors unreferenced lwp.
 */
void
lwp_createctx(kthread_t *t, kthread_t *ct)
{
	struct ctxop *ctx;

	for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
		if (ctx->lwp_create_op != NULL)
			(ctx->lwp_create_op)(t, ct);
}

/*
 * exitctx is called from thread_exit() and lwp_exit() to perform any actions
 * needed when the thread/LWP leaves the processor for the last time.  This
 * routine is not intended to deal with freeing memory; freectx() is used for
 * that purpose during thread_free().  This routine is provided to allow for
 * clean-up that can't wait until thread_free().
 */
void
exitctx(kthread_t *t)
{
	struct ctxop *ctx;

	for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
		if (ctx->exit_op != NULL)
			(ctx->exit_op)(t);
}

/*
 * freectx is called from thread_free() and exec() to get
 * rid of old thread context ops.
 */
void
freectx(kthread_t *t, int isexec)
{
	struct ctxop *ctx;

	kpreempt_disable();
	while ((ctx = t->t_ctx) != NULL) {
		t->t_ctx = ctx->next;
		if (ctx->free_op != NULL)
			(ctx->free_op)(ctx->arg, isexec);
		kmem_free(ctx, sizeof (struct ctxop));
	}
	kpreempt_enable();
}

/*
 * freectx_ctx is called from lwp_create() when an lwp is reused from
 * lwp_deathrow and its thread structure is added to thread_deathrow.
 * The thread structure to which this ctx was attached may already have
 * been freed by the thread reaper, so free_op implementations shouldn't
 * rely on that thread structure still being around.
 */
void
freectx_ctx(struct ctxop *ctx)
{
	struct ctxop *nctx;

	ASSERT(ctx != NULL);

	kpreempt_disable();
	do {
		nctx = ctx->next;
		if (ctx->free_op != NULL)
			(ctx->free_op)(ctx->arg, 0);
		kmem_free(ctx, sizeof (struct ctxop));
	} while ((ctx = nctx) != NULL);
	kpreempt_enable();
}

/*
 * Set the thread running; arrange for it to be swapped in if necessary.
 */
void
setrun_locked(kthread_t *t)
{
	ASSERT(THREAD_LOCK_HELD(t));
	if (t->t_state == TS_SLEEP) {
		/*
		 * Take off sleep queue.
		 */
		SOBJ_UNSLEEP(t->t_sobj_ops, t);
	} else if (t->t_state & (TS_RUN | TS_ONPROC)) {
		/*
		 * Already on dispatcher queue.
		 */
		return;
	} else if (t->t_state == TS_WAIT) {
		waitq_setrun(t);
	} else if (t->t_state == TS_STOPPED) {
		/*
		 * All of the sending of SIGCONT (TC_XSTART) and /proc
		 * (TC_PSTART) and lwp_continue() (TC_CSTART) must have
		 * requested that the thread be run.
		 * Just calling setrun() is not sufficient to set a stopped
		 * thread running.  TP_TXSTART is always set if the thread
		 * is not stopped by a jobcontrol stop signal.
		 * TP_TPSTART is always set if /proc is not controlling it.
		 * TP_TCSTART is always set if lwp_suspend() didn't stop it.
		 * The thread won't be stopped unless one of these
		 * three mechanisms did it.
		 *
		 * These flags must be set before calling setrun_locked(t).
		 * They can't be passed as arguments because the streams
		 * code calls setrun() indirectly and the mechanism for
		 * doing so admits only one argument.  Note that the
		 * thread must be locked in order to change t_schedflag.
		 */
		if ((t->t_schedflag & TS_ALLSTART) != TS_ALLSTART)
			return;
		/*
		 * Process is no longer stopped (a thread is running).
		 */
		t->t_whystop = 0;
		t->t_whatstop = 0;
		/*
		 * Strictly speaking, we do not have to clear these
		 * flags here; they are cleared on entry to stop().
		 * However, they are confusing when doing kernel
		 * debugging or when they are revealed by ps(1).
		 */
		t->t_schedflag &= ~TS_ALLSTART;
		THREAD_TRANSITION(t);	/* drop stopped-thread lock */
		ASSERT(t->t_lockp == &transition_lock);
		ASSERT(t->t_wchan0 == NULL && t->t_wchan == NULL);
		/*
		 * Let the class put the process on the dispatcher queue.
		 */
		CL_SETRUN(t);
	}
}

void
setrun(kthread_t *t)
{
	thread_lock(t);
	setrun_locked(t);
	thread_unlock(t);
}

/*
 * Unpin an interrupted thread.
 *	When an interrupt occurs, the interrupt is handled on the stack
 *	of an interrupt thread, taken from a pool linked to the CPU structure.
 *
 *	When swtch() is switching away from an interrupt thread because it
 *	blocked or was preempted, this routine is called to complete the
 *	saving of the interrupted thread state, and returns the interrupted
 *	thread pointer so it may be resumed.
 *
 *	Called by swtch() only at high spl.
 */
kthread_t *
thread_unpin()
{
	kthread_t	*t = curthread;	/* current thread */
	kthread_t	*itp;		/* interrupted thread */
	int		i;		/* interrupt level */
	extern int	intr_passivate();

	ASSERT(t->t_intr != NULL);

	itp = t->t_intr;		/* interrupted thread */
	t->t_intr = NULL;		/* clear interrupt ptr */

	/*
	 * Get state from interrupt thread for the one
	 * it interrupted.
	 */

	i = intr_passivate(t, itp);

	TRACE_5(TR_FAC_INTR, TR_INTR_PASSIVATE,
	    "intr_passivate:level %d curthread %p (%T) ithread %p (%T)",
	    i, t, t, itp, itp);

	/*
	 * Dissociate the current thread from the interrupted thread's LWP.
	 */
	t->t_lwp = NULL;

	/*
	 * Interrupt handlers above the level that spinlocks block must
	 * not block.
	 */
#if DEBUG
	if (i < 0 || i > LOCK_LEVEL)
		cmn_err(CE_PANIC, "thread_unpin: ipl out of range %x", i);
#endif

	/*
	 * Compute the CPU's base interrupt level based on the active
	 * interrupts.
	 */
	ASSERT(CPU->cpu_intr_actv & (1 << i));
	set_base_spl();

	return (itp);
}

/*
 * Create and initialize an interrupt thread.
 *	Returns non-zero on error.
 *	Called at spl7() or better.
 */
void
thread_create_intr(struct cpu *cp)
{
	kthread_t *tp;

	tp = thread_create(NULL, 0,
	    (void (*)())thread_create_intr, NULL, 0, &p0, TS_ONPROC, 0);

	/*
	 * Set the thread in the TS_FREE state.  The state will change
	 * to TS_ONPROC only while the interrupt is active.  Think of these
	 * as being on a private free list for the CPU.  Being TS_FREE keeps
	 * inactive interrupt threads out of debugger thread lists.
	 *
	 * We cannot call thread_create with TS_FREE because of the current
	 * checks there for ONPROC.  Fix this when thread_create takes flags.
	 */
	THREAD_FREEINTR(tp, cp);

	/*
	 * Nobody should ever reference the credentials of an interrupt
	 * thread so make it NULL to catch any such references.
	 */
	tp->t_cred = NULL;
	tp->t_flag |= T_INTR_THREAD;
	tp->t_cpu = cp;
	tp->t_bound_cpu = cp;
	tp->t_disp_queue = cp->cpu_disp;
	tp->t_affinitycnt = 1;
	tp->t_preempt = 1;

	/*
	 * Don't make a user-requested binding on this thread so that
	 * the processor can be offlined.
	 */
	tp->t_bind_cpu = PBIND_NONE;	/* no USER-requested binding */
	tp->t_bind_pset = PS_NONE;

#if defined(__i386) || defined(__amd64)
	tp->t_stk -= STACK_ALIGN;
	*(tp->t_stk) = 0;		/* terminate intr thread stack */
#endif

	/*
	 * Link onto CPU's interrupt pool.
	 */
	tp->t_link = cp->cpu_intr_thread;
	cp->cpu_intr_thread = tp;
}

/*
 * TSD -- THREAD SPECIFIC DATA
 */
static kmutex_t		tsd_mutex;	 /* linked list spin lock */
static uint_t		tsd_nkeys;	 /* size of destructor array */
/* per-key destructor funcs */
static void		(**tsd_destructor)(void *);
/* list of tsd_thread's */
static struct tsd_thread	*tsd_list;

/*
 * Default destructor
 *	Needed because NULL destructor means that the key is unused
 */
/* ARGSUSED */
void
tsd_defaultdestructor(void *value)
{}

/*
 * Create a key (index into per thread array)
 *	Locks out tsd_create, tsd_destroy, and tsd_exit
 *	May allocate memory with lock held
 */
void
tsd_create(uint_t *keyp, void (*destructor)(void *))
{
	int	i;
	uint_t	nkeys;

	/*
	 * if key is allocated, do nothing
	 */
	mutex_enter(&tsd_mutex);
	if (*keyp) {
		mutex_exit(&tsd_mutex);
		return;
	}
	/*
	 * find an unused key
	 */
	if (destructor == NULL)
		destructor = tsd_defaultdestructor;

	for (i = 0; i < tsd_nkeys; ++i)
		if (tsd_destructor[i] == NULL)
			break;

	/*
	 * if no unused keys, increase the size of the destructor array
	 */
	if (i == tsd_nkeys) {
		if ((nkeys = (tsd_nkeys << 1)) == 0)
			nkeys = 1;
		tsd_destructor =
		    (void (**)(void *))tsd_realloc((void *)tsd_destructor,
		    (size_t)(tsd_nkeys * sizeof (void (*)(void *))),
		    (size_t)(nkeys * sizeof (void (*)(void *))));
		tsd_nkeys = nkeys;
	}

	/*
	 * allocate the next available unused key
	 */
	tsd_destructor[i] = destructor;
	*keyp = i + 1;
	mutex_exit(&tsd_mutex);
}

/*
 * Destroy a key -- this is for unloadable modules
 *
 * Assumes that the caller is preventing tsd_set and tsd_get
 * Locks out tsd_create, tsd_destroy, and tsd_exit
 * May free memory with lock held
 */
void
tsd_destroy(uint_t *keyp)
{
	uint_t key;
	struct tsd_thread *tsd;

	/*
	 * protect the key namespace and our destructor lists
	 */
	mutex_enter(&tsd_mutex);
	key = *keyp;
	*keyp = 0;

	ASSERT(key <= tsd_nkeys);

	/*
	 * if the key is valid
	 */
	if (key != 0) {
		uint_t k = key - 1;
		/*
		 * for every thread with TSD, call key's destructor
		 */
		for (tsd = tsd_list; tsd; tsd = tsd->ts_next) {
			/*
			 * no TSD for key in this thread
			 */
			if (key > tsd->ts_nkeys)
				continue;
			/*
			 * call destructor for key
			 */
			if (tsd->ts_value[k] && tsd_destructor[k])
				(*tsd_destructor[k])(tsd->ts_value[k]);
			/*
			 * reset value for key
			 */
			tsd->ts_value[k] = NULL;
		}
		/*
		 * actually free the key (NULL destructor == unused)
		 */
		tsd_destructor[k] = NULL;
	}

	mutex_exit(&tsd_mutex);
}

/*
 * Quickly return the per thread value that was stored with the specified key
 * Assumes the caller is protecting key from tsd_create and tsd_destroy
 */
void *
tsd_get(uint_t key)
{
	return (tsd_agent_get(curthread, key));
}

/*
 * Set a per thread value indexed with the specified key
 */
int
tsd_set(uint_t key, void *value)
{
	return (tsd_agent_set(curthread, key, value));
}
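
/*
 * Illustrative example: a module keeps a per-thread scratch buffer by
 * allocating a key once and then storing and fetching per-thread values.
 * my_tsd_key, my_buf_free() and MY_BUFSZ are hypothetical.
 *
 *	static uint_t my_tsd_key;
 *
 *	tsd_create(&my_tsd_key, my_buf_free);
 *	(void) tsd_set(my_tsd_key, kmem_zalloc(MY_BUFSZ, KM_SLEEP));
 *	...
 *	char *buf = tsd_get(my_tsd_key);
 *	...
 *	tsd_destroy(&my_tsd_key);	(e.g. at module unload)
 *
 * Keys are 1-based; a key value of zero always means "unallocated".
 */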

/*
 * Like tsd_get(), except that the agent lwp can get the tsd of
 * another thread in the same process (the agent thread only runs when the
 * process is completely stopped by /proc), or syslwp is creating a new lwp.
 */
void *
tsd_agent_get(kthread_t *t, uint_t key)
{
	struct tsd_thread *tsd = t->t_tsd;

	ASSERT(t == curthread ||
	    ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);

	if (key && tsd != NULL && key <= tsd->ts_nkeys)
		return (tsd->ts_value[key - 1]);
	return (NULL);
}

/*
 * Like tsd_set(), except that the agent lwp can set the tsd of
 * another thread in the same process, or syslwp can set the tsd
 * of a thread it's in the middle of creating.
 *
 * Assumes the caller is protecting key from tsd_create and tsd_destroy
 * May lock out tsd_destroy (and tsd_create), may allocate memory with
 * lock held
 */
int
tsd_agent_set(kthread_t *t, uint_t key, void *value)
{
	struct tsd_thread *tsd = t->t_tsd;

	ASSERT(t == curthread ||
	    ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);

	if (key == 0)
		return (EINVAL);
	if (tsd == NULL)
		tsd = t->t_tsd = kmem_zalloc(sizeof (*tsd), KM_SLEEP);
	if (key <= tsd->ts_nkeys) {
		tsd->ts_value[key - 1] = value;
		return (0);
	}

	ASSERT(key <= tsd_nkeys);

	/*
	 * lock out tsd_destroy()
	 */
	mutex_enter(&tsd_mutex);
	if (tsd->ts_nkeys == 0) {
		/*
		 * Link onto list of threads with TSD
		 */
		if ((tsd->ts_next = tsd_list) != NULL)
			tsd_list->ts_prev = tsd;
		tsd_list = tsd;
	}

	/*
	 * Allocate thread local storage and set the value for key
	 */
	tsd->ts_value = tsd_realloc(tsd->ts_value,
	    tsd->ts_nkeys * sizeof (void *),
	    key * sizeof (void *));
	tsd->ts_nkeys = key;
	tsd->ts_value[key - 1] = value;
	mutex_exit(&tsd_mutex);

	return (0);
}


/*
 * Return the per thread value that was stored with the specified key
 *	If necessary, create the key and the value
 *	Assumes the caller is protecting *keyp from tsd_destroy
 */
void *
tsd_getcreate(uint_t *keyp, void (*destroy)(void *), void *(*allocate)(void))
{
	void *value;
	uint_t key = *keyp;
	struct tsd_thread *tsd = curthread->t_tsd;

	if (tsd == NULL)
		tsd = curthread->t_tsd = kmem_zalloc(sizeof (*tsd), KM_SLEEP);
	if (key && key <= tsd->ts_nkeys && (value = tsd->ts_value[key - 1]))
		return (value);
	if (key == 0)
		tsd_create(keyp, destroy);
	(void) tsd_set(*keyp, value = (*allocate)());

	return (value);
}
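
/*
 * Illustrative example: tsd_getcreate() collapses the create/set/get pattern
 * above into one call.  my_key, my_destroy() and my_alloc() are hypothetical.
 *
 *	static uint_t my_key;
 *
 *	struct my_state *sp = tsd_getcreate(&my_key, my_destroy, my_alloc);
 *
 * The first call on a given thread allocates the key (if needed) and stores
 * the value returned by my_alloc(); later calls return the cached value.
 */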

/*
 * Called from thread_exit() to run the destructor function for each tsd
 *	Locks out tsd_create and tsd_destroy
 *	Assumes that the destructor *DOES NOT* use tsd
 */
void
tsd_exit(void)
{
	int i;
	struct tsd_thread *tsd = curthread->t_tsd;

	if (tsd == NULL)
		return;

	if (tsd->ts_nkeys == 0) {
		kmem_free(tsd, sizeof (*tsd));
		curthread->t_tsd = NULL;
		return;
	}

	/*
	 * lock out tsd_create and tsd_destroy, call
	 * the destructor, and mark the value as destroyed.
	 */
	mutex_enter(&tsd_mutex);

	for (i = 0; i < tsd->ts_nkeys; i++) {
		if (tsd->ts_value[i] && tsd_destructor[i])
			(*tsd_destructor[i])(tsd->ts_value[i]);
		tsd->ts_value[i] = NULL;
	}

	/*
	 * remove from linked list of threads with TSD
	 */
	if (tsd->ts_next)
		tsd->ts_next->ts_prev = tsd->ts_prev;
	if (tsd->ts_prev)
		tsd->ts_prev->ts_next = tsd->ts_next;
	if (tsd_list == tsd)
		tsd_list = tsd->ts_next;

	mutex_exit(&tsd_mutex);

	/*
	 * free up the TSD
	 */
	kmem_free(tsd->ts_value, tsd->ts_nkeys * sizeof (void *));
	kmem_free(tsd, sizeof (struct tsd_thread));
	curthread->t_tsd = NULL;
}

/*
 * realloc
 */
static void *
tsd_realloc(void *old, size_t osize, size_t nsize)
{
	void *new;

	new = kmem_zalloc(nsize, KM_SLEEP);
	if (old) {
		bcopy(old, new, osize);
		kmem_free(old, osize);
	}
	return (new);
}

/*
 * Return non-zero if an interrupt is being serviced.
 */
int
servicing_interrupt()
{
	int onintr = 0;

	/* Are we an interrupt thread */
	if (curthread->t_flag & T_INTR_THREAD)
		return (1);
	/* Are we servicing a high level interrupt? */
	if (CPU_ON_INTR(CPU)) {
		kpreempt_disable();
		onintr = CPU_ON_INTR(CPU);
		kpreempt_enable();
	}
	return (onintr);
}


/*
 * Change the dispatch priority of a thread in the system.
 * Used when raising or lowering a thread's priority.
 * (E.g., priority inheritance)
 *
 * Since threads are queued according to their priority, we
 * must check the thread's state to determine whether it
 * is on a queue somewhere.  If it is, we've got to:
 *
 *	o Dequeue the thread.
 *	o Change its effective priority.
 *	o Enqueue the thread.
 *
 * Assumptions: The thread whose priority we wish to change
 * must be locked before we call thread_change_(e)pri().
 * The thread_change(e)pri() function doesn't drop the thread
 * lock--that must be done by its caller.
 */
void
thread_change_epri(kthread_t *t, pri_t disp_pri)
{
	uint_t	state;

	ASSERT(THREAD_LOCK_HELD(t));

	/*
	 * If the inherited priority hasn't actually changed,
	 * just return.
	 */
	if (t->t_epri == disp_pri)
		return;

	state = t->t_state;

	/*
	 * If it's not on a queue, change the priority with impunity.
	 */
	if ((state & (TS_SLEEP | TS_RUN | TS_WAIT)) == 0) {
		t->t_epri = disp_pri;
		if (state == TS_ONPROC) {
			cpu_t *cp = t->t_disp_queue->disp_cpu;

			if (t == cp->cpu_dispthread)
				cp->cpu_dispatch_pri = DISP_PRIO(t);
		}
	} else if (state == TS_SLEEP) {
		/*
		 * Take the thread out of its sleep queue.
		 * Change the inherited priority.
		 * Re-enqueue the thread.
		 * Each synchronization object exports a function
		 * to do this in an appropriate manner.
		 */
		SOBJ_CHANGE_EPRI(t->t_sobj_ops, t, disp_pri);
	} else if (state == TS_WAIT) {
		/*
		 * Re-enqueue a thread on the wait queue if its
		 * effective priority needs to change.
		 */
		if (disp_pri != t->t_epri)
			waitq_change_pri(t, disp_pri);
	} else {
		/*
		 * The thread is on a run queue.
		 * Note: setbackdq() may not put the thread
		 * back on the same run queue where it originally
		 * resided.
		 */
		(void) dispdeq(t);
		t->t_epri = disp_pri;
		setbackdq(t);
	}
	schedctl_set_cidpri(t);
}

/*
 * Function: Change the t_pri field of a thread.
 * Side Effects: Adjust the thread ordering on a run queue
 *		 or sleep queue, if necessary.
 * Returns: 1 if the thread was on a run queue, else 0.
 */
int
thread_change_pri(kthread_t *t, pri_t disp_pri, int front)
{
	uint_t	state;
	int	on_rq = 0;

	ASSERT(THREAD_LOCK_HELD(t));

	state = t->t_state;
	THREAD_WILLCHANGE_PRI(t, disp_pri);

	/*
	 * If it's not on a queue, change the priority with impunity.
	 */
	if ((state & (TS_SLEEP | TS_RUN | TS_WAIT)) == 0) {
		t->t_pri = disp_pri;

		if (state == TS_ONPROC) {
			cpu_t *cp = t->t_disp_queue->disp_cpu;

			if (t == cp->cpu_dispthread)
				cp->cpu_dispatch_pri = DISP_PRIO(t);
		}
	} else if (state == TS_SLEEP) {
		/*
		 * If the priority has changed, take the thread out of
		 * its sleep queue and change the priority.
		 * Re-enqueue the thread.
		 * Each synchronization object exports a function
		 * to do this in an appropriate manner.
		 */
		if (disp_pri != t->t_pri)
			SOBJ_CHANGE_PRI(t->t_sobj_ops, t, disp_pri);
	} else if (state == TS_WAIT) {
		/*
		 * Re-enqueue a thread on the wait queue if its
		 * priority needs to change.
		 */
		if (disp_pri != t->t_pri)
			waitq_change_pri(t, disp_pri);
	} else {
		/*
		 * The thread is on a run queue.
		 * Note: setbackdq() may not put the thread
		 * back on the same run queue where it originally
		 * resided.
		 *
		 * We still requeue the thread even if the priority
		 * is unchanged to preserve round-robin (and other)
		 * effects between threads of the same priority.
		 */
		on_rq = dispdeq(t);
		ASSERT(on_rq);
		t->t_pri = disp_pri;
		if (front) {
			setfrontdq(t);
		} else {
			setbackdq(t);
		}
	}
	schedctl_set_cidpri(t);
	return (on_rq);
}
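
/*
 * Illustrative example: per the assumptions documented above, a caller (a
 * scheduling class, say) changes a thread's priority while holding the
 * thread lock and drops the lock itself.  new_pri is hypothetical.
 *
 *	thread_lock(t);
 *	(void) thread_change_pri(t, new_pri, 0);
 *	thread_unlock(t);
 *
 * Passing front == 1 would requeue the thread at the front of its new
 * dispatch queue instead of the back.
 */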

/*
 * When the kmem_stackinfo tunable is set, fill the kernel thread stack with
 * a specific pattern.
 */
static void
stkinfo_begin(kthread_t *t)
{
	caddr_t	start;	/* stack start */
	caddr_t	end;	/* stack end  */
	uint64_t *ptr;	/* pattern pointer */

	/*
	 * Stack grows up or down, see thread_create(),
	 * compute stack memory area start and end (start < end).
	 */
	if (t->t_stk > t->t_stkbase) {
		/* stack grows down */
		start = t->t_stkbase;
		end = t->t_stk;
	} else {
		/* stack grows up */
		start = t->t_stk;
		end = t->t_stkbase;
	}

	/*
	 * Stackinfo pattern size is 8 bytes.  Ensure proper 8-byte
	 * alignment for start and end in stack area boundaries
	 * (protection against corrupt t_stkbase/t_stk data).
	 */
	if ((((uintptr_t)start) & 0x7) != 0) {
		start = (caddr_t)((((uintptr_t)start) & (~0x7)) + 8);
	}
	end = (caddr_t)(((uintptr_t)end) & (~0x7));

	if ((end <= start) || (end - start) > (1024 * 1024)) {
		/* negative or stack size > 1 meg, assume bogus */
		return;
	}

	/* fill stack area with a pattern (instead of zeros) */
	ptr = (uint64_t *)((void *)start);
	while (ptr < (uint64_t *)((void *)end)) {
		*ptr++ = KMEM_STKINFO_PATTERN;
	}
}


/*
 * When the kmem_stackinfo tunable is set, create the stackinfo log if it
 * doesn't already exist, compute the percentage of kernel stack actually
 * used, and record it in the log if it ranks among the highest usages seen.
 */
static void
stkinfo_end(kthread_t *t)
{
	caddr_t	start;	/* stack start */
	caddr_t	end;	/* stack end  */
	uint64_t *ptr;	/* pattern pointer */
	size_t stksz;	/* stack size */
	size_t smallest = 0;
	size_t percent = 0;
	uint_t index = 0;
	uint_t i;
	static size_t smallest_percent = (size_t)-1;
	static uint_t full = 0;

	/* create the stackinfo log, if doesn't already exist */
	mutex_enter(&kmem_stkinfo_lock);
	if (kmem_stkinfo_log == NULL) {
		kmem_stkinfo_log = (kmem_stkinfo_t *)
		    kmem_zalloc(KMEM_STKINFO_LOG_SIZE *
		    (sizeof (kmem_stkinfo_t)), KM_NOSLEEP);
		if (kmem_stkinfo_log == NULL) {
			mutex_exit(&kmem_stkinfo_lock);
			return;
		}
	}
	mutex_exit(&kmem_stkinfo_lock);

	/*
	 * Stack grows up or down, see thread_create(),
	 * compute stack memory area start and end (start < end).
	 */
	if (t->t_stk > t->t_stkbase) {
		/* stack grows down */
		start = t->t_stkbase;
		end = t->t_stk;
	} else {
		/* stack grows up */
		start = t->t_stk;
		end = t->t_stkbase;
	}

	/* stack size as found in kthread_t */
	stksz = end - start;

	/*
	 * Stackinfo pattern size is 8 bytes.  Ensure proper 8-byte
	 * alignment for start and end in stack area boundaries
	 * (protection against corrupt t_stkbase/t_stk data).
	 */
	if ((((uintptr_t)start) & 0x7) != 0) {
		start = (caddr_t)((((uintptr_t)start) & (~0x7)) + 8);
	}
	end = (caddr_t)(((uintptr_t)end) & (~0x7));

	if ((end <= start) || (end - start) > (1024 * 1024)) {
		/* negative or stack size > 1 meg, assume bogus */
		return;
	}

	/* search until no pattern in the stack */
	if (t->t_stk > t->t_stkbase) {
		/* stack grows down */
#if defined(__i386) || defined(__amd64)
		/*
		 * 6 longs are pushed on stack, see thread_load().  Skip
		 * them, so if kthread has never run, percent is zero.
		 * 8-byte alignment is preserved for a 32 bit kernel,
		 * 6 x 4 = 24, 24 is a multiple of 8.
2006 * 2007 */ 2008 end -= (6 * sizeof (long)); 2009 #endif 2010 ptr = (uint64_t *)((void *)start); 2011 while (ptr < (uint64_t *)((void *)end)) { 2012 if (*ptr != KMEM_STKINFO_PATTERN) { 2013 percent = stkinfo_percent(end, 2014 start, (caddr_t)ptr); 2015 break; 2016 } 2017 ptr++; 2018 } 2019 } else { 2020 /* stack grows up */ 2021 ptr = (uint64_t *)((void *)end); 2022 ptr--; 2023 while (ptr >= (uint64_t *)((void *)start)) { 2024 if (*ptr != KMEM_STKINFO_PATTERN) { 2025 percent = stkinfo_percent(start, 2026 end, (caddr_t)ptr); 2027 break; 2028 } 2029 ptr--; 2030 } 2031 } 2032 2033 DTRACE_PROBE3(stack__usage, kthread_t *, t, 2034 size_t, stksz, size_t, percent); 2035 2036 if (percent == 0) { 2037 return; 2038 } 2039 2040 mutex_enter(&kmem_stkinfo_lock); 2041 if (full == KMEM_STKINFO_LOG_SIZE && percent < smallest_percent) { 2042 /* 2043 * The log is full and already contains the highest values 2044 */ 2045 mutex_exit(&kmem_stkinfo_lock); 2046 return; 2047 } 2048 2049 /* keep a log of the highest used stack */ 2050 for (i = 0; i < KMEM_STKINFO_LOG_SIZE; i++) { 2051 if (kmem_stkinfo_log[i].percent == 0) { 2052 index = i; 2053 full++; 2054 break; 2055 } 2056 if (smallest == 0) { 2057 smallest = kmem_stkinfo_log[i].percent; 2058 index = i; 2059 continue; 2060 } 2061 if (kmem_stkinfo_log[i].percent < smallest) { 2062 smallest = kmem_stkinfo_log[i].percent; 2063 index = i; 2064 } 2065 } 2066 2067 if (percent >= kmem_stkinfo_log[index].percent) { 2068 kmem_stkinfo_log[index].kthread = (caddr_t)t; 2069 kmem_stkinfo_log[index].t_startpc = (caddr_t)t->t_startpc; 2070 kmem_stkinfo_log[index].start = start; 2071 kmem_stkinfo_log[index].stksz = stksz; 2072 kmem_stkinfo_log[index].percent = percent; 2073 kmem_stkinfo_log[index].t_tid = t->t_tid; 2074 kmem_stkinfo_log[index].cmd[0] = '\0'; 2075 if (t->t_tid != 0) { 2076 stksz = strlen((t->t_procp)->p_user.u_comm); 2077 if (stksz >= KMEM_STKINFO_STR_SIZE) { 2078 stksz = KMEM_STKINFO_STR_SIZE - 1; 2079 kmem_stkinfo_log[index].cmd[stksz] = '\0'; 2080 } else { 2081 stksz += 1; 2082 } 2083 (void) memcpy(kmem_stkinfo_log[index].cmd, 2084 (t->t_procp)->p_user.u_comm, stksz); 2085 } 2086 if (percent < smallest_percent) { 2087 smallest_percent = percent; 2088 } 2089 } 2090 mutex_exit(&kmem_stkinfo_lock); 2091 } 2092 2093 /* 2094 * Tunable kmem_stackinfo is set, compute stack utilization percentage. 2095 */ 2096 static size_t 2097 stkinfo_percent(caddr_t t_stk, caddr_t t_stkbase, caddr_t sp) 2098 { 2099 size_t percent; 2100 size_t s; 2101 2102 if (t_stk > t_stkbase) { 2103 /* stack grows down */ 2104 if (sp > t_stk) { 2105 return (0); 2106 } 2107 if (sp < t_stkbase) { 2108 return (100); 2109 } 2110 percent = t_stk - sp + 1; 2111 s = t_stk - t_stkbase + 1; 2112 } else { 2113 /* stack grows up */ 2114 if (sp < t_stk) { 2115 return (0); 2116 } 2117 if (sp > t_stkbase) { 2118 return (100); 2119 } 2120 percent = sp - t_stk + 1; 2121 s = t_stkbase - t_stk + 1; 2122 } 2123 percent = ((100 * percent) / s) + 1; 2124 if (percent > 100) { 2125 percent = 100; 2126 } 2127 return (percent); 2128 }