/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

/*
 * Architecture-independent CPU control functions.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/var.h>
#include <sys/thread.h>
#include <sys/cpuvar.h>
#include <sys/cpu_event.h>
#include <sys/kstat.h>
#include <sys/uadmin.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/cmn_err.h>
#include <sys/procset.h>
#include <sys/processor.h>
#include <sys/debug.h>
#include <sys/cpupart.h>
#include <sys/lgrp.h>
#include <sys/pset.h>
#include <sys/pghw.h>
#include <sys/kmem.h>
#include <sys/kmem_impl.h>	/* to set per-cpu kmem_cache offset */
#include <sys/atomic.h>
#include <sys/callb.h>
#include <sys/vtrace.h>
#include <sys/cyclic.h>
#include <sys/bitmap.h>
#include <sys/nvpair.h>
#include <sys/pool_pset.h>
#include <sys/msacct.h>
#include <sys/time.h>
#include <sys/archsystm.h>
#include <sys/sdt.h>
#if defined(__x86) || defined(__amd64)
#include <sys/x86_archext.h>
#endif
#include <sys/callo.h>

extern int	mp_cpu_start(cpu_t *);
extern int	mp_cpu_stop(cpu_t *);
extern int	mp_cpu_poweron(cpu_t *);
extern int	mp_cpu_poweroff(cpu_t *);
extern int	mp_cpu_configure(int);
extern int	mp_cpu_unconfigure(int);
extern void	mp_cpu_faulted_enter(cpu_t *);
extern void	mp_cpu_faulted_exit(cpu_t *);

extern int cmp_cpu_to_chip(processorid_t cpuid);
#ifdef __sparcv9
extern char *cpu_fru_fmri(cpu_t *cp);
#endif

static void cpu_add_active_internal(cpu_t *cp);
static void cpu_remove_active(cpu_t *cp);
static void cpu_info_kstat_create(cpu_t *cp);
static void cpu_info_kstat_destroy(cpu_t *cp);
static void cpu_stats_kstat_create(cpu_t *cp);
static void cpu_stats_kstat_destroy(cpu_t *cp);

static int cpu_sys_stats_ks_update(kstat_t *ksp, int rw);
static int cpu_vm_stats_ks_update(kstat_t *ksp, int rw);
static int cpu_stat_ks_update(kstat_t *ksp, int rw);
static int cpu_state_change_hooks(int, cpu_setup_t, cpu_setup_t);

/*
 * cpu_lock protects ncpus, ncpus_online, cpu_flag, cpu_list, cpu_active,
 * max_cpu_seqid_ever, and dispatch queue reallocations.  The lock ordering
 * with respect to related locks is:
 *
 *	cpu_lock --> thread_free_lock ---> p_lock ---> thread_lock()
 *
 * Warning: Certain sections of code do not use the cpu_lock when
 * traversing the cpu_list (e.g. mutex_vector_enter(), clock()).
 * Since all cpus are paused during modifications to this list, a solution
 * to protect the list is to either disable kernel preemption while
 * walking the list, *or* recheck the cpu_next pointer at each
 * iteration in the loop.  Note that in no cases can any cached
 * copies of the cpu pointers be kept as they may become invalid.
 */
kmutex_t	cpu_lock;
cpu_t		*cpu_list;		/* list of all CPUs */
cpu_t		*clock_cpu_list;	/* used by clock to walk CPUs */
cpu_t		*cpu_active;		/* list of active CPUs */
static cpuset_t	cpu_available;		/* set of available CPUs */
cpuset_t	cpu_seqid_inuse;	/* which cpu_seqids are in use */

cpu_t		**cpu_seq;		/* ptrs to CPUs, indexed by seq_id */

/*
 * max_ncpus keeps the max cpus the system can have.  Initially
 * it's NCPU, but since most archs scan the devtree for cpus
 * fairly early on during boot, the real max can be known before
 * ncpus is set (useful for early NCPU based allocations).
 */
int max_ncpus = NCPU;
/*
 * Platforms that set max_ncpus to the maximum number of cpus that can be
 * dynamically added will set boot_max_ncpus to the number of cpus found
 * at device tree scan time during boot.
 */
int boot_max_ncpus = -1;
int boot_ncpus = -1;
/*
 * Maximum possible CPU id.  This can never be >= NCPU since NCPU is
 * used to size arrays that are indexed by CPU id.
 */
processorid_t max_cpuid = NCPU - 1;

/*
 * The maximum cpu_seqid ever given out.  This number can only grow and never
 * shrink.  It can be used to optimize NCPU loops to avoid going through CPUs
 * which were never on-line.
 */
processorid_t max_cpu_seqid_ever = 0;

int ncpus = 1;
int ncpus_online = 1;

/*
 * CPU that we're trying to offline.  Protected by cpu_lock.
 */
cpu_t	*cpu_inmotion;

/*
 * Can be raised to suppress further weakbindings, which are instead
 * satisfied by disabling preemption.  Must be raised/lowered under cpu_lock,
 * while individual thread weakbinding synchronization is done under thread
 * lock.
 */
int weakbindingbarrier;

/*
 * Variables used in pause_cpus().
 */
static volatile char safe_list[NCPU];

static struct _cpu_pause_info {
	int		cp_spl;		/* spl saved in pause_cpus() */
	volatile int	cp_go;		/* Go signal sent after all ready */
	int		cp_count;	/* # of CPUs to pause */
	ksema_t		cp_sem;		/* synch pause_cpus & cpu_pause */
	kthread_id_t	cp_paused;
} cpu_pause_info;

static kmutex_t pause_free_mutex;
static kcondvar_t pause_free_cv;

void *(*cpu_pause_func)(void *) = NULL;


static struct cpu_sys_stats_ks_data {
	kstat_named_t cpu_ticks_idle;
	kstat_named_t cpu_ticks_user;
	kstat_named_t cpu_ticks_kernel;
	kstat_named_t cpu_ticks_wait;
	kstat_named_t cpu_nsec_idle;
	kstat_named_t cpu_nsec_user;
	kstat_named_t cpu_nsec_kernel;
	kstat_named_t cpu_nsec_dtrace;
	kstat_named_t cpu_nsec_intr;
	kstat_named_t cpu_load_intr;
	kstat_named_t wait_ticks_io;
	kstat_named_t dtrace_probes;
	kstat_named_t bread;
	kstat_named_t bwrite;
	kstat_named_t lread;
	kstat_named_t lwrite;
	kstat_named_t phread;
	kstat_named_t phwrite;
	kstat_named_t pswitch;
	kstat_named_t trap;
	kstat_named_t intr;
	kstat_named_t syscall;
	kstat_named_t sysread;
	kstat_named_t syswrite;
	kstat_named_t sysfork;
	kstat_named_t sysvfork;
	kstat_named_t sysexec;
	kstat_named_t readch;
	kstat_named_t writech;
	kstat_named_t rcvint;
	kstat_named_t xmtint;
	kstat_named_t mdmint;
	kstat_named_t rawch;
	kstat_named_t canch;
	kstat_named_t outch;
	kstat_named_t msg;
	kstat_named_t sema;
	kstat_named_t namei;
	kstat_named_t ufsiget;
	kstat_named_t ufsdirblk;
	kstat_named_t ufsipage;
	kstat_named_t ufsinopage;
	kstat_named_t procovf;
	kstat_named_t intrthread;
	kstat_named_t intrblk;
	kstat_named_t intrunpin;
	kstat_named_t idlethread;
	kstat_named_t inv_swtch;
	kstat_named_t nthreads;
	kstat_named_t cpumigrate;
	kstat_named_t xcalls;
	kstat_named_t mutex_adenters;
	kstat_named_t rw_rdfails;
	kstat_named_t rw_wrfails;
	kstat_named_t modload;
	kstat_named_t modunload;
	kstat_named_t bawrite;
	kstat_named_t iowait;
} cpu_sys_stats_ks_data_template = {
	{ "cpu_ticks_idle",	KSTAT_DATA_UINT64 },
	{ "cpu_ticks_user",	KSTAT_DATA_UINT64 },
	{ "cpu_ticks_kernel",	KSTAT_DATA_UINT64 },
	{ "cpu_ticks_wait",	KSTAT_DATA_UINT64 },
	{ "cpu_nsec_idle",	KSTAT_DATA_UINT64 },
	{ "cpu_nsec_user",	KSTAT_DATA_UINT64 },
	{ "cpu_nsec_kernel",	KSTAT_DATA_UINT64 },
	{ "cpu_nsec_dtrace",	KSTAT_DATA_UINT64 },
	{ "cpu_nsec_intr",	KSTAT_DATA_UINT64 },
	{ "cpu_load_intr",	KSTAT_DATA_UINT64 },
	{ "wait_ticks_io",	KSTAT_DATA_UINT64 },
	{ "dtrace_probes",	KSTAT_DATA_UINT64 },
	{ "bread",		KSTAT_DATA_UINT64 },
	{ "bwrite",		KSTAT_DATA_UINT64 },
	{ "lread",		KSTAT_DATA_UINT64 },
	{ "lwrite",		KSTAT_DATA_UINT64 },
	{ "phread",		KSTAT_DATA_UINT64 },
	{ "phwrite",		KSTAT_DATA_UINT64 },
	{ "pswitch",		KSTAT_DATA_UINT64 },
	{ "trap",		KSTAT_DATA_UINT64 },
	{ "intr",		KSTAT_DATA_UINT64 },
	{ "syscall",		KSTAT_DATA_UINT64 },
	{ "sysread",		KSTAT_DATA_UINT64 },
	{ "syswrite",		KSTAT_DATA_UINT64 },
	{ "sysfork",		KSTAT_DATA_UINT64 },
	{ "sysvfork",		KSTAT_DATA_UINT64 },
	{ "sysexec",		KSTAT_DATA_UINT64 },
	{ "readch",		KSTAT_DATA_UINT64 },
	{ "writech",		KSTAT_DATA_UINT64 },
	{ "rcvint",		KSTAT_DATA_UINT64 },
	{ "xmtint",		KSTAT_DATA_UINT64 },
	{ "mdmint",		KSTAT_DATA_UINT64 },
	{ "rawch",		KSTAT_DATA_UINT64 },
	{ "canch",		KSTAT_DATA_UINT64 },
	{ "outch",		KSTAT_DATA_UINT64 },
	{ "msg",		KSTAT_DATA_UINT64 },
	{ "sema",		KSTAT_DATA_UINT64 },
	{ "namei",		KSTAT_DATA_UINT64 },
	{ "ufsiget",		KSTAT_DATA_UINT64 },
	{ "ufsdirblk",		KSTAT_DATA_UINT64 },
	{ "ufsipage",		KSTAT_DATA_UINT64 },
	{ "ufsinopage",		KSTAT_DATA_UINT64 },
	{ "procovf",		KSTAT_DATA_UINT64 },
	{ "intrthread",		KSTAT_DATA_UINT64 },
	{ "intrblk",		KSTAT_DATA_UINT64 },
	{ "intrunpin",		KSTAT_DATA_UINT64 },
	{ "idlethread",		KSTAT_DATA_UINT64 },
	{ "inv_swtch",		KSTAT_DATA_UINT64 },
	{ "nthreads",		KSTAT_DATA_UINT64 },
	{ "cpumigrate",		KSTAT_DATA_UINT64 },
	{ "xcalls",		KSTAT_DATA_UINT64 },
	{ "mutex_adenters",	KSTAT_DATA_UINT64 },
	{ "rw_rdfails",		KSTAT_DATA_UINT64 },
	{ "rw_wrfails",		KSTAT_DATA_UINT64 },
	{ "modload",		KSTAT_DATA_UINT64 },
	{ "modunload",		KSTAT_DATA_UINT64 },
	{ "bawrite",		KSTAT_DATA_UINT64 },
	{ "iowait",		KSTAT_DATA_UINT64 },
};

static struct cpu_vm_stats_ks_data {
	kstat_named_t pgrec;
	kstat_named_t pgfrec;
	kstat_named_t pgin;
	kstat_named_t pgpgin;
	kstat_named_t pgout;
	kstat_named_t pgpgout;
	kstat_named_t zfod;
	kstat_named_t dfree;
	kstat_named_t scan;
	kstat_named_t rev;
	kstat_named_t hat_fault;
	kstat_named_t as_fault;
	kstat_named_t maj_fault;
	kstat_named_t cow_fault;
	kstat_named_t prot_fault;
	kstat_named_t softlock;
	kstat_named_t kernel_asflt;
	kstat_named_t pgrrun;
	kstat_named_t execpgin;
	kstat_named_t execpgout;
	kstat_named_t execfree;
	kstat_named_t anonpgin;
	kstat_named_t anonpgout;
	kstat_named_t anonfree;
	kstat_named_t fspgin;
	kstat_named_t fspgout;
	kstat_named_t fsfree;
} cpu_vm_stats_ks_data_template = {
	{ "pgrec",		KSTAT_DATA_UINT64 },
	{ "pgfrec",		KSTAT_DATA_UINT64 },
	{ "pgin",		KSTAT_DATA_UINT64 },
	{ "pgpgin",		KSTAT_DATA_UINT64 },
	{ "pgout",		KSTAT_DATA_UINT64 },
	{ "pgpgout",		KSTAT_DATA_UINT64 },
	{ "zfod",		KSTAT_DATA_UINT64 },
	{ "dfree",		KSTAT_DATA_UINT64 },
	{ "scan",		KSTAT_DATA_UINT64 },
	{ "rev",		KSTAT_DATA_UINT64 },
	{ "hat_fault",		KSTAT_DATA_UINT64 },
	{ "as_fault",		KSTAT_DATA_UINT64 },
	{ "maj_fault",		KSTAT_DATA_UINT64 },
	{ "cow_fault",		KSTAT_DATA_UINT64 },
	{ "prot_fault",		KSTAT_DATA_UINT64 },
	{ "softlock",		KSTAT_DATA_UINT64 },
	{ "kernel_asflt",	KSTAT_DATA_UINT64 },
	{ "pgrrun",		KSTAT_DATA_UINT64 },
	{ "execpgin",		KSTAT_DATA_UINT64 },
	{ "execpgout",		KSTAT_DATA_UINT64 },
	{ "execfree",		KSTAT_DATA_UINT64 },
	{ "anonpgin",		KSTAT_DATA_UINT64 },
	{ "anonpgout",		KSTAT_DATA_UINT64 },
	{ "anonfree",		KSTAT_DATA_UINT64 },
	{ "fspgin",		KSTAT_DATA_UINT64 },
	{ "fspgout",		KSTAT_DATA_UINT64 },
	{ "fsfree",		KSTAT_DATA_UINT64 },
};

/*
 * Force the specified thread to migrate to the appropriate processor.
 * Called with thread lock held, returns with it dropped.
 */
static void
force_thread_migrate(kthread_id_t tp)
{
	ASSERT(THREAD_LOCK_HELD(tp));
	if (tp == curthread) {
		THREAD_TRANSITION(tp);
		CL_SETRUN(tp);
		thread_unlock_nopreempt(tp);
		swtch();
	} else {
		if (tp->t_state == TS_ONPROC) {
			cpu_surrender(tp);
		} else if (tp->t_state == TS_RUN) {
			(void) dispdeq(tp);
			setbackdq(tp);
		}
		thread_unlock(tp);
	}
}

/*
 * Set affinity for a specified CPU.
 * A reference count is incremented and the affinity is held until the
 * reference count is decremented to zero by thread_affinity_clear().
 * This is so regions of code requiring affinity can be nested.
 * Caller needs to ensure that cpu_id remains valid, which can be
 * done by holding cpu_lock across this call, unless the caller
 * specifies CPU_CURRENT in which case the cpu_lock will be acquired
 * by thread_affinity_set and CPU->cpu_id will be the target CPU.
 */
void
thread_affinity_set(kthread_id_t t, int cpu_id)
{
	cpu_t	*cp;
	int	c;

	ASSERT(!(t == curthread && t->t_weakbound_cpu != NULL));

	if ((c = cpu_id) == CPU_CURRENT) {
		mutex_enter(&cpu_lock);
		cpu_id = CPU->cpu_id;
	}
	/*
	 * We should be asserting that cpu_lock is held here, but
	 * the NCA code doesn't acquire it.  The following assert
	 * should be uncommented when the NCA code is fixed.
	 *
	 * ASSERT(MUTEX_HELD(&cpu_lock));
	 */
	ASSERT((cpu_id >= 0) && (cpu_id < NCPU));
	cp = cpu[cpu_id];
	ASSERT(cp != NULL);		/* user must provide a good cpu_id */
	/*
	 * If there is already a hard affinity requested, and this affinity
	 * conflicts with that, panic.
	 */
	thread_lock(t);
	if (t->t_affinitycnt > 0 && t->t_bound_cpu != cp) {
		panic("affinity_set: setting %p but already bound to %p",
		    (void *)cp, (void *)t->t_bound_cpu);
	}
	t->t_affinitycnt++;
	t->t_bound_cpu = cp;

	/*
	 * Make sure we're running on the right CPU.
	 */
	if (cp != t->t_cpu || t != curthread) {
		force_thread_migrate(t);	/* drops thread lock */
	} else {
		thread_unlock(t);
	}

	if (c == CPU_CURRENT)
		mutex_exit(&cpu_lock);
}

/*
 * Wrapper for backward compatibility.
 */
void
affinity_set(int cpu_id)
{
	thread_affinity_set(curthread, cpu_id);
}

/*
 * Decrement the affinity reservation count and if it becomes zero,
 * clear the CPU affinity for the current thread, or set it to the user's
 * software binding request.
 */
void
thread_affinity_clear(kthread_id_t t)
{
	register processorid_t binding;

	thread_lock(t);
	if (--t->t_affinitycnt == 0) {
		if ((binding = t->t_bind_cpu) == PBIND_NONE) {
			/*
			 * Adjust disp_max_unbound_pri if necessary.
			 */
			disp_adjust_unbound_pri(t);
			t->t_bound_cpu = NULL;
			if (t->t_cpu->cpu_part != t->t_cpupart) {
				force_thread_migrate(t);
				return;
			}
		} else {
			t->t_bound_cpu = cpu[binding];
			/*
			 * Make sure the thread is running on the bound CPU.
			 */
			if (t->t_cpu != t->t_bound_cpu) {
				force_thread_migrate(t);
				return;		/* already dropped lock */
			}
		}
	}
	thread_unlock(t);
}

/*
 * Wrapper for backward compatibility.
 */
void
affinity_clear(void)
{
	thread_affinity_clear(curthread);
}
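/*
 * Illustrative sketch only (editor's example, not part of the original
 * code): because t_affinitycnt nests, a caller that must stay on a
 * particular CPU for a stretch of code might, assuming it has validated
 * "target_cpu_id" under cpu_lock, do something like:
 *
 *	thread_affinity_set(curthread, target_cpu_id);
 *	...code that must execute on target_cpu_id; nested
 *	   thread_affinity_set()/thread_affinity_clear() pairs are fine...
 *	thread_affinity_clear(curthread);
 *
 * Passing CPU_CURRENT instead lets thread_affinity_set() take cpu_lock
 * itself and bind to whatever CPU the thread happens to be running on.
 */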
/*
 * Weak cpu affinity.  Bind to the "current" cpu for short periods
 * of time during which the thread must not block (but may be preempted).
 * Use this instead of kpreempt_disable() when it is only "no migration"
 * rather than "no preemption" semantics that are required - disabling
 * preemption holds higher priority threads off of cpu and if the
 * operation that is protected is more than momentary this is not good
 * for realtime etc.
 *
 * Weakly bound threads will not prevent a cpu from being offlined -
 * we'll only run them on the cpu to which they are weakly bound but
 * (because they do not block) we'll always be able to move them on to
 * another cpu at offline time if we give them just a short moment to
 * run during which they will unbind.  To give a cpu a chance of offlining,
 * however, we require a barrier to weak bindings that may be raised for a
 * given cpu (offline/move code may set this and then wait a short time for
 * existing weak bindings to drop); the cpu_inmotion pointer is that barrier.
 *
 * There are few restrictions on the calling context of thread_nomigrate.
 * The caller must not hold the thread lock.  Calls may be nested.
 *
 * After weakbinding a thread must not perform actions that may block.
 * In particular it must not call thread_affinity_set; calling that when
 * already weakbound is nonsensical anyway.
 *
 * If curthread is prevented from migrating for other reasons
 * (kernel preemption disabled; high pil; strongly bound; interrupt thread)
 * then the weak binding will succeed even if this cpu is the target of an
 * offline/move request.
 */
void
thread_nomigrate(void)
{
	cpu_t *cp;
	kthread_id_t t = curthread;

again:
	kpreempt_disable();
	cp = CPU;

	/*
	 * A highlevel interrupt must not modify t_nomigrate or
	 * t_weakbound_cpu of the thread it has interrupted.  A lowlevel
	 * interrupt thread cannot migrate and we can avoid the
	 * thread_lock call below by short-circuiting here.  In either
	 * case we can just return since no migration is possible and
	 * the condition will persist (ie, when we test for these again
	 * in thread_allowmigrate they can't have changed).  Migration
	 * is also impossible if we're at or above DISP_LEVEL pil.
	 */
	if (CPU_ON_INTR(cp) || t->t_flag & T_INTR_THREAD ||
	    getpil() >= DISP_LEVEL) {
		kpreempt_enable();
		return;
	}

	/*
	 * We must be consistent with existing weak bindings.  Since we
	 * may be interrupted between the increment of t_nomigrate and
	 * the store to t_weakbound_cpu below we cannot assume that
	 * t_weakbound_cpu will be set if t_nomigrate is.  Note that we
	 * cannot assert t_weakbound_cpu == t_bind_cpu since that is not
	 * always the case.
	 */
	if (t->t_nomigrate && t->t_weakbound_cpu && t->t_weakbound_cpu != cp) {
		if (!panicstr)
			panic("thread_nomigrate: binding to %p but already "
			    "bound to %p", (void *)cp,
			    (void *)t->t_weakbound_cpu);
	}

	/*
	 * At this point we have preemption disabled and we don't yet hold
	 * the thread lock.  So it's possible that somebody else could
	 * set t_bind_cpu here and not be able to force us across to the
	 * new cpu (since we have preemption disabled).
	 */
	thread_lock(curthread);

	/*
	 * If further weak bindings are being (temporarily) suppressed then
	 * we'll settle for disabling kernel preemption (which assures
	 * no migration provided the thread does not block which it is
	 * not allowed to if using thread_nomigrate).  We must remember
	 * this disposition so we can take appropriate action in
	 * thread_allowmigrate.  If this is a nested call and the
	 * thread is already weakbound then fall through as normal.
	 * We remember the decision to settle for kpreempt_disable through
	 * negative nesting counting in t_nomigrate.
	 * Once a thread has had one weakbinding request satisfied in this
	 * way any further (nested) requests will continue to be satisfied
	 * in the same way, even if weak bindings have recommenced.
	 */
	if (t->t_nomigrate < 0 || weakbindingbarrier && t->t_nomigrate == 0) {
		--t->t_nomigrate;
		thread_unlock(curthread);
		return;		/* with kpreempt_disable still active */
	}

	/*
	 * We hold thread_lock so t_bind_cpu cannot change.  We could,
	 * however, be running on a cpu other than our t_bound_cpu
	 * (as explained above).  If we grant the weak binding request
	 * in that case then the dispatcher must favour our weak binding
	 * over our strong (in which case, just as when preemption is
	 * disabled, we can continue to run on a cpu other than the one to
	 * which we are strongbound; the difference in this case is that
	 * this thread can be preempted and so can appear on the dispatch
	 * queues of a cpu other than the one it is strongbound to).
	 *
	 * If the cpu we are running on does not appear to be a current
	 * offline target (we check cpu_inmotion to determine this - since
	 * we don't hold cpu_lock we may not see a recent store to that,
	 * so it's possible that we at times can grant a weak binding to a
	 * cpu that is an offline target, but that one request will not
	 * prevent the offline from succeeding) then we will always grant
	 * the weak binding request.  This includes the case above where
	 * we grant a weakbinding not commensurate with our strong binding.
	 *
	 * If our cpu does appear to be an offline target then we're inclined
	 * not to grant the weakbinding request just yet - we'd prefer to
	 * migrate to another cpu and grant the request there.  The
	 * exceptions are those cases where going through preemption code
	 * will not result in us changing cpu:
	 *
	 *	. interrupts have already bypassed this case (see above)
	 *	. we are already weakbound to this cpu (dispatcher code will
	 *	  always return us to the weakbound cpu)
	 *	. preemption was disabled even before we disabled it above
	 *	. we are strongbound to this cpu (if we're strongbound to
	 *	  another and not yet running there the trip through the
	 *	  dispatcher will move us to the strongbound cpu and we
	 *	  will grant the weak binding there)
	 */
	if (cp != cpu_inmotion || t->t_nomigrate > 0 || t->t_preempt > 1 ||
	    t->t_bound_cpu == cp) {
		/*
		 * Don't be tempted to store to t_weakbound_cpu only on
		 * the first nested bind request - if we're interrupted
		 * after the increment of t_nomigrate and before the
		 * store to t_weakbound_cpu and the interrupt calls
		 * thread_nomigrate then the assertion in thread_allowmigrate
		 * would fail.
		 */
		t->t_nomigrate++;
		t->t_weakbound_cpu = cp;
		membar_producer();
		thread_unlock(curthread);
		/*
		 * Now that we have dropped the thread_lock another thread
		 * can set our t_weakbound_cpu, and will try to migrate us
		 * to the strongbound cpu (which will not be prevented by
		 * preemption being disabled since we're about to enable
		 * preemption).  We have granted the weakbinding to the
		 * current cpu, so again we are in the position that it is
		 * possible that our weak and strong bindings differ.  Again
		 * this is catered for by dispatcher code which will favour
		 * our weak binding.
		 */
		kpreempt_enable();
	} else {
		/*
		 * Move to another cpu before granting the request by
		 * forcing this thread through preemption code.  When we
		 * get to set{front,back}dq called from CL_PREEMPT()
		 * cpu_choose() will be used to select a cpu to queue
		 * us on - that will see cpu_inmotion and take
		 * steps to avoid returning us to this cpu.
		 */
		cp->cpu_kprunrun = 1;
		thread_unlock(curthread);
		kpreempt_enable();	/* will call preempt() */
		goto again;
	}
}

void
thread_allowmigrate(void)
{
	kthread_id_t t = curthread;

	ASSERT(t->t_weakbound_cpu == CPU ||
	    (t->t_nomigrate < 0 && t->t_preempt > 0) ||
	    CPU_ON_INTR(CPU) || t->t_flag & T_INTR_THREAD ||
	    getpil() >= DISP_LEVEL);

	if (CPU_ON_INTR(CPU) || (t->t_flag & T_INTR_THREAD) ||
	    getpil() >= DISP_LEVEL)
		return;

	if (t->t_nomigrate < 0) {
		/*
		 * This thread was granted "weak binding" in the
		 * stronger form of kernel preemption disabling.
		 * Undo a level of nesting for both t_nomigrate
		 * and t_preempt.
		 */
		++t->t_nomigrate;
		kpreempt_enable();
	} else if (--t->t_nomigrate == 0) {
		/*
		 * Time to drop the weak binding.  We need to cater
		 * for the case where we're weakbound to a different
		 * cpu than that to which we're strongbound (a very
		 * temporary arrangement that must only persist until
		 * weak binding drops).  We don't acquire thread_lock
		 * here so even as this code executes t_bound_cpu
		 * may be changing.  So we disable preemption and
		 * a) in the case that t_bound_cpu changes while we
		 * have preemption disabled kprunrun will be set
		 * asynchronously, and b) if before disabling
		 * preemption we were already on a different cpu to
		 * our t_bound_cpu then we set kprunrun ourselves
		 * to force a trip through the dispatcher when
		 * preemption is enabled.
		 */
		kpreempt_disable();
		if (t->t_bound_cpu &&
		    t->t_weakbound_cpu != t->t_bound_cpu)
			CPU->cpu_kprunrun = 1;
		t->t_weakbound_cpu = NULL;
		membar_producer();
		kpreempt_enable();
	}
}

/*
 * weakbinding_stop can be used to temporarily cause weakbindings made
 * with thread_nomigrate to be satisfied through the stronger action of
 * kpreempt_disable.  weakbinding_start recommences normal weakbinding.
 */

void
weakbinding_stop(void)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	weakbindingbarrier = 1;
	membar_producer();	/* make visible before subsequent thread_lock */
}

void
weakbinding_start(void)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	weakbindingbarrier = 0;
}

void
null_xcall(void)
{
}
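/*
 * Illustrative sketch only (editor's example, not part of the original
 * code): the usual pattern for the weak binding interfaces above is to
 * bracket a short, non-blocking access to per-CPU state where preemption
 * is acceptable but migration is not:
 *
 *	thread_nomigrate();
 *	...touch CPU->cpu_<field> (a stand-in for whatever per-CPU data
 *	   is involved); the thread may be preempted here but will not
 *	   change CPU...
 *	thread_allowmigrate();
 *
 * Calls nest, and each thread_nomigrate() must be matched by a
 * thread_allowmigrate() before the thread blocks.
 */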
/*
 * This routine is called to place the CPUs in a safe place so that
 * one of them can be taken off line or placed on line.  What we are
 * trying to do here is prevent a thread from traversing the list
 * of active CPUs while we are changing it or from getting placed on
 * the run queue of a CPU that has just gone off line.  We do this by
 * creating a thread with the highest possible prio for each CPU and
 * having it call this routine.  The advantage of this method is that
 * we can eliminate all checks for CPU_ACTIVE in the disp routines.
 * This makes disp faster at the expense of making p_online() slower
 * which is a good trade off.
 */
static void
cpu_pause(int index)
{
	int s;
	struct _cpu_pause_info *cpi = &cpu_pause_info;
	volatile char *safe = &safe_list[index];
	long	lindex = index;

	ASSERT((curthread->t_bound_cpu != NULL) || (*safe == PAUSE_DIE));

	while (*safe != PAUSE_DIE) {
		*safe = PAUSE_READY;
		membar_enter();		/* make sure stores are flushed */
		sema_v(&cpi->cp_sem);	/* signal requesting thread */

		/*
		 * Wait here until all pause threads are running.  That
		 * indicates that it's safe to do the spl.  Until
		 * cpu_pause_info.cp_go is set, we don't want to spl
		 * because that might block clock interrupts needed
		 * to preempt threads on other CPUs.
		 */
		while (cpi->cp_go == 0)
			;
		/*
		 * Even though we are at the highest disp prio, we need
		 * to block out all interrupts below LOCK_LEVEL so that
		 * an intr doesn't come in, wake up a thread, and call
		 * setbackdq/setfrontdq.
		 */
		s = splhigh();
		/*
		 * if cpu_pause_func() has been set then call it using
		 * index as the argument, currently only used by
		 * cpr_suspend_cpus().  This function is used as the
		 * code to execute on the "paused" cpus when a machine
		 * comes out of a sleep state and CPUs were powered off.
		 * (could also be used for hotplugging CPUs).
		 */
		if (cpu_pause_func != NULL)
			(*cpu_pause_func)((void *)lindex);

		mach_cpu_pause(safe);

		splx(s);
		/*
		 * Waiting is at an end.  Switch out of cpu_pause
		 * loop and resume useful work.
		 */
		swtch();
	}

	mutex_enter(&pause_free_mutex);
	*safe = PAUSE_DEAD;
	cv_broadcast(&pause_free_cv);
	mutex_exit(&pause_free_mutex);
}

/*
 * Allow the cpus to start running again.
 */
void
start_cpus()
{
	int i;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(cpu_pause_info.cp_paused);
	cpu_pause_info.cp_paused = NULL;
	for (i = 0; i < NCPU; i++)
		safe_list[i] = PAUSE_IDLE;
	membar_enter();			/* make sure stores are flushed */
	affinity_clear();
	splx(cpu_pause_info.cp_spl);
	kpreempt_enable();
}

/*
 * Allocate a pause thread for a CPU.
 */
static void
cpu_pause_alloc(cpu_t *cp)
{
	kthread_id_t	t;
	long		cpun = cp->cpu_id;

	/*
	 * Note, v.v_nglobpris will not change value as long as I hold
	 * cpu_lock.
	 */
	t = thread_create(NULL, 0, cpu_pause, (void *)cpun,
	    0, &p0, TS_STOPPED, v.v_nglobpris - 1);
	thread_lock(t);
	t->t_bound_cpu = cp;
	t->t_disp_queue = cp->cpu_disp;
	t->t_affinitycnt = 1;
	t->t_preempt = 1;
	thread_unlock(t);
	cp->cpu_pause_thread = t;
	/*
	 * Registering a thread in the callback table is usually done
	 * in the initialization code of the thread.  In this
	 * case, we do it right after thread creation because the
	 * thread itself may never run, and we need to register the
	 * fact that it is safe for cpr suspend.
	 */
	CALLB_CPR_INIT_SAFE(t, "cpu_pause");
}

/*
 * Free a pause thread for a CPU.
 */
static void
cpu_pause_free(cpu_t *cp)
{
	kthread_id_t	t;
	int		cpun = cp->cpu_id;

	ASSERT(MUTEX_HELD(&cpu_lock));
	/*
	 * We have to get the thread and tell it to die.
	 */
	if ((t = cp->cpu_pause_thread) == NULL) {
		ASSERT(safe_list[cpun] == PAUSE_IDLE);
		return;
	}
	thread_lock(t);
	t->t_cpu = CPU;		/* disp gets upset if last cpu is quiesced. */
	t->t_bound_cpu = NULL;	/* Must un-bind; cpu may not be running. */
	t->t_pri = v.v_nglobpris - 1;
	ASSERT(safe_list[cpun] == PAUSE_IDLE);
	safe_list[cpun] = PAUSE_DIE;
	THREAD_TRANSITION(t);
	setbackdq(t);
	thread_unlock_nopreempt(t);

	/*
	 * If we don't wait for the thread to actually die, it may try to
	 * run on the wrong cpu as part of an actual call to pause_cpus().
	 */
	mutex_enter(&pause_free_mutex);
	while (safe_list[cpun] != PAUSE_DEAD) {
		cv_wait(&pause_free_cv, &pause_free_mutex);
	}
	mutex_exit(&pause_free_mutex);
	safe_list[cpun] = PAUSE_IDLE;

	cp->cpu_pause_thread = NULL;
}

/*
 * Initialize basic structures for pausing CPUs.
 */
void
cpu_pause_init()
{
	sema_init(&cpu_pause_info.cp_sem, 0, NULL, SEMA_DEFAULT, NULL);
	/*
	 * Create initial CPU pause thread.
	 */
	cpu_pause_alloc(CPU);
}

/*
 * Start the threads used to pause another CPU.
 */
static int
cpu_pause_start(processorid_t cpu_id)
{
	int	i;
	int	cpu_count = 0;

	for (i = 0; i < NCPU; i++) {
		cpu_t		*cp;
		kthread_id_t	t;

		cp = cpu[i];
		if (!CPU_IN_SET(cpu_available, i) || (i == cpu_id)) {
			safe_list[i] = PAUSE_WAIT;
			continue;
		}

		/*
		 * Skip CPU if it is quiesced or not yet started.
		 */
		if ((cp->cpu_flags & (CPU_QUIESCED | CPU_READY)) != CPU_READY) {
			safe_list[i] = PAUSE_WAIT;
			continue;
		}

		/*
		 * Start this CPU's pause thread.
		 */
		t = cp->cpu_pause_thread;
		thread_lock(t);
		/*
		 * Reset the priority, since nglobpris may have
		 * changed since the thread was created, if someone
		 * has loaded the RT (or some other) scheduling
		 * class.
		 */
		t->t_pri = v.v_nglobpris - 1;
		THREAD_TRANSITION(t);
		setbackdq(t);
		thread_unlock_nopreempt(t);
		++cpu_count;
	}
	return (cpu_count);
}


/*
 * Pause all of the CPUs except the one we are on by creating a high
 * priority thread bound to those CPUs.
 *
 * Note that one must be extremely careful regarding code
 * executed while CPUs are paused.  Since a CPU may be paused
 * while a thread scheduling on that CPU is holding an adaptive
 * lock, code executed with CPUs paused must not acquire adaptive
 * (or low-level spin) locks.  Also, such code must not block,
 * since the thread that is supposed to initiate the wakeup may
 * never run.
 *
 * With a few exceptions, the restrictions on code executed with CPUs
 * paused match those for code executed at high-level interrupt
 * context.
 */
void
pause_cpus(cpu_t *off_cp)
{
	processorid_t	cpu_id;
	int		i;
	struct _cpu_pause_info	*cpi = &cpu_pause_info;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(cpi->cp_paused == NULL);
	cpi->cp_count = 0;
	cpi->cp_go = 0;
	for (i = 0; i < NCPU; i++)
		safe_list[i] = PAUSE_IDLE;
	kpreempt_disable();

	/*
	 * If running on the cpu that is going offline, get off it.
	 * This is so that it won't be necessary to rechoose a CPU
	 * when done.
	 */
	if (CPU == off_cp)
		cpu_id = off_cp->cpu_next_part->cpu_id;
	else
		cpu_id = CPU->cpu_id;
	affinity_set(cpu_id);

	/*
	 * Start the pause threads and record how many were started
	 */
	cpi->cp_count = cpu_pause_start(cpu_id);

	/*
	 * Now wait for all CPUs to be running the pause thread.
	 */
	while (cpi->cp_count > 0) {
		/*
		 * Spin reading the count without grabbing the disp
		 * lock to make sure we don't prevent the pause
		 * threads from getting the lock.
		 */
		while (sema_held(&cpi->cp_sem))
			;
		if (sema_tryp(&cpi->cp_sem))
			--cpi->cp_count;
	}
	cpi->cp_go = 1;			/* all have reached cpu_pause */

	/*
	 * Now wait for all CPUs to spl.  (Transition from PAUSE_READY
	 * to PAUSE_WAIT.)
	 */
	for (i = 0; i < NCPU; i++) {
		while (safe_list[i] != PAUSE_WAIT)
			;
	}
	cpi->cp_spl = splhigh();	/* block dispatcher on this CPU */
	cpi->cp_paused = curthread;
}

/*
 * Check whether the current thread has CPUs paused
 */
int
cpus_paused(void)
{
	if (cpu_pause_info.cp_paused != NULL) {
		ASSERT(cpu_pause_info.cp_paused == curthread);
		return (1);
	}
	return (0);
}

static cpu_t *
cpu_get_all(processorid_t cpun)
{
	ASSERT(MUTEX_HELD(&cpu_lock));

	if (cpun >= NCPU || cpun < 0 || !CPU_IN_SET(cpu_available, cpun))
		return (NULL);
	return (cpu[cpun]);
}

/*
 * Check whether cpun is a valid processor id and whether it should be
 * visible from the current zone.  If it is, return a pointer to the
 * associated CPU structure.
 */
cpu_t *
cpu_get(processorid_t cpun)
{
	cpu_t *c;

	ASSERT(MUTEX_HELD(&cpu_lock));
	c = cpu_get_all(cpun);
	if (c != NULL && !INGLOBALZONE(curproc) && pool_pset_enabled() &&
	    zone_pset_get(curproc->p_zone) != cpupart_query_cpu(c))
		return (NULL);
	return (c);
}

/*
 * The following functions should be used to check CPU states in the kernel.
 * They should be invoked with cpu_lock held.  Kernel subsystems interested
 * in CPU states should *not* use cpu_get_state() and various P_ONLINE/etc
 * states.  Those are for user-land (and system call) use only.
 */

/*
 * Determine whether the CPU is online and handling interrupts.
 */
int
cpu_is_online(cpu_t *cpu)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	return (cpu_flagged_online(cpu->cpu_flags));
}

/*
 * Determine whether the CPU is offline (this includes spare and faulted).
 */
int
cpu_is_offline(cpu_t *cpu)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	return (cpu_flagged_offline(cpu->cpu_flags));
}

/*
 * Determine whether the CPU is powered off.
 */
int
cpu_is_poweredoff(cpu_t *cpu)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	return (cpu_flagged_poweredoff(cpu->cpu_flags));
}

/*
 * Determine whether the CPU is online but not handling interrupts.
 */
int
cpu_is_nointr(cpu_t *cpu)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	return (cpu_flagged_nointr(cpu->cpu_flags));
}

/*
 * Determine whether the CPU is active (scheduling threads).
 */
int
cpu_is_active(cpu_t *cpu)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	return (cpu_flagged_active(cpu->cpu_flags));
}
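/*
 * Illustrative sketch only (editor's example, not part of the original
 * code): kernel subsystems are expected to use the predicates above,
 * with cpu_lock held, rather than the user-visible P_ONLINE/etc states:
 *
 *	mutex_enter(&cpu_lock);
 *	cp = cpu_get(cpuid);		(NULL if invalid or not visible)
 *	if (cp != NULL && cpu_is_online(cp))
 *		...the CPU is online and handling interrupts...
 *	mutex_exit(&cpu_lock);
 */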
/*
 * Same as above, but these require cpu_flags instead of cpu_t pointers.
 */
int
cpu_flagged_online(cpu_flag_t cpu_flags)
{
	return (cpu_flagged_active(cpu_flags) &&
	    (cpu_flags & CPU_ENABLE));
}

int
cpu_flagged_offline(cpu_flag_t cpu_flags)
{
	return (((cpu_flags & CPU_POWEROFF) == 0) &&
	    ((cpu_flags & (CPU_READY | CPU_OFFLINE)) != CPU_READY));
}

int
cpu_flagged_poweredoff(cpu_flag_t cpu_flags)
{
	return ((cpu_flags & CPU_POWEROFF) == CPU_POWEROFF);
}

int
cpu_flagged_nointr(cpu_flag_t cpu_flags)
{
	return (cpu_flagged_active(cpu_flags) &&
	    (cpu_flags & CPU_ENABLE) == 0);
}

int
cpu_flagged_active(cpu_flag_t cpu_flags)
{
	return (((cpu_flags & (CPU_POWEROFF | CPU_FAULTED | CPU_SPARE)) == 0) &&
	    ((cpu_flags & (CPU_READY | CPU_OFFLINE)) == CPU_READY));
}

/*
 * Bring the indicated CPU online.
 */
int
cpu_online(cpu_t *cp)
{
	int	error = 0;

	/*
	 * Handle on-line request.
	 *	This code must put the new CPU on the active list before
	 *	starting it because it will not be paused, and will start
	 *	using the active list immediately.  The real start occurs
	 *	when the CPU_QUIESCED flag is turned off.
	 */

	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * Put all the cpus into a known safe place.
	 * No mutexes can be entered while CPUs are paused.
	 */
	error = mp_cpu_start(cp);	/* arch-dep hook */
	if (error == 0) {
		pg_cpupart_in(cp, cp->cpu_part);
		pause_cpus(NULL);
		cpu_add_active_internal(cp);
		if (cp->cpu_flags & CPU_FAULTED) {
			cp->cpu_flags &= ~CPU_FAULTED;
			mp_cpu_faulted_exit(cp);
		}
		cp->cpu_flags &= ~(CPU_QUIESCED | CPU_OFFLINE | CPU_FROZEN |
		    CPU_SPARE);
		CPU_NEW_GENERATION(cp);
		start_cpus();
		cpu_stats_kstat_create(cp);
		cpu_create_intrstat(cp);
		lgrp_kstat_create(cp);
		cpu_state_change_notify(cp->cpu_id, CPU_ON);
		cpu_intr_enable(cp);	/* arch-dep hook */
		cpu_state_change_notify(cp->cpu_id, CPU_INTR_ON);
		cpu_set_state(cp);
		cyclic_online(cp);
		/*
		 * This has to be called only after cyclic_online().  This
		 * function uses cyclics.
		 */
		callout_cpu_online(cp);
		poke_cpu(cp->cpu_id);
	}

	return (error);
}

/*
 * Take the indicated CPU offline.
 */
int
cpu_offline(cpu_t *cp, int flags)
{
	cpupart_t *pp;
	int	error = 0;
	cpu_t	*ncp;
	int	intr_enable;
	int	cyclic_off = 0;
	int	callout_off = 0;
	int	loop_count;
	int	no_quiesce = 0;
	int	(*bound_func)(struct cpu *, int);
	kthread_t *t;
	lpl_t	*cpu_lpl;
	proc_t	*p;
	int	lgrp_diff_lpl;
	boolean_t unbind_all_threads = (flags & CPU_FORCED) != 0;

	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * If we're going from faulted or spare to offline, just
	 * clear these flags and update CPU state.
	 */
	if (cp->cpu_flags & (CPU_FAULTED | CPU_SPARE)) {
		if (cp->cpu_flags & CPU_FAULTED) {
			cp->cpu_flags &= ~CPU_FAULTED;
			mp_cpu_faulted_exit(cp);
		}
		cp->cpu_flags &= ~CPU_SPARE;
		cpu_set_state(cp);
		return (0);
	}

	/*
	 * Handle off-line request.
	 */
	pp = cp->cpu_part;
	/*
	 * Don't offline last online CPU in partition
	 */
	if (ncpus_online <= 1 || pp->cp_ncpus <= 1 || cpu_intr_count(cp) < 2)
		return (EBUSY);
	/*
	 * Unbind all soft-bound threads bound to our CPU and hard bound
	 * threads if we were asked to.
	 */
	error = cpu_unbind(cp->cpu_id, unbind_all_threads);
	if (error != 0)
		return (error);
	/*
	 * We shouldn't be bound to this CPU ourselves.
	 */
	if (curthread->t_bound_cpu == cp)
		return (EBUSY);

	/*
	 * Tell interested parties that this CPU is going offline.
	 */
	CPU_NEW_GENERATION(cp);
	cpu_state_change_notify(cp->cpu_id, CPU_OFF);

	/*
	 * Tell the PG subsystem that the CPU is leaving the partition
	 */
	pg_cpupart_out(cp, pp);

	/*
	 * Take the CPU out of interrupt participation so we won't find
	 * bound kernel threads.  If the architecture cannot completely
	 * shut off interrupts on the CPU, don't quiesce it, but don't
	 * run anything but interrupt thread... this is indicated by
	 * the CPU_OFFLINE flag being on but the CPU_QUIESCED flag being
	 * off.
	 */
	intr_enable = cp->cpu_flags & CPU_ENABLE;
	if (intr_enable)
		no_quiesce = cpu_intr_disable(cp);

	/*
	 * Record that we are aiming to offline this cpu.  This acts as
	 * a barrier to further weak binding requests in thread_nomigrate
	 * and also causes cpu_choose, disp_lowpri_cpu and setfrontdq to
	 * lean away from this cpu.  Further strong bindings are already
	 * avoided since we hold cpu_lock.  Since threads that are set
	 * runnable around now and others coming off the target cpu are
	 * directed away from the target, existing strong and weak bindings
	 * (especially the latter) to the target cpu stand maximum chance of
	 * being able to unbind during the short delay loop below (if other
	 * unbound threads compete they may not see cpu in time to unbind
	 * even if they would do so immediately).
	 */
	cpu_inmotion = cp;
	membar_enter();

	/*
	 * Check for kernel threads (strong or weak) bound to that CPU.
	 * Strongly bound threads may not unbind, and we'll have to return
	 * EBUSY.  Weakly bound threads should always disappear - we've
	 * stopped more weak binding with cpu_inmotion and existing
	 * bindings will drain imminently (they may not block).  Nonetheless
	 * we will wait for a fixed period for all bound threads to disappear.
	 * Inactive interrupt threads are OK (they'll be in TS_FREE
	 * state).  If test finds some bound threads, wait a few ticks
	 * to give short-lived threads (such as interrupts) chance to
	 * complete.  Note that if no_quiesce is set, i.e. this cpu
	 * is required to service interrupts, then we take the route
	 * that permits interrupt threads to be active (or bypassed).
	 */
	bound_func = no_quiesce ? disp_bound_threads : disp_bound_anythreads;

again:	for (loop_count = 0; (*bound_func)(cp, 0); loop_count++) {
		if (loop_count >= 5) {
			error = EBUSY;	/* some threads still bound */
			break;
		}

		/*
		 * If some threads were assigned, give them
		 * a chance to complete or move.
		 *
		 * This assumes that the clock_thread is not bound
		 * to any CPU, because the clock_thread is needed to
		 * do the delay(hz/100).
		 *
		 * Note: we still hold the cpu_lock while waiting for
		 * the next clock tick.
		 * This is OK since it isn't needed for anything else
		 * except processor_bind(2), and system initialization.  If
		 * we drop the lock, we would risk another p_online
		 * disabling the last processor.
		 */
		delay(hz/100);
	}

	if (error == 0 && callout_off == 0) {
		callout_cpu_offline(cp);
		callout_off = 1;
	}

	if (error == 0 && cyclic_off == 0) {
		if (!cyclic_offline(cp)) {
			/*
			 * We must have bound cyclics...
			 */
			error = EBUSY;
			goto out;
		}
		cyclic_off = 1;
	}

	/*
	 * Call mp_cpu_stop() to perform any special operations
	 * needed for this machine architecture to offline a CPU.
	 */
	if (error == 0)
		error = mp_cpu_stop(cp);	/* arch-dep hook */

	/*
	 * If that all worked, take the CPU offline and decrement
	 * ncpus_online.
	 */
	if (error == 0) {
		/*
		 * Put all the cpus into a known safe place.
		 * No mutexes can be entered while CPUs are paused.
		 */
		pause_cpus(cp);
		/*
		 * Repeat the operation, if necessary, to make sure that
		 * all outstanding low-level interrupts run to completion
		 * before we set the CPU_QUIESCED flag.  It's also possible
		 * that a thread has weak bound to the cpu despite our raising
		 * cpu_inmotion above since it may have loaded that
		 * value before the barrier became visible (this would have
		 * to be the thread that was on the target cpu at the time
		 * we raised the barrier).
		 */
		if ((!no_quiesce && cp->cpu_intr_actv != 0) ||
		    (*bound_func)(cp, 1)) {
			start_cpus();
			(void) mp_cpu_start(cp);
			goto again;
		}
		ncp = cp->cpu_next_part;
		cpu_lpl = cp->cpu_lpl;
		ASSERT(cpu_lpl != NULL);

		/*
		 * Remove the CPU from the list of active CPUs.
		 */
		cpu_remove_active(cp);

		/*
		 * Walk the active process list and look for threads
		 * whose home lgroup needs to be updated, or
		 * the last CPU they run on is the one being offlined now.
		 */

		ASSERT(curthread->t_cpu != cp);
		for (p = practive; p != NULL; p = p->p_next) {

			t = p->p_tlist;

			if (t == NULL)
				continue;

			lgrp_diff_lpl = 0;

			do {
				ASSERT(t->t_lpl != NULL);
				/*
				 * Taking last CPU in lpl offline
				 * Rehome thread if it is in this lpl
				 * Otherwise, update the count of how many
				 * threads are in this CPU's lgroup but have
				 * a different lpl.
				 */

				if (cpu_lpl->lpl_ncpu == 0) {
					if (t->t_lpl == cpu_lpl)
						lgrp_move_thread(t,
						    lgrp_choose(t,
						    t->t_cpupart), 0);
					else if (t->t_lpl->lpl_lgrpid ==
					    cpu_lpl->lpl_lgrpid)
						lgrp_diff_lpl++;
				}
				ASSERT(t->t_lpl->lpl_ncpu > 0);

				/*
				 * Update CPU last ran on if it was this CPU
				 */
				if (t->t_cpu == cp && t->t_bound_cpu != cp)
					t->t_cpu = disp_lowpri_cpu(ncp,
					    t->t_lpl, t->t_pri, NULL);
				ASSERT(t->t_cpu != cp || t->t_bound_cpu == cp ||
				    t->t_weakbound_cpu == cp);

				t = t->t_forw;
			} while (t != p->p_tlist);

			/*
			 * Didn't find any threads in the same lgroup as this
			 * CPU with a different lpl, so remove the lgroup from
			 * the process lgroup bitmask.
			 */

			if (lgrp_diff_lpl == 0)
				klgrpset_del(p->p_lgrpset, cpu_lpl->lpl_lgrpid);
		}

		/*
		 * Walk thread list looking for threads that need to be
		 * rehomed, since there are some threads that are not in
		 * their process's p_tlist.
		 */

		t = curthread;
		do {
			ASSERT(t != NULL && t->t_lpl != NULL);

			/*
			 * Rehome threads with same lpl as this CPU when this
			 * is the last CPU in the lpl.
			 */

			if ((cpu_lpl->lpl_ncpu == 0) && (t->t_lpl == cpu_lpl))
				lgrp_move_thread(t,
				    lgrp_choose(t, t->t_cpupart), 1);

			ASSERT(t->t_lpl->lpl_ncpu > 0);

			/*
			 * Update CPU last ran on if it was this CPU
			 */

			if (t->t_cpu == cp && t->t_bound_cpu != cp) {
				t->t_cpu = disp_lowpri_cpu(ncp,
				    t->t_lpl, t->t_pri, NULL);
			}
			ASSERT(t->t_cpu != cp || t->t_bound_cpu == cp ||
			    t->t_weakbound_cpu == cp);
			t = t->t_next;

		} while (t != curthread);
		ASSERT((cp->cpu_flags & (CPU_FAULTED | CPU_SPARE)) == 0);
		cp->cpu_flags |= CPU_OFFLINE;
		disp_cpu_inactive(cp);
		if (!no_quiesce)
			cp->cpu_flags |= CPU_QUIESCED;
		ncpus_online--;
		cpu_set_state(cp);
		cpu_inmotion = NULL;
		start_cpus();
		cpu_stats_kstat_destroy(cp);
		cpu_delete_intrstat(cp);
		lgrp_kstat_destroy(cp);
	}

out:
	cpu_inmotion = NULL;

	/*
	 * If we failed, re-enable interrupts.
	 * Do this even if cpu_intr_disable returned an error, because
	 * it may have partially disabled interrupts.
	 */
	if (error && intr_enable)
		cpu_intr_enable(cp);

	/*
	 * If we failed, but managed to offline the cyclic subsystem on this
	 * CPU, bring it back online.
	 */
	if (error && cyclic_off)
		cyclic_online(cp);

	/*
	 * If we failed, but managed to offline callouts on this CPU,
	 * bring it back online.
	 */
	if (error && callout_off)
		callout_cpu_online(cp);

	/*
	 * If we failed, tell the PG subsystem that the CPU is back
	 */
	pg_cpupart_in(cp, pp);

	/*
	 * If we failed, we need to notify everyone that this CPU is back on.
	 */
	if (error != 0) {
		CPU_NEW_GENERATION(cp);
		cpu_state_change_notify(cp->cpu_id, CPU_ON);
		cpu_state_change_notify(cp->cpu_id, CPU_INTR_ON);
	}

	return (error);
}

/*
 * Mark the indicated CPU as faulted, taking it offline.
 */
int
cpu_faulted(cpu_t *cp, int flags)
{
	int	error = 0;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(!cpu_is_poweredoff(cp));

	if (cpu_is_offline(cp)) {
		cp->cpu_flags &= ~CPU_SPARE;
		cp->cpu_flags |= CPU_FAULTED;
		mp_cpu_faulted_enter(cp);
		cpu_set_state(cp);
		return (0);
	}

	if ((error = cpu_offline(cp, flags)) == 0) {
		cp->cpu_flags |= CPU_FAULTED;
		mp_cpu_faulted_enter(cp);
		cpu_set_state(cp);
	}

	return (error);
}

/*
 * Mark the indicated CPU as a spare, taking it offline.
 */
int
cpu_spare(cpu_t *cp, int flags)
{
	int	error = 0;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(!cpu_is_poweredoff(cp));

	if (cpu_is_offline(cp)) {
		if (cp->cpu_flags & CPU_FAULTED) {
			cp->cpu_flags &= ~CPU_FAULTED;
			mp_cpu_faulted_exit(cp);
		}
		cp->cpu_flags |= CPU_SPARE;
		cpu_set_state(cp);
		return (0);
	}

	if ((error = cpu_offline(cp, flags)) == 0) {
		cp->cpu_flags |= CPU_SPARE;
		cpu_set_state(cp);
	}

	return (error);
}
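/*
 * Illustrative sketch only (editor's example, not part of the original
 * code): a caller retiring a CPU, for instance in response to hardware
 * errors, would typically do so under cpu_lock using the routines above:
 *
 *	mutex_enter(&cpu_lock);
 *	cp = cpu_get(cpuid);
 *	if (cp != NULL && !cpu_is_poweredoff(cp))
 *		error = cpu_faulted(cp, 0);	(or cpu_spare(cp, 0))
 *	mutex_exit(&cpu_lock);
 *
 * Both routines fall back on cpu_offline() when the CPU is still online,
 * so the same EBUSY failure modes apply.
 */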
/*
 * Take the indicated CPU from poweroff to offline.
 */
int
cpu_poweron(cpu_t *cp)
{
	int	error = ENOTSUP;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(cpu_is_poweredoff(cp));

	error = mp_cpu_poweron(cp);	/* arch-dep hook */
	if (error == 0)
		cpu_set_state(cp);

	return (error);
}

/*
 * Take the indicated CPU from any inactive state to powered off.
 */
int
cpu_poweroff(cpu_t *cp)
{
	int	error = ENOTSUP;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(cpu_is_offline(cp));

	if (!(cp->cpu_flags & CPU_QUIESCED))
		return (EBUSY);		/* not completely idle */

	error = mp_cpu_poweroff(cp);	/* arch-dep hook */
	if (error == 0)
		cpu_set_state(cp);

	return (error);
}

/*
 * Initialize the Sequential CPU id lookup table
 */
void
cpu_seq_tbl_init()
{
	cpu_t	**tbl;

	tbl = kmem_zalloc(sizeof (struct cpu *) * max_ncpus, KM_SLEEP);
	tbl[0] = CPU;

	cpu_seq = tbl;
}

/*
 * Initialize the CPU lists for the first CPU.
 */
void
cpu_list_init(cpu_t *cp)
{
	cp->cpu_next = cp;
	cp->cpu_prev = cp;
	cpu_list = cp;
	clock_cpu_list = cp;

	cp->cpu_next_onln = cp;
	cp->cpu_prev_onln = cp;
	cpu_active = cp;

	cp->cpu_seqid = 0;
	CPUSET_ADD(cpu_seqid_inuse, 0);

	/*
	 * Bootstrap cpu_seq using cpu_list
	 * The cpu_seq[] table will be dynamically allocated
	 * when kmem later becomes available (but before going MP)
	 */
	cpu_seq = &cpu_list;

	cp->cpu_cache_offset = KMEM_CPU_CACHE_OFFSET(cp->cpu_seqid);
	cp_default.cp_cpulist = cp;
	cp_default.cp_ncpus = 1;
	cp->cpu_next_part = cp;
	cp->cpu_prev_part = cp;
	cp->cpu_part = &cp_default;

	CPUSET_ADD(cpu_available, cp->cpu_id);
}

/*
 * Insert a CPU into the list of available CPUs.
 */
void
cpu_add_unit(cpu_t *cp)
{
	int seqid;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(cpu_list != NULL);	/* list started in cpu_list_init */

	lgrp_config(LGRP_CONFIG_CPU_ADD, (uintptr_t)cp, 0);

	/*
	 * Note: most users of the cpu_list will grab the
	 * cpu_lock to ensure that it isn't modified.  However,
	 * certain users can't or won't do that.  To allow this
	 * we pause the other cpus.  Users who walk the list
	 * without cpu_lock, must disable kernel preemption
	 * to ensure that the list isn't modified underneath
	 * them.  Also, any cached pointers to cpu structures
	 * must be revalidated by checking to see if the
	 * cpu_next pointer points to itself.  This check must
	 * be done with the cpu_lock held or kernel preemption
	 * disabled.  This check relies upon the fact that
	 * old cpu structures are not free'ed or cleared after
	 * they are removed from the cpu_list.
	 *
	 * Note that the clock code walks the cpu list dereferencing
	 * the cpu_part pointer, so we need to initialize it before
	 * adding the cpu to the list.
	 */
	cp->cpu_part = &cp_default;
	(void) pause_cpus(NULL);
	cp->cpu_next = cpu_list;
	cp->cpu_prev = cpu_list->cpu_prev;
	cpu_list->cpu_prev->cpu_next = cp;
	cpu_list->cpu_prev = cp;
	start_cpus();

	for (seqid = 0; CPU_IN_SET(cpu_seqid_inuse, seqid); seqid++)
		continue;
	CPUSET_ADD(cpu_seqid_inuse, seqid);
	cp->cpu_seqid = seqid;

	if (seqid > max_cpu_seqid_ever)
		max_cpu_seqid_ever = seqid;

	ASSERT(ncpus < max_ncpus);
	ncpus++;
	cp->cpu_cache_offset = KMEM_CPU_CACHE_OFFSET(cp->cpu_seqid);
	cpu[cp->cpu_id] = cp;
	CPUSET_ADD(cpu_available, cp->cpu_id);
	cpu_seq[cp->cpu_seqid] = cp;

	/*
	 * allocate a pause thread for this CPU.
	 */
	cpu_pause_alloc(cp);

	/*
	 * So that new CPUs won't have NULL prev_onln and next_onln pointers,
	 * link them into a list of just that CPU.
	 * This is so that disp_lowpri_cpu will work for thread_create in
	 * pause_cpus() when called from the startup thread in a new CPU.
	 */
	cp->cpu_next_onln = cp;
	cp->cpu_prev_onln = cp;
	cpu_info_kstat_create(cp);
	cp->cpu_next_part = cp;
	cp->cpu_prev_part = cp;

	init_cpu_mstate(cp, CMS_SYSTEM);

	pool_pset_mod = gethrtime();
}

/*
 * Do the opposite of cpu_add_unit().
 */
void
cpu_del_unit(int cpuid)
{
	struct cpu	*cp, *cpnext;

	ASSERT(MUTEX_HELD(&cpu_lock));
	cp = cpu[cpuid];
	ASSERT(cp != NULL);

	ASSERT(cp->cpu_next_onln == cp);
	ASSERT(cp->cpu_prev_onln == cp);
	ASSERT(cp->cpu_next_part == cp);
	ASSERT(cp->cpu_prev_part == cp);

	/*
	 * Tear down the CPU's physical ID cache, and update any
	 * processor groups
	 */
	pg_cpu_fini(cp, NULL);
	pghw_physid_destroy(cp);

	/*
	 * Destroy kstat stuff.
	 */
	cpu_info_kstat_destroy(cp);
	term_cpu_mstate(cp);
	/*
	 * Free up pause thread.
	 */
	cpu_pause_free(cp);
	CPUSET_DEL(cpu_available, cp->cpu_id);
	cpu[cp->cpu_id] = NULL;
	cpu_seq[cp->cpu_seqid] = NULL;

	/*
	 * The clock thread and mutex_vector_enter cannot hold the
	 * cpu_lock while traversing the cpu list, therefore we pause
	 * all other threads by pausing the other cpus.  These, and any
	 * other routines holding cpu pointers while possibly sleeping
	 * must be sure to call kpreempt_disable before processing the
	 * list and be sure to check that the cpu has not been deleted
	 * after any sleeps (check cp->cpu_next != NULL).  We guarantee
	 * to keep the deleted cpu structure around.
	 *
	 * Note that this MUST be done AFTER cpu_available
	 * has been updated so that we don't waste time
	 * trying to pause the cpu we're trying to delete.
	 */
	(void) pause_cpus(NULL);

	cpnext = cp->cpu_next;
	cp->cpu_prev->cpu_next = cp->cpu_next;
	cp->cpu_next->cpu_prev = cp->cpu_prev;
	if (cp == cpu_list)
		cpu_list = cpnext;

	/*
	 * Signals that the cpu has been deleted (see above).
	 */
	cp->cpu_next = NULL;
	cp->cpu_prev = NULL;

	start_cpus();

	CPUSET_DEL(cpu_seqid_inuse, cp->cpu_seqid);
	ncpus--;
	lgrp_config(LGRP_CONFIG_CPU_DEL, (uintptr_t)cp, 0);

	pool_pset_mod = gethrtime();
}
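/*
 * Illustrative sketch only (editor's example, not part of the original
 * code): per the comments in cpu_add_unit() and cpu_del_unit() above, a
 * walker that cannot take cpu_lock traverses cpu_list roughly like:
 *
 *	kpreempt_disable();
 *	cp = first = cpu_list;
 *	do {
 *		...use cp; a pointer cached across a sleep must be
 *		   revalidated, since cpu_del_unit() marks deleted cpus
 *		   by setting cpu_next to NULL...
 *		cp = cp->cpu_next;
 *	} while (cp != first);
 *	kpreempt_enable();
 */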
1874 */ 1875 static void 1876 cpu_add_active_internal(cpu_t *cp) 1877 { 1878 cpupart_t *pp = cp->cpu_part; 1879 1880 ASSERT(MUTEX_HELD(&cpu_lock)); 1881 ASSERT(cpu_list != NULL); /* list started in cpu_list_init */ 1882 1883 ncpus_online++; 1884 cpu_set_state(cp); 1885 cp->cpu_next_onln = cpu_active; 1886 cp->cpu_prev_onln = cpu_active->cpu_prev_onln; 1887 cpu_active->cpu_prev_onln->cpu_next_onln = cp; 1888 cpu_active->cpu_prev_onln = cp; 1889 1890 if (pp->cp_cpulist) { 1891 cp->cpu_next_part = pp->cp_cpulist; 1892 cp->cpu_prev_part = pp->cp_cpulist->cpu_prev_part; 1893 pp->cp_cpulist->cpu_prev_part->cpu_next_part = cp; 1894 pp->cp_cpulist->cpu_prev_part = cp; 1895 } else { 1896 ASSERT(pp->cp_ncpus == 0); 1897 pp->cp_cpulist = cp->cpu_next_part = cp->cpu_prev_part = cp; 1898 } 1899 pp->cp_ncpus++; 1900 if (pp->cp_ncpus == 1) { 1901 cp_numparts_nonempty++; 1902 ASSERT(cp_numparts_nonempty != 0); 1903 } 1904 1905 pg_cpu_active(cp); 1906 lgrp_config(LGRP_CONFIG_CPU_ONLINE, (uintptr_t)cp, 0); 1907 1908 bzero(&cp->cpu_loadavg, sizeof (cp->cpu_loadavg)); 1909 } 1910 1911 /* 1912 * Add a CPU to the list of active CPUs. 1913 * This is called from machine-dependent layers when a new CPU is started. 1914 */ 1915 void 1916 cpu_add_active(cpu_t *cp) 1917 { 1918 pg_cpupart_in(cp, cp->cpu_part); 1919 1920 pause_cpus(NULL); 1921 cpu_add_active_internal(cp); 1922 start_cpus(); 1923 1924 cpu_stats_kstat_create(cp); 1925 cpu_create_intrstat(cp); 1926 lgrp_kstat_create(cp); 1927 cpu_state_change_notify(cp->cpu_id, CPU_INIT); 1928 } 1929 1930 1931 /* 1932 * Remove a CPU from the list of active CPUs. 1933 * This routine must not get any locks, because other CPUs are paused. 1934 */ 1935 /* ARGSUSED */ 1936 static void 1937 cpu_remove_active(cpu_t *cp) 1938 { 1939 cpupart_t *pp = cp->cpu_part; 1940 1941 ASSERT(MUTEX_HELD(&cpu_lock)); 1942 ASSERT(cp->cpu_next_onln != cp); /* not the last one */ 1943 ASSERT(cp->cpu_prev_onln != cp); /* not the last one */ 1944 1945 pg_cpu_inactive(cp); 1946 1947 lgrp_config(LGRP_CONFIG_CPU_OFFLINE, (uintptr_t)cp, 0); 1948 1949 if (cp == clock_cpu_list) 1950 clock_cpu_list = cp->cpu_next_onln; 1951 1952 cp->cpu_prev_onln->cpu_next_onln = cp->cpu_next_onln; 1953 cp->cpu_next_onln->cpu_prev_onln = cp->cpu_prev_onln; 1954 if (cpu_active == cp) { 1955 cpu_active = cp->cpu_next_onln; 1956 } 1957 cp->cpu_next_onln = cp; 1958 cp->cpu_prev_onln = cp; 1959 1960 cp->cpu_prev_part->cpu_next_part = cp->cpu_next_part; 1961 cp->cpu_next_part->cpu_prev_part = cp->cpu_prev_part; 1962 if (pp->cp_cpulist == cp) { 1963 pp->cp_cpulist = cp->cpu_next_part; 1964 ASSERT(pp->cp_cpulist != cp); 1965 } 1966 cp->cpu_next_part = cp; 1967 cp->cpu_prev_part = cp; 1968 pp->cp_ncpus--; 1969 if (pp->cp_ncpus == 0) { 1970 cp_numparts_nonempty--; 1971 ASSERT(cp_numparts_nonempty != 0); 1972 } 1973 } 1974 1975 /* 1976 * Routine used to setup a newly inserted CPU in preparation for starting 1977 * it running code. 1978 */ 1979 int 1980 cpu_configure(int cpuid) 1981 { 1982 int retval = 0; 1983 1984 ASSERT(MUTEX_HELD(&cpu_lock)); 1985 1986 /* 1987 * Some structures are statically allocated based upon 1988 * the maximum number of cpus the system supports. Do not 1989 * try to add anything beyond this limit. 
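 * (For example, the cpu[] array indexed just below has only NCPU
 * entries, which is why cpuid is range-checked against NCPU first.)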
1990 */ 1991 if (cpuid < 0 || cpuid >= NCPU) { 1992 return (EINVAL); 1993 } 1994 1995 if ((cpu[cpuid] != NULL) && (cpu[cpuid]->cpu_flags != 0)) { 1996 return (EALREADY); 1997 } 1998 1999 if ((retval = mp_cpu_configure(cpuid)) != 0) { 2000 return (retval); 2001 } 2002 2003 cpu[cpuid]->cpu_flags = CPU_QUIESCED | CPU_OFFLINE | CPU_POWEROFF; 2004 cpu_set_state(cpu[cpuid]); 2005 retval = cpu_state_change_hooks(cpuid, CPU_CONFIG, CPU_UNCONFIG); 2006 if (retval != 0) 2007 (void) mp_cpu_unconfigure(cpuid); 2008 2009 return (retval); 2010 } 2011 2012 /* 2013 * Routine used to cleanup a CPU that has been powered off. This will 2014 * destroy all per-cpu information related to this cpu. 2015 */ 2016 int 2017 cpu_unconfigure(int cpuid) 2018 { 2019 int error; 2020 2021 ASSERT(MUTEX_HELD(&cpu_lock)); 2022 2023 if (cpu[cpuid] == NULL) { 2024 return (ENODEV); 2025 } 2026 2027 if (cpu[cpuid]->cpu_flags == 0) { 2028 return (EALREADY); 2029 } 2030 2031 if ((cpu[cpuid]->cpu_flags & CPU_POWEROFF) == 0) { 2032 return (EBUSY); 2033 } 2034 2035 if (cpu[cpuid]->cpu_props != NULL) { 2036 (void) nvlist_free(cpu[cpuid]->cpu_props); 2037 cpu[cpuid]->cpu_props = NULL; 2038 } 2039 2040 error = cpu_state_change_hooks(cpuid, CPU_UNCONFIG, CPU_CONFIG); 2041 2042 if (error != 0) 2043 return (error); 2044 2045 return (mp_cpu_unconfigure(cpuid)); 2046 } 2047 2048 /* 2049 * Routines for registering and de-registering cpu_setup callback functions. 2050 * 2051 * Caller's context 2052 * These routines must not be called from a driver's attach(9E) or 2053 * detach(9E) entry point. 2054 * 2055 * NOTE: CPU callbacks should not block. They are called with cpu_lock held. 2056 */ 2057 2058 /* 2059 * Ideally, these would be dynamically allocated and put into a linked 2060 * list; however that is not feasible because the registration routine 2061 * has to be available before the kmem allocator is working (in fact, 2062 * it is called by the kmem allocator init code). In any case, there 2063 * are quite a few extra entries for future users. 2064 */ 2065 #define NCPU_SETUPS 20 2066 2067 struct cpu_setup { 2068 cpu_setup_func_t *func; 2069 void *arg; 2070 } cpu_setups[NCPU_SETUPS]; 2071 2072 void 2073 register_cpu_setup_func(cpu_setup_func_t *func, void *arg) 2074 { 2075 int i; 2076 2077 ASSERT(MUTEX_HELD(&cpu_lock)); 2078 2079 for (i = 0; i < NCPU_SETUPS; i++) 2080 if (cpu_setups[i].func == NULL) 2081 break; 2082 if (i >= NCPU_SETUPS) 2083 cmn_err(CE_PANIC, "Ran out of cpu_setup callback entries"); 2084 2085 cpu_setups[i].func = func; 2086 cpu_setups[i].arg = arg; 2087 } 2088 2089 void 2090 unregister_cpu_setup_func(cpu_setup_func_t *func, void *arg) 2091 { 2092 int i; 2093 2094 ASSERT(MUTEX_HELD(&cpu_lock)); 2095 2096 for (i = 0; i < NCPU_SETUPS; i++) 2097 if ((cpu_setups[i].func == func) && 2098 (cpu_setups[i].arg == arg)) 2099 break; 2100 if (i >= NCPU_SETUPS) 2101 cmn_err(CE_PANIC, "Could not find cpu_setup callback to " 2102 "deregister"); 2103 2104 cpu_setups[i].func = NULL; 2105 cpu_setups[i].arg = 0; 2106 } 2107 2108 /* 2109 * Call any state change hooks for this CPU, ignore any errors. 2110 */ 2111 void 2112 cpu_state_change_notify(int id, cpu_setup_t what) 2113 { 2114 int i; 2115 2116 ASSERT(MUTEX_HELD(&cpu_lock)); 2117 2118 for (i = 0; i < NCPU_SETUPS; i++) { 2119 if (cpu_setups[i].func != NULL) { 2120 cpu_setups[i].func(what, id, cpu_setups[i].arg); 2121 } 2122 } 2123 } 2124 2125 /* 2126 * Call any state change hooks for this CPU, undo it if error found. 
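 * If the i'th registered callback fails, the callbacks that have already
 * run are invoked again with the `undo' event, in reverse order, and the
 * error is returned to the caller.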
2127 */ 2128 static int 2129 cpu_state_change_hooks(int id, cpu_setup_t what, cpu_setup_t undo) 2130 { 2131 int i; 2132 int retval = 0; 2133 2134 ASSERT(MUTEX_HELD(&cpu_lock)); 2135 2136 for (i = 0; i < NCPU_SETUPS; i++) { 2137 if (cpu_setups[i].func != NULL) { 2138 retval = cpu_setups[i].func(what, id, 2139 cpu_setups[i].arg); 2140 if (retval) { 2141 for (i--; i >= 0; i--) { 2142 if (cpu_setups[i].func != NULL) 2143 cpu_setups[i].func(undo, 2144 id, cpu_setups[i].arg); 2145 } 2146 break; 2147 } 2148 } 2149 } 2150 return (retval); 2151 } 2152 2153 /* 2154 * Export information about this CPU via the kstat mechanism. 2155 */ 2156 static struct { 2157 kstat_named_t ci_state; 2158 kstat_named_t ci_state_begin; 2159 kstat_named_t ci_cpu_type; 2160 kstat_named_t ci_fpu_type; 2161 kstat_named_t ci_clock_MHz; 2162 kstat_named_t ci_chip_id; 2163 kstat_named_t ci_implementation; 2164 kstat_named_t ci_brandstr; 2165 kstat_named_t ci_core_id; 2166 kstat_named_t ci_curr_clock_Hz; 2167 kstat_named_t ci_supp_freq_Hz; 2168 kstat_named_t ci_pg_id; 2169 #if defined(__sparcv9) 2170 kstat_named_t ci_device_ID; 2171 kstat_named_t ci_cpu_fru; 2172 #endif 2173 #if defined(__x86) 2174 kstat_named_t ci_vendorstr; 2175 kstat_named_t ci_family; 2176 kstat_named_t ci_model; 2177 kstat_named_t ci_step; 2178 kstat_named_t ci_clogid; 2179 kstat_named_t ci_pkg_core_id; 2180 kstat_named_t ci_ncpuperchip; 2181 kstat_named_t ci_ncoreperchip; 2182 kstat_named_t ci_max_cstates; 2183 kstat_named_t ci_curr_cstate; 2184 kstat_named_t ci_cacheid; 2185 kstat_named_t ci_sktstr; 2186 #endif 2187 } cpu_info_template = { 2188 { "state", KSTAT_DATA_CHAR }, 2189 { "state_begin", KSTAT_DATA_LONG }, 2190 { "cpu_type", KSTAT_DATA_CHAR }, 2191 { "fpu_type", KSTAT_DATA_CHAR }, 2192 { "clock_MHz", KSTAT_DATA_LONG }, 2193 { "chip_id", KSTAT_DATA_LONG }, 2194 { "implementation", KSTAT_DATA_STRING }, 2195 { "brand", KSTAT_DATA_STRING }, 2196 { "core_id", KSTAT_DATA_LONG }, 2197 { "current_clock_Hz", KSTAT_DATA_UINT64 }, 2198 { "supported_frequencies_Hz", KSTAT_DATA_STRING }, 2199 { "pg_id", KSTAT_DATA_LONG }, 2200 #if defined(__sparcv9) 2201 { "device_ID", KSTAT_DATA_UINT64 }, 2202 { "cpu_fru", KSTAT_DATA_STRING }, 2203 #endif 2204 #if defined(__x86) 2205 { "vendor_id", KSTAT_DATA_STRING }, 2206 { "family", KSTAT_DATA_INT32 }, 2207 { "model", KSTAT_DATA_INT32 }, 2208 { "stepping", KSTAT_DATA_INT32 }, 2209 { "clog_id", KSTAT_DATA_INT32 }, 2210 { "pkg_core_id", KSTAT_DATA_LONG }, 2211 { "ncpu_per_chip", KSTAT_DATA_INT32 }, 2212 { "ncore_per_chip", KSTAT_DATA_INT32 }, 2213 { "supported_max_cstates", KSTAT_DATA_INT32 }, 2214 { "current_cstate", KSTAT_DATA_INT32 }, 2215 { "cache_id", KSTAT_DATA_INT32 }, 2216 { "socket_type", KSTAT_DATA_STRING }, 2217 #endif 2218 }; 2219 2220 static kmutex_t cpu_info_template_lock; 2221 2222 static int 2223 cpu_info_kstat_update(kstat_t *ksp, int rw) 2224 { 2225 cpu_t *cp = ksp->ks_private; 2226 const char *pi_state; 2227 2228 if (rw == KSTAT_WRITE) 2229 return (EACCES); 2230 2231 #if defined(__x86) 2232 /* Is the cpu still initialising itself? 
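 * (cpuid_checkpass() is zero until the first cpuid pass has completed,
 * i.e. before the vendor/family/model data reported below is valid.)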
*/ 2233 if (cpuid_checkpass(cp, 1) == 0) 2234 return (ENXIO); 2235 #endif 2236 switch (cp->cpu_type_info.pi_state) { 2237 case P_ONLINE: 2238 pi_state = PS_ONLINE; 2239 break; 2240 case P_POWEROFF: 2241 pi_state = PS_POWEROFF; 2242 break; 2243 case P_NOINTR: 2244 pi_state = PS_NOINTR; 2245 break; 2246 case P_FAULTED: 2247 pi_state = PS_FAULTED; 2248 break; 2249 case P_SPARE: 2250 pi_state = PS_SPARE; 2251 break; 2252 case P_OFFLINE: 2253 pi_state = PS_OFFLINE; 2254 break; 2255 default: 2256 pi_state = "unknown"; 2257 } 2258 (void) strcpy(cpu_info_template.ci_state.value.c, pi_state); 2259 cpu_info_template.ci_state_begin.value.l = cp->cpu_state_begin; 2260 (void) strncpy(cpu_info_template.ci_cpu_type.value.c, 2261 cp->cpu_type_info.pi_processor_type, 15); 2262 (void) strncpy(cpu_info_template.ci_fpu_type.value.c, 2263 cp->cpu_type_info.pi_fputypes, 15); 2264 cpu_info_template.ci_clock_MHz.value.l = cp->cpu_type_info.pi_clock; 2265 cpu_info_template.ci_chip_id.value.l = 2266 pg_plat_hw_instance_id(cp, PGHW_CHIP); 2267 kstat_named_setstr(&cpu_info_template.ci_implementation, 2268 cp->cpu_idstr); 2269 kstat_named_setstr(&cpu_info_template.ci_brandstr, cp->cpu_brandstr); 2270 cpu_info_template.ci_core_id.value.l = pg_plat_get_core_id(cp); 2271 cpu_info_template.ci_curr_clock_Hz.value.ui64 = 2272 cp->cpu_curr_clock; 2273 cpu_info_template.ci_pg_id.value.l = 2274 cp->cpu_pg && cp->cpu_pg->cmt_lineage ? 2275 cp->cpu_pg->cmt_lineage->pg_id : -1; 2276 kstat_named_setstr(&cpu_info_template.ci_supp_freq_Hz, 2277 cp->cpu_supp_freqs); 2278 #if defined(__sparcv9) 2279 cpu_info_template.ci_device_ID.value.ui64 = 2280 cpunodes[cp->cpu_id].device_id; 2281 kstat_named_setstr(&cpu_info_template.ci_cpu_fru, cpu_fru_fmri(cp)); 2282 #endif 2283 #if defined(__x86) 2284 kstat_named_setstr(&cpu_info_template.ci_vendorstr, 2285 cpuid_getvendorstr(cp)); 2286 cpu_info_template.ci_family.value.l = cpuid_getfamily(cp); 2287 cpu_info_template.ci_model.value.l = cpuid_getmodel(cp); 2288 cpu_info_template.ci_step.value.l = cpuid_getstep(cp); 2289 cpu_info_template.ci_clogid.value.l = cpuid_get_clogid(cp); 2290 cpu_info_template.ci_ncpuperchip.value.l = cpuid_get_ncpu_per_chip(cp); 2291 cpu_info_template.ci_ncoreperchip.value.l = 2292 cpuid_get_ncore_per_chip(cp); 2293 cpu_info_template.ci_pkg_core_id.value.l = cpuid_get_pkgcoreid(cp); 2294 cpu_info_template.ci_max_cstates.value.l = cp->cpu_m.max_cstates; 2295 cpu_info_template.ci_curr_cstate.value.l = cpu_idle_get_cpu_state(cp); 2296 cpu_info_template.ci_cacheid.value.i32 = cpuid_get_cacheid(cp); 2297 kstat_named_setstr(&cpu_info_template.ci_sktstr, 2298 cpuid_getsocketstr(cp)); 2299 #endif 2300 2301 return (0); 2302 } 2303 2304 static void 2305 cpu_info_kstat_create(cpu_t *cp) 2306 { 2307 zoneid_t zoneid; 2308 2309 ASSERT(MUTEX_HELD(&cpu_lock)); 2310 2311 if (pool_pset_enabled()) 2312 zoneid = GLOBAL_ZONEID; 2313 else 2314 zoneid = ALL_ZONES; 2315 if ((cp->cpu_info_kstat = kstat_create_zone("cpu_info", cp->cpu_id, 2316 NULL, "misc", KSTAT_TYPE_NAMED, 2317 sizeof (cpu_info_template) / sizeof (kstat_named_t), 2318 KSTAT_FLAG_VIRTUAL | KSTAT_FLAG_VAR_SIZE, zoneid)) != NULL) { 2319 cp->cpu_info_kstat->ks_data_size += 2 * CPU_IDSTRLEN; 2320 #if defined(__sparcv9) 2321 cp->cpu_info_kstat->ks_data_size += 2322 strlen(cpu_fru_fmri(cp)) + 1; 2323 #endif 2324 #if defined(__x86) 2325 cp->cpu_info_kstat->ks_data_size += X86_VENDOR_STRLEN; 2326 #endif 2327 if (cp->cpu_supp_freqs != NULL) 2328 cp->cpu_info_kstat->ks_data_size += 2329 strlen(cp->cpu_supp_freqs) + 1; 2330 
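		/*
		 * The KSTAT_DATA_STRING members (implementation, brand, and
		 * friends) are stored outside the named-kstat array itself,
		 * so their lengths are folded into ks_data_size above to keep
		 * kstat snapshots large enough to carry them.
		 */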
cp->cpu_info_kstat->ks_lock = &cpu_info_template_lock; 2331 cp->cpu_info_kstat->ks_data = &cpu_info_template; 2332 cp->cpu_info_kstat->ks_private = cp; 2333 cp->cpu_info_kstat->ks_update = cpu_info_kstat_update; 2334 kstat_install(cp->cpu_info_kstat); 2335 } 2336 } 2337 2338 static void 2339 cpu_info_kstat_destroy(cpu_t *cp) 2340 { 2341 ASSERT(MUTEX_HELD(&cpu_lock)); 2342 2343 kstat_delete(cp->cpu_info_kstat); 2344 cp->cpu_info_kstat = NULL; 2345 } 2346 2347 /* 2348 * Create and install kstats for the boot CPU. 2349 */ 2350 void 2351 cpu_kstat_init(cpu_t *cp) 2352 { 2353 mutex_enter(&cpu_lock); 2354 cpu_info_kstat_create(cp); 2355 cpu_stats_kstat_create(cp); 2356 cpu_create_intrstat(cp); 2357 cpu_set_state(cp); 2358 mutex_exit(&cpu_lock); 2359 } 2360 2361 /* 2362 * Make visible to the zone that subset of the cpu information that would be 2363 * initialized when a cpu is configured (but still offline). 2364 */ 2365 void 2366 cpu_visibility_configure(cpu_t *cp, zone_t *zone) 2367 { 2368 zoneid_t zoneid = zone ? zone->zone_id : ALL_ZONES; 2369 2370 ASSERT(MUTEX_HELD(&cpu_lock)); 2371 ASSERT(pool_pset_enabled()); 2372 ASSERT(cp != NULL); 2373 2374 if (zoneid != ALL_ZONES && zoneid != GLOBAL_ZONEID) { 2375 zone->zone_ncpus++; 2376 ASSERT(zone->zone_ncpus <= ncpus); 2377 } 2378 if (cp->cpu_info_kstat != NULL) 2379 kstat_zone_add(cp->cpu_info_kstat, zoneid); 2380 } 2381 2382 /* 2383 * Make visible to the zone that subset of the cpu information that would be 2384 * initialized when a previously configured cpu is onlined. 2385 */ 2386 void 2387 cpu_visibility_online(cpu_t *cp, zone_t *zone) 2388 { 2389 kstat_t *ksp; 2390 char name[sizeof ("cpu_stat") + 10]; /* enough for 32-bit cpuids */ 2391 zoneid_t zoneid = zone ? zone->zone_id : ALL_ZONES; 2392 processorid_t cpun; 2393 2394 ASSERT(MUTEX_HELD(&cpu_lock)); 2395 ASSERT(pool_pset_enabled()); 2396 ASSERT(cp != NULL); 2397 ASSERT(cpu_is_active(cp)); 2398 2399 cpun = cp->cpu_id; 2400 if (zoneid != ALL_ZONES && zoneid != GLOBAL_ZONEID) { 2401 zone->zone_ncpus_online++; 2402 ASSERT(zone->zone_ncpus_online <= ncpus_online); 2403 } 2404 (void) snprintf(name, sizeof (name), "cpu_stat%d", cpun); 2405 if ((ksp = kstat_hold_byname("cpu_stat", cpun, name, ALL_ZONES)) 2406 != NULL) { 2407 kstat_zone_add(ksp, zoneid); 2408 kstat_rele(ksp); 2409 } 2410 if ((ksp = kstat_hold_byname("cpu", cpun, "sys", ALL_ZONES)) != NULL) { 2411 kstat_zone_add(ksp, zoneid); 2412 kstat_rele(ksp); 2413 } 2414 if ((ksp = kstat_hold_byname("cpu", cpun, "vm", ALL_ZONES)) != NULL) { 2415 kstat_zone_add(ksp, zoneid); 2416 kstat_rele(ksp); 2417 } 2418 if ((ksp = kstat_hold_byname("cpu", cpun, "intrstat", ALL_ZONES)) != 2419 NULL) { 2420 kstat_zone_add(ksp, zoneid); 2421 kstat_rele(ksp); 2422 } 2423 } 2424 2425 /* 2426 * Update relevant kstats such that cpu is now visible to processes 2427 * executing in specified zone. 2428 */ 2429 void 2430 cpu_visibility_add(cpu_t *cp, zone_t *zone) 2431 { 2432 cpu_visibility_configure(cp, zone); 2433 if (cpu_is_active(cp)) 2434 cpu_visibility_online(cp, zone); 2435 } 2436 2437 /* 2438 * Make invisible to the zone that subset of the cpu information that would be 2439 * torn down when a previously offlined cpu is unconfigured. 2440 */ 2441 void 2442 cpu_visibility_unconfigure(cpu_t *cp, zone_t *zone) 2443 { 2444 zoneid_t zoneid = zone ? 
zone->zone_id : ALL_ZONES; 2445 2446 ASSERT(MUTEX_HELD(&cpu_lock)); 2447 ASSERT(pool_pset_enabled()); 2448 ASSERT(cp != NULL); 2449 2450 if (zoneid != ALL_ZONES && zoneid != GLOBAL_ZONEID) { 2451 ASSERT(zone->zone_ncpus != 0); 2452 zone->zone_ncpus--; 2453 } 2454 if (cp->cpu_info_kstat) 2455 kstat_zone_remove(cp->cpu_info_kstat, zoneid); 2456 } 2457 2458 /* 2459 * Make invisible to the zone that subset of the cpu information that would be 2460 * torn down when a cpu is offlined (but still configured). 2461 */ 2462 void 2463 cpu_visibility_offline(cpu_t *cp, zone_t *zone) 2464 { 2465 kstat_t *ksp; 2466 char name[sizeof ("cpu_stat") + 10]; /* enough for 32-bit cpuids */ 2467 zoneid_t zoneid = zone ? zone->zone_id : ALL_ZONES; 2468 processorid_t cpun; 2469 2470 ASSERT(MUTEX_HELD(&cpu_lock)); 2471 ASSERT(pool_pset_enabled()); 2472 ASSERT(cp != NULL); 2473 ASSERT(cpu_is_active(cp)); 2474 2475 cpun = cp->cpu_id; 2476 if (zoneid != ALL_ZONES && zoneid != GLOBAL_ZONEID) { 2477 ASSERT(zone->zone_ncpus_online != 0); 2478 zone->zone_ncpus_online--; 2479 } 2480 2481 if ((ksp = kstat_hold_byname("cpu", cpun, "intrstat", ALL_ZONES)) != 2482 NULL) { 2483 kstat_zone_remove(ksp, zoneid); 2484 kstat_rele(ksp); 2485 } 2486 if ((ksp = kstat_hold_byname("cpu", cpun, "vm", ALL_ZONES)) != NULL) { 2487 kstat_zone_remove(ksp, zoneid); 2488 kstat_rele(ksp); 2489 } 2490 if ((ksp = kstat_hold_byname("cpu", cpun, "sys", ALL_ZONES)) != NULL) { 2491 kstat_zone_remove(ksp, zoneid); 2492 kstat_rele(ksp); 2493 } 2494 (void) snprintf(name, sizeof (name), "cpu_stat%d", cpun); 2495 if ((ksp = kstat_hold_byname("cpu_stat", cpun, name, ALL_ZONES)) 2496 != NULL) { 2497 kstat_zone_remove(ksp, zoneid); 2498 kstat_rele(ksp); 2499 } 2500 } 2501 2502 /* 2503 * Update relevant kstats such that cpu is no longer visible to processes 2504 * executing in specified zone. 2505 */ 2506 void 2507 cpu_visibility_remove(cpu_t *cp, zone_t *zone) 2508 { 2509 if (cpu_is_active(cp)) 2510 cpu_visibility_offline(cp, zone); 2511 cpu_visibility_unconfigure(cp, zone); 2512 } 2513 2514 /* 2515 * Bind a thread to a CPU as requested. 2516 */ 2517 int 2518 cpu_bind_thread(kthread_id_t tp, processorid_t bind, processorid_t *obind, 2519 int *error) 2520 { 2521 processorid_t binding; 2522 cpu_t *cp = NULL; 2523 2524 ASSERT(MUTEX_HELD(&cpu_lock)); 2525 ASSERT(MUTEX_HELD(&ttoproc(tp)->p_lock)); 2526 2527 thread_lock(tp); 2528 2529 /* 2530 * Record old binding, but change the obind, which was initialized 2531 * to PBIND_NONE, only if this thread has a binding. This avoids 2532 * reporting PBIND_NONE for a process when some LWPs are bound. 2533 */ 2534 binding = tp->t_bind_cpu; 2535 if (binding != PBIND_NONE) 2536 *obind = binding; /* record old binding */ 2537 2538 switch (bind) { 2539 case PBIND_QUERY: 2540 /* Just return the old binding */ 2541 thread_unlock(tp); 2542 return (0); 2543 2544 case PBIND_QUERY_TYPE: 2545 /* Return the binding type */ 2546 *obind = TB_CPU_IS_SOFT(tp) ? 
PBIND_SOFT : PBIND_HARD; 2547 thread_unlock(tp); 2548 return (0); 2549 2550 case PBIND_SOFT: 2551 /* 2552 * Set soft binding for this thread and return the actual 2553 * binding 2554 */ 2555 TB_CPU_SOFT_SET(tp); 2556 thread_unlock(tp); 2557 return (0); 2558 2559 case PBIND_HARD: 2560 /* 2561 * Set hard binding for this thread and return the actual 2562 * binding 2563 */ 2564 TB_CPU_HARD_SET(tp); 2565 thread_unlock(tp); 2566 return (0); 2567 2568 default: 2569 break; 2570 } 2571 2572 /* 2573 * If this thread/LWP cannot be bound because of permission 2574 * problems, just note that and return success so that the 2575 * other threads/LWPs will be bound. This is the way 2576 * processor_bind() is defined to work. 2577 * 2578 * Binding will get EPERM if the thread is of system class 2579 * or hasprocperm() fails. 2580 */ 2581 if (tp->t_cid == 0 || !hasprocperm(tp->t_cred, CRED())) { 2582 *error = EPERM; 2583 thread_unlock(tp); 2584 return (0); 2585 } 2586 2587 binding = bind; 2588 if (binding != PBIND_NONE) { 2589 cp = cpu_get((processorid_t)binding); 2590 /* 2591 * Make sure binding is valid and is in right partition. 2592 */ 2593 if (cp == NULL || tp->t_cpupart != cp->cpu_part) { 2594 *error = EINVAL; 2595 thread_unlock(tp); 2596 return (0); 2597 } 2598 } 2599 tp->t_bind_cpu = binding; /* set new binding */ 2600 2601 /* 2602 * If there is no system-set reason for affinity, set 2603 * the t_bound_cpu field to reflect the binding. 2604 */ 2605 if (tp->t_affinitycnt == 0) { 2606 if (binding == PBIND_NONE) { 2607 /* 2608 * We may need to adjust disp_max_unbound_pri 2609 * since we're becoming unbound. 2610 */ 2611 disp_adjust_unbound_pri(tp); 2612 2613 tp->t_bound_cpu = NULL; /* set new binding */ 2614 2615 /* 2616 * Move thread to lgroup with strongest affinity 2617 * after unbinding 2618 */ 2619 if (tp->t_lgrp_affinity) 2620 lgrp_move_thread(tp, 2621 lgrp_choose(tp, tp->t_cpupart), 1); 2622 2623 if (tp->t_state == TS_ONPROC && 2624 tp->t_cpu->cpu_part != tp->t_cpupart) 2625 cpu_surrender(tp); 2626 } else { 2627 lpl_t *lpl; 2628 2629 tp->t_bound_cpu = cp; 2630 ASSERT(cp->cpu_lpl != NULL); 2631 2632 /* 2633 * Set home to lgroup with most affinity containing CPU 2634 * that thread is being bound or minimum bounding 2635 * lgroup if no affinities set 2636 */ 2637 if (tp->t_lgrp_affinity) 2638 lpl = lgrp_affinity_best(tp, tp->t_cpupart, 2639 LGRP_NONE, B_FALSE); 2640 else 2641 lpl = cp->cpu_lpl; 2642 2643 if (tp->t_lpl != lpl) { 2644 /* can't grab cpu_lock */ 2645 lgrp_move_thread(tp, lpl, 1); 2646 } 2647 2648 /* 2649 * Make the thread switch to the bound CPU. 2650 * If the thread is runnable, we need to 2651 * requeue it even if t_cpu is already set 2652 * to the right CPU, since it may be on a 2653 * kpreempt queue and need to move to a local 2654 * queue. We could check t_disp_queue to 2655 * avoid unnecessary overhead if it's already 2656 * on the right queue, but since this isn't 2657 * a performance-critical operation it doesn't 2658 * seem worth the extra code and complexity. 2659 * 2660 * If the thread is weakbound to the cpu then it will 2661 * resist the new binding request until the weak 2662 * binding drops. The cpu_surrender or requeueing 2663 * below could be skipped in such cases (since it 2664 * will have no effect), but that would require 2665 * thread_allowmigrate to acquire thread_lock so 2666 * we'll take the very occasional hit here instead. 
2667 */ 2668 if (tp->t_state == TS_ONPROC) { 2669 cpu_surrender(tp); 2670 } else if (tp->t_state == TS_RUN) { 2671 cpu_t *ocp = tp->t_cpu; 2672 2673 (void) dispdeq(tp); 2674 setbackdq(tp); 2675 /* 2676 * On the bound CPU's disp queue now. 2677 */ 2678 ASSERT(tp->t_disp_queue == cp->cpu_disp || 2679 tp->t_weakbound_cpu == ocp); 2680 } 2681 } 2682 } 2683 2684 /* 2685 * Our binding has changed; set TP_CHANGEBIND. 2686 */ 2687 tp->t_proc_flag |= TP_CHANGEBIND; 2688 aston(tp); 2689 2690 thread_unlock(tp); 2691 2692 return (0); 2693 } 2694 2695 #if CPUSET_WORDS > 1 2696 2697 /* 2698 * Functions for implementing cpuset operations when a cpuset is more 2699 * than one word. On platforms where a cpuset is a single word these 2700 * are implemented as macros in cpuvar.h. 2701 */ 2702 2703 void 2704 cpuset_all(cpuset_t *s) 2705 { 2706 int i; 2707 2708 for (i = 0; i < CPUSET_WORDS; i++) 2709 s->cpub[i] = ~0UL; 2710 } 2711 2712 void 2713 cpuset_all_but(cpuset_t *s, uint_t cpu) 2714 { 2715 cpuset_all(s); 2716 CPUSET_DEL(*s, cpu); 2717 } 2718 2719 void 2720 cpuset_only(cpuset_t *s, uint_t cpu) 2721 { 2722 CPUSET_ZERO(*s); 2723 CPUSET_ADD(*s, cpu); 2724 } 2725 2726 int 2727 cpuset_isnull(cpuset_t *s) 2728 { 2729 int i; 2730 2731 for (i = 0; i < CPUSET_WORDS; i++) 2732 if (s->cpub[i] != 0) 2733 return (0); 2734 return (1); 2735 } 2736 2737 int 2738 cpuset_cmp(cpuset_t *s1, cpuset_t *s2) 2739 { 2740 int i; 2741 2742 for (i = 0; i < CPUSET_WORDS; i++) 2743 if (s1->cpub[i] != s2->cpub[i]) 2744 return (0); 2745 return (1); 2746 } 2747 2748 uint_t 2749 cpuset_find(cpuset_t *s) 2750 { 2751 2752 uint_t i; 2753 uint_t cpu = (uint_t)-1; 2754 2755 /* 2756 * Find a cpu in the cpuset 2757 */ 2758 for (i = 0; i < CPUSET_WORDS; i++) { 2759 cpu = (uint_t)(lowbit(s->cpub[i]) - 1); 2760 if (cpu != (uint_t)-1) { 2761 cpu += i * BT_NBIPUL; 2762 break; 2763 } 2764 } 2765 return (cpu); 2766 } 2767 2768 void 2769 cpuset_bounds(cpuset_t *s, uint_t *smallestid, uint_t *largestid) 2770 { 2771 int i, j; 2772 uint_t bit; 2773 2774 /* 2775 * First, find the smallest cpu id in the set. 2776 */ 2777 for (i = 0; i < CPUSET_WORDS; i++) { 2778 if (s->cpub[i] != 0) { 2779 bit = (uint_t)(lowbit(s->cpub[i]) - 1); 2780 ASSERT(bit != (uint_t)-1); 2781 *smallestid = bit + (i * BT_NBIPUL); 2782 2783 /* 2784 * Now find the largest cpu id in 2785 * the set and return immediately. 2786 * Done in an inner loop to avoid 2787 * having to break out of the first 2788 * loop. 2789 */ 2790 for (j = CPUSET_WORDS - 1; j >= i; j--) { 2791 if (s->cpub[j] != 0) { 2792 bit = (uint_t)(highbit(s->cpub[j]) - 1); 2793 ASSERT(bit != (uint_t)-1); 2794 *largestid = bit + (j * BT_NBIPUL); 2795 ASSERT(*largestid >= *smallestid); 2796 return; 2797 } 2798 } 2799 2800 /* 2801 * If this code is reached, a 2802 * smallestid was found, but not a 2803 * largestid. The cpuset must have 2804 * been changed during the course 2805 * of this function call. 2806 */ 2807 ASSERT(0); 2808 } 2809 } 2810 *smallestid = *largestid = CPUSET_NOTINSET; 2811 } 2812 2813 #endif /* CPUSET_WORDS */ 2814 2815 /* 2816 * Unbind threads bound to specified CPU. 2817 * 2818 * If `unbind_all_threads' is true, unbind all user threads bound to a given 2819 * CPU. Otherwise unbind all soft-bound user threads. 
2820 */ 2821 int 2822 cpu_unbind(processorid_t cpu, boolean_t unbind_all_threads) 2823 { 2824 processorid_t obind; 2825 kthread_t *tp; 2826 int ret = 0; 2827 proc_t *pp; 2828 int err, berr = 0; 2829 2830 ASSERT(MUTEX_HELD(&cpu_lock)); 2831 2832 mutex_enter(&pidlock); 2833 for (pp = practive; pp != NULL; pp = pp->p_next) { 2834 mutex_enter(&pp->p_lock); 2835 tp = pp->p_tlist; 2836 /* 2837 * Skip zombies, kernel processes, and processes in 2838 * other zones, if called from a non-global zone. 2839 */ 2840 if (tp == NULL || (pp->p_flag & SSYS) || 2841 !HASZONEACCESS(curproc, pp->p_zone->zone_id)) { 2842 mutex_exit(&pp->p_lock); 2843 continue; 2844 } 2845 do { 2846 if (tp->t_bind_cpu != cpu) 2847 continue; 2848 /* 2849 * Skip threads with hard binding when 2850 * `unbind_all_threads' is not specified. 2851 */ 2852 if (!unbind_all_threads && TB_CPU_IS_HARD(tp)) 2853 continue; 2854 err = cpu_bind_thread(tp, PBIND_NONE, &obind, &berr); 2855 if (ret == 0) 2856 ret = err; 2857 } while ((tp = tp->t_forw) != pp->p_tlist); 2858 mutex_exit(&pp->p_lock); 2859 } 2860 mutex_exit(&pidlock); 2861 if (ret == 0) 2862 ret = berr; 2863 return (ret); 2864 } 2865 2866 2867 /* 2868 * Destroy all remaining bound threads on a cpu. 2869 */ 2870 void 2871 cpu_destroy_bound_threads(cpu_t *cp) 2872 { 2873 extern id_t syscid; 2874 register kthread_id_t t, tlist, tnext; 2875 2876 /* 2877 * Destroy all remaining bound threads on the cpu. This 2878 * should include both the interrupt threads and the idle thread. 2879 * This requires some care, since we need to traverse the 2880 * thread list with the pidlock mutex locked, but thread_free 2881 * also locks the pidlock mutex. So, we collect the threads 2882 * we're going to reap in a list headed by "tlist", then we 2883 * unlock the pidlock mutex and traverse the tlist list, 2884 * doing thread_free's on the threads. Simple, n'est-ce pas? 2885 * Also, this depends on thread_free not mucking with the 2886 * t_next and t_prev links of the thread. 2887 */ 2888 2889 if ((t = curthread) != NULL) { 2890 2891 tlist = NULL; 2892 mutex_enter(&pidlock); 2893 do { 2894 tnext = t->t_next; 2895 if (t->t_bound_cpu == cp) { 2896 2897 /* 2898 * We've found a bound thread, carefully unlink 2899 * it out of the thread list, and add it to 2900 * our "tlist". We "know" we don't have to 2901 * worry about unlinking curthread (the thread 2902 * that is executing this code). 2903 */ 2904 t->t_next->t_prev = t->t_prev; 2905 t->t_prev->t_next = t->t_next; 2906 t->t_next = tlist; 2907 tlist = t; 2908 ASSERT(t->t_cid == syscid); 2909 /* wake up anyone blocked in thread_join */ 2910 cv_broadcast(&t->t_joincv); 2911 /* 2912 * t_lwp set by interrupt threads and not 2913 * cleared. 2914 */ 2915 t->t_lwp = NULL; 2916 /* 2917 * Pause and idle threads always have 2918 * t_state set to TS_ONPROC. 2919 */ 2920 t->t_state = TS_FREE; 2921 t->t_prev = NULL; /* Just in case */ 2922 } 2923 2924 } while ((t = tnext) != curthread); 2925 2926 mutex_exit(&pidlock); 2927 2928 mutex_sync(); 2929 for (t = tlist; t != NULL; t = tnext) { 2930 tnext = t->t_next; 2931 thread_free(t); 2932 } 2933 } 2934 } 2935 2936 /* 2937 * Update the cpu_supp_freqs of this cpu. This information is returned 2938 * as part of cpu_info kstats. If the cpu_info_kstat exists already, then 2939 * maintain the kstat data size. 
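 * Callers conventionally pass a colon-separated list of frequencies in
 * Hz (e.g. "1200000000:2400000000"); a NULL argument is taken to mean
 * that only the current frequency is supported.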
2940 */ 2941 void 2942 cpu_set_supp_freqs(cpu_t *cp, const char *freqs) 2943 { 2944 char clkstr[sizeof ("18446744073709551615") + 1]; /* ui64 MAX */ 2945 const char *lfreqs = clkstr; 2946 boolean_t kstat_exists = B_FALSE; 2947 kstat_t *ksp; 2948 size_t len; 2949 2950 /* 2951 * A NULL pointer means we only support one speed. 2952 */ 2953 if (freqs == NULL) 2954 (void) snprintf(clkstr, sizeof (clkstr), "%"PRIu64, 2955 cp->cpu_curr_clock); 2956 else 2957 lfreqs = freqs; 2958 2959 /* 2960 * Make sure the frequency doesn't change while a snapshot is 2961 * going on. Of course, we only need to worry about this if 2962 * the kstat exists. 2963 */ 2964 if ((ksp = cp->cpu_info_kstat) != NULL) { 2965 mutex_enter(ksp->ks_lock); 2966 kstat_exists = B_TRUE; 2967 } 2968 2969 /* 2970 * Free any previously allocated string and if the kstat 2971 * already exists, then update its data size. 2972 */ 2973 if (cp->cpu_supp_freqs != NULL) { 2974 len = strlen(cp->cpu_supp_freqs) + 1; 2975 kmem_free(cp->cpu_supp_freqs, len); 2976 if (kstat_exists) 2977 ksp->ks_data_size -= len; 2978 } 2979 2980 /* 2981 * Allocate the new string and set the pointer. 2982 */ 2983 len = strlen(lfreqs) + 1; 2984 cp->cpu_supp_freqs = kmem_alloc(len, KM_SLEEP); 2985 (void) strcpy(cp->cpu_supp_freqs, lfreqs); 2986 2987 /* 2988 * If the kstat already exists then update the data size and 2989 * drop the lock. 2990 */ 2991 if (kstat_exists) { 2992 ksp->ks_data_size += len; 2993 mutex_exit(ksp->ks_lock); 2994 } 2995 } 2996 2997 /* 2998 * Indicate the current CPU's clock frequency (in Hz). 2999 * The calling context must be such that CPU references are safe. 3000 */ 3001 void 3002 cpu_set_curr_clock(uint64_t new_clk) 3003 { 3004 uint64_t old_clk; 3005 3006 old_clk = CPU->cpu_curr_clock; 3007 CPU->cpu_curr_clock = new_clk; 3008 3009 /* 3010 * The cpu-change-speed DTrace probe exports the frequency in Hz 3011 */ 3012 DTRACE_PROBE3(cpu__change__speed, processorid_t, CPU->cpu_id, 3013 uint64_t, old_clk, uint64_t, new_clk); 3014 } 3015 3016 /* 3017 * processor_info(2) and p_online(2) status support functions 3018 * The constants returned by cpu_get_state() and cpu_get_state_str() are 3019 * for use in communicating processor state information to userland. Kernel 3020 * subsystems should only be using the cpu_flags value directly. Subsystems 3021 * modifying cpu_flags should record the state change via a call to 3022 * cpu_set_state(). 3023 */ 3024 3025 /* 3026 * Update the pi_state of this CPU. This function provides the CPU status for 3027 * the information returned by processor_info(2). 3028 */ 3029 void 3030 cpu_set_state(cpu_t *cpu) 3031 { 3032 ASSERT(MUTEX_HELD(&cpu_lock)); 3033 cpu->cpu_type_info.pi_state = cpu_get_state(cpu); 3034 cpu->cpu_state_begin = gethrestime_sec(); 3035 pool_cpu_mod = gethrtime(); 3036 } 3037 3038 /* 3039 * Return offline/online/other status for the indicated CPU. Use only for 3040 * communication with user applications; cpu_flags provides the in-kernel 3041 * interface. 
3042 */ 3043 int 3044 cpu_get_state(cpu_t *cpu) 3045 { 3046 ASSERT(MUTEX_HELD(&cpu_lock)); 3047 if (cpu->cpu_flags & CPU_POWEROFF) 3048 return (P_POWEROFF); 3049 else if (cpu->cpu_flags & CPU_FAULTED) 3050 return (P_FAULTED); 3051 else if (cpu->cpu_flags & CPU_SPARE) 3052 return (P_SPARE); 3053 else if ((cpu->cpu_flags & (CPU_READY | CPU_OFFLINE)) != CPU_READY) 3054 return (P_OFFLINE); 3055 else if (cpu->cpu_flags & CPU_ENABLE) 3056 return (P_ONLINE); 3057 else 3058 return (P_NOINTR); 3059 } 3060 3061 /* 3062 * Return processor_info(2) state as a string. 3063 */ 3064 const char * 3065 cpu_get_state_str(cpu_t *cpu) 3066 { 3067 const char *string; 3068 3069 switch (cpu_get_state(cpu)) { 3070 case P_ONLINE: 3071 string = PS_ONLINE; 3072 break; 3073 case P_POWEROFF: 3074 string = PS_POWEROFF; 3075 break; 3076 case P_NOINTR: 3077 string = PS_NOINTR; 3078 break; 3079 case P_SPARE: 3080 string = PS_SPARE; 3081 break; 3082 case P_FAULTED: 3083 string = PS_FAULTED; 3084 break; 3085 case P_OFFLINE: 3086 string = PS_OFFLINE; 3087 break; 3088 default: 3089 string = "unknown"; 3090 break; 3091 } 3092 return (string); 3093 } 3094 3095 /* 3096 * Export this CPU's statistics (cpu_stat_t and cpu_stats_t) as raw and named 3097 * kstats, respectively. This is done when a CPU is initialized or placed 3098 * online via p_online(2). 3099 */ 3100 static void 3101 cpu_stats_kstat_create(cpu_t *cp) 3102 { 3103 int instance = cp->cpu_id; 3104 char *module = "cpu"; 3105 char *class = "misc"; 3106 kstat_t *ksp; 3107 zoneid_t zoneid; 3108 3109 ASSERT(MUTEX_HELD(&cpu_lock)); 3110 3111 if (pool_pset_enabled()) 3112 zoneid = GLOBAL_ZONEID; 3113 else 3114 zoneid = ALL_ZONES; 3115 /* 3116 * Create named kstats 3117 */ 3118 #define CPU_STATS_KS_CREATE(name, tsize, update_func) \ 3119 ksp = kstat_create_zone(module, instance, (name), class, \ 3120 KSTAT_TYPE_NAMED, (tsize) / sizeof (kstat_named_t), 0, \ 3121 zoneid); \ 3122 if (ksp != NULL) { \ 3123 ksp->ks_private = cp; \ 3124 ksp->ks_update = (update_func); \ 3125 kstat_install(ksp); \ 3126 } else \ 3127 cmn_err(CE_WARN, "cpu: unable to create %s:%d:%s kstat", \ 3128 module, instance, (name)); 3129 3130 CPU_STATS_KS_CREATE("sys", sizeof (cpu_sys_stats_ks_data_template), 3131 cpu_sys_stats_ks_update); 3132 CPU_STATS_KS_CREATE("vm", sizeof (cpu_vm_stats_ks_data_template), 3133 cpu_vm_stats_ks_update); 3134 3135 /* 3136 * Export the familiar cpu_stat_t KSTAT_TYPE_RAW kstat. 3137 */ 3138 ksp = kstat_create_zone("cpu_stat", cp->cpu_id, NULL, 3139 "misc", KSTAT_TYPE_RAW, sizeof (cpu_stat_t), 0, zoneid); 3140 if (ksp != NULL) { 3141 ksp->ks_update = cpu_stat_ks_update; 3142 ksp->ks_private = cp; 3143 kstat_install(ksp); 3144 } 3145 } 3146 3147 static void 3148 cpu_stats_kstat_destroy(cpu_t *cp) 3149 { 3150 char ks_name[KSTAT_STRLEN]; 3151 3152 (void) sprintf(ks_name, "cpu_stat%d", cp->cpu_id); 3153 kstat_delete_byname("cpu_stat", cp->cpu_id, ks_name); 3154 3155 kstat_delete_byname("cpu", cp->cpu_id, "sys"); 3156 kstat_delete_byname("cpu", cp->cpu_id, "vm"); 3157 } 3158 3159 static int 3160 cpu_sys_stats_ks_update(kstat_t *ksp, int rw) 3161 { 3162 cpu_t *cp = (cpu_t *)ksp->ks_private; 3163 struct cpu_sys_stats_ks_data *csskd; 3164 cpu_sys_stats_t *css; 3165 hrtime_t msnsecs[NCMSTATES]; 3166 int i; 3167 3168 if (rw == KSTAT_WRITE) 3169 return (EACCES); 3170 3171 csskd = ksp->ks_data; 3172 css = &cp->cpu_stats.sys; 3173 3174 /* 3175 * Read CPU mstate, but compare with the last values we 3176 * received to make sure that the returned kstats never 3177 * decrease. 
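 * (The microstate accumulators are sampled without stopping the CPU, so
 * two successive unlocked reads are not guaranteed to be monotonic on
 * their own.)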
3178 */ 3179 3180 get_cpu_mstate(cp, msnsecs); 3181 if (csskd->cpu_nsec_idle.value.ui64 > msnsecs[CMS_IDLE]) 3182 msnsecs[CMS_IDLE] = csskd->cpu_nsec_idle.value.ui64; 3183 if (csskd->cpu_nsec_user.value.ui64 > msnsecs[CMS_USER]) 3184 msnsecs[CMS_USER] = csskd->cpu_nsec_user.value.ui64; 3185 if (csskd->cpu_nsec_kernel.value.ui64 > msnsecs[CMS_SYSTEM]) 3186 msnsecs[CMS_SYSTEM] = csskd->cpu_nsec_kernel.value.ui64; 3187 3188 bcopy(&cpu_sys_stats_ks_data_template, ksp->ks_data, 3189 sizeof (cpu_sys_stats_ks_data_template)); 3190 3191 csskd->cpu_ticks_wait.value.ui64 = 0; 3192 csskd->wait_ticks_io.value.ui64 = 0; 3193 3194 csskd->cpu_nsec_idle.value.ui64 = msnsecs[CMS_IDLE]; 3195 csskd->cpu_nsec_user.value.ui64 = msnsecs[CMS_USER]; 3196 csskd->cpu_nsec_kernel.value.ui64 = msnsecs[CMS_SYSTEM]; 3197 csskd->cpu_ticks_idle.value.ui64 = 3198 NSEC_TO_TICK(csskd->cpu_nsec_idle.value.ui64); 3199 csskd->cpu_ticks_user.value.ui64 = 3200 NSEC_TO_TICK(csskd->cpu_nsec_user.value.ui64); 3201 csskd->cpu_ticks_kernel.value.ui64 = 3202 NSEC_TO_TICK(csskd->cpu_nsec_kernel.value.ui64); 3203 csskd->cpu_nsec_dtrace.value.ui64 = cp->cpu_dtrace_nsec; 3204 csskd->dtrace_probes.value.ui64 = cp->cpu_dtrace_probes; 3205 csskd->cpu_nsec_intr.value.ui64 = cp->cpu_intrlast; 3206 csskd->cpu_load_intr.value.ui64 = cp->cpu_intrload; 3207 csskd->bread.value.ui64 = css->bread; 3208 csskd->bwrite.value.ui64 = css->bwrite; 3209 csskd->lread.value.ui64 = css->lread; 3210 csskd->lwrite.value.ui64 = css->lwrite; 3211 csskd->phread.value.ui64 = css->phread; 3212 csskd->phwrite.value.ui64 = css->phwrite; 3213 csskd->pswitch.value.ui64 = css->pswitch; 3214 csskd->trap.value.ui64 = css->trap; 3215 csskd->intr.value.ui64 = 0; 3216 for (i = 0; i < PIL_MAX; i++) 3217 csskd->intr.value.ui64 += css->intr[i]; 3218 csskd->syscall.value.ui64 = css->syscall; 3219 csskd->sysread.value.ui64 = css->sysread; 3220 csskd->syswrite.value.ui64 = css->syswrite; 3221 csskd->sysfork.value.ui64 = css->sysfork; 3222 csskd->sysvfork.value.ui64 = css->sysvfork; 3223 csskd->sysexec.value.ui64 = css->sysexec; 3224 csskd->readch.value.ui64 = css->readch; 3225 csskd->writech.value.ui64 = css->writech; 3226 csskd->rcvint.value.ui64 = css->rcvint; 3227 csskd->xmtint.value.ui64 = css->xmtint; 3228 csskd->mdmint.value.ui64 = css->mdmint; 3229 csskd->rawch.value.ui64 = css->rawch; 3230 csskd->canch.value.ui64 = css->canch; 3231 csskd->outch.value.ui64 = css->outch; 3232 csskd->msg.value.ui64 = css->msg; 3233 csskd->sema.value.ui64 = css->sema; 3234 csskd->namei.value.ui64 = css->namei; 3235 csskd->ufsiget.value.ui64 = css->ufsiget; 3236 csskd->ufsdirblk.value.ui64 = css->ufsdirblk; 3237 csskd->ufsipage.value.ui64 = css->ufsipage; 3238 csskd->ufsinopage.value.ui64 = css->ufsinopage; 3239 csskd->procovf.value.ui64 = css->procovf; 3240 csskd->intrthread.value.ui64 = 0; 3241 for (i = 0; i < LOCK_LEVEL - 1; i++) 3242 csskd->intrthread.value.ui64 += css->intr[i]; 3243 csskd->intrblk.value.ui64 = css->intrblk; 3244 csskd->intrunpin.value.ui64 = css->intrunpin; 3245 csskd->idlethread.value.ui64 = css->idlethread; 3246 csskd->inv_swtch.value.ui64 = css->inv_swtch; 3247 csskd->nthreads.value.ui64 = css->nthreads; 3248 csskd->cpumigrate.value.ui64 = css->cpumigrate; 3249 csskd->xcalls.value.ui64 = css->xcalls; 3250 csskd->mutex_adenters.value.ui64 = css->mutex_adenters; 3251 csskd->rw_rdfails.value.ui64 = css->rw_rdfails; 3252 csskd->rw_wrfails.value.ui64 = css->rw_wrfails; 3253 csskd->modload.value.ui64 = css->modload; 3254 csskd->modunload.value.ui64 = css->modunload; 3255 
csskd->bawrite.value.ui64 = css->bawrite; 3256 csskd->iowait.value.ui64 = css->iowait; 3257 3258 return (0); 3259 } 3260 3261 static int 3262 cpu_vm_stats_ks_update(kstat_t *ksp, int rw) 3263 { 3264 cpu_t *cp = (cpu_t *)ksp->ks_private; 3265 struct cpu_vm_stats_ks_data *cvskd; 3266 cpu_vm_stats_t *cvs; 3267 3268 if (rw == KSTAT_WRITE) 3269 return (EACCES); 3270 3271 cvs = &cp->cpu_stats.vm; 3272 cvskd = ksp->ks_data; 3273 3274 bcopy(&cpu_vm_stats_ks_data_template, ksp->ks_data, 3275 sizeof (cpu_vm_stats_ks_data_template)); 3276 cvskd->pgrec.value.ui64 = cvs->pgrec; 3277 cvskd->pgfrec.value.ui64 = cvs->pgfrec; 3278 cvskd->pgin.value.ui64 = cvs->pgin; 3279 cvskd->pgpgin.value.ui64 = cvs->pgpgin; 3280 cvskd->pgout.value.ui64 = cvs->pgout; 3281 cvskd->pgpgout.value.ui64 = cvs->pgpgout; 3282 cvskd->zfod.value.ui64 = cvs->zfod; 3283 cvskd->dfree.value.ui64 = cvs->dfree; 3284 cvskd->scan.value.ui64 = cvs->scan; 3285 cvskd->rev.value.ui64 = cvs->rev; 3286 cvskd->hat_fault.value.ui64 = cvs->hat_fault; 3287 cvskd->as_fault.value.ui64 = cvs->as_fault; 3288 cvskd->maj_fault.value.ui64 = cvs->maj_fault; 3289 cvskd->cow_fault.value.ui64 = cvs->cow_fault; 3290 cvskd->prot_fault.value.ui64 = cvs->prot_fault; 3291 cvskd->softlock.value.ui64 = cvs->softlock; 3292 cvskd->kernel_asflt.value.ui64 = cvs->kernel_asflt; 3293 cvskd->pgrrun.value.ui64 = cvs->pgrrun; 3294 cvskd->execpgin.value.ui64 = cvs->execpgin; 3295 cvskd->execpgout.value.ui64 = cvs->execpgout; 3296 cvskd->execfree.value.ui64 = cvs->execfree; 3297 cvskd->anonpgin.value.ui64 = cvs->anonpgin; 3298 cvskd->anonpgout.value.ui64 = cvs->anonpgout; 3299 cvskd->anonfree.value.ui64 = cvs->anonfree; 3300 cvskd->fspgin.value.ui64 = cvs->fspgin; 3301 cvskd->fspgout.value.ui64 = cvs->fspgout; 3302 cvskd->fsfree.value.ui64 = cvs->fsfree; 3303 3304 return (0); 3305 } 3306 3307 static int 3308 cpu_stat_ks_update(kstat_t *ksp, int rw) 3309 { 3310 cpu_stat_t *cso; 3311 cpu_t *cp; 3312 int i; 3313 hrtime_t msnsecs[NCMSTATES]; 3314 3315 cso = (cpu_stat_t *)ksp->ks_data; 3316 cp = (cpu_t *)ksp->ks_private; 3317 3318 if (rw == KSTAT_WRITE) 3319 return (EACCES); 3320 3321 /* 3322 * Read CPU mstate, but compare with the last values we 3323 * received to make sure that the returned kstats never 3324 * decrease. 
3325 */ 3326 3327 get_cpu_mstate(cp, msnsecs); 3328 msnsecs[CMS_IDLE] = NSEC_TO_TICK(msnsecs[CMS_IDLE]); 3329 msnsecs[CMS_USER] = NSEC_TO_TICK(msnsecs[CMS_USER]); 3330 msnsecs[CMS_SYSTEM] = NSEC_TO_TICK(msnsecs[CMS_SYSTEM]); 3331 if (cso->cpu_sysinfo.cpu[CPU_IDLE] < msnsecs[CMS_IDLE]) 3332 cso->cpu_sysinfo.cpu[CPU_IDLE] = msnsecs[CMS_IDLE]; 3333 if (cso->cpu_sysinfo.cpu[CPU_USER] < msnsecs[CMS_USER]) 3334 cso->cpu_sysinfo.cpu[CPU_USER] = msnsecs[CMS_USER]; 3335 if (cso->cpu_sysinfo.cpu[CPU_KERNEL] < msnsecs[CMS_SYSTEM]) 3336 cso->cpu_sysinfo.cpu[CPU_KERNEL] = msnsecs[CMS_SYSTEM]; 3337 cso->cpu_sysinfo.cpu[CPU_WAIT] = 0; 3338 cso->cpu_sysinfo.wait[W_IO] = 0; 3339 cso->cpu_sysinfo.wait[W_SWAP] = 0; 3340 cso->cpu_sysinfo.wait[W_PIO] = 0; 3341 cso->cpu_sysinfo.bread = CPU_STATS(cp, sys.bread); 3342 cso->cpu_sysinfo.bwrite = CPU_STATS(cp, sys.bwrite); 3343 cso->cpu_sysinfo.lread = CPU_STATS(cp, sys.lread); 3344 cso->cpu_sysinfo.lwrite = CPU_STATS(cp, sys.lwrite); 3345 cso->cpu_sysinfo.phread = CPU_STATS(cp, sys.phread); 3346 cso->cpu_sysinfo.phwrite = CPU_STATS(cp, sys.phwrite); 3347 cso->cpu_sysinfo.pswitch = CPU_STATS(cp, sys.pswitch); 3348 cso->cpu_sysinfo.trap = CPU_STATS(cp, sys.trap); 3349 cso->cpu_sysinfo.intr = 0; 3350 for (i = 0; i < PIL_MAX; i++) 3351 cso->cpu_sysinfo.intr += CPU_STATS(cp, sys.intr[i]); 3352 cso->cpu_sysinfo.syscall = CPU_STATS(cp, sys.syscall); 3353 cso->cpu_sysinfo.sysread = CPU_STATS(cp, sys.sysread); 3354 cso->cpu_sysinfo.syswrite = CPU_STATS(cp, sys.syswrite); 3355 cso->cpu_sysinfo.sysfork = CPU_STATS(cp, sys.sysfork); 3356 cso->cpu_sysinfo.sysvfork = CPU_STATS(cp, sys.sysvfork); 3357 cso->cpu_sysinfo.sysexec = CPU_STATS(cp, sys.sysexec); 3358 cso->cpu_sysinfo.readch = CPU_STATS(cp, sys.readch); 3359 cso->cpu_sysinfo.writech = CPU_STATS(cp, sys.writech); 3360 cso->cpu_sysinfo.rcvint = CPU_STATS(cp, sys.rcvint); 3361 cso->cpu_sysinfo.xmtint = CPU_STATS(cp, sys.xmtint); 3362 cso->cpu_sysinfo.mdmint = CPU_STATS(cp, sys.mdmint); 3363 cso->cpu_sysinfo.rawch = CPU_STATS(cp, sys.rawch); 3364 cso->cpu_sysinfo.canch = CPU_STATS(cp, sys.canch); 3365 cso->cpu_sysinfo.outch = CPU_STATS(cp, sys.outch); 3366 cso->cpu_sysinfo.msg = CPU_STATS(cp, sys.msg); 3367 cso->cpu_sysinfo.sema = CPU_STATS(cp, sys.sema); 3368 cso->cpu_sysinfo.namei = CPU_STATS(cp, sys.namei); 3369 cso->cpu_sysinfo.ufsiget = CPU_STATS(cp, sys.ufsiget); 3370 cso->cpu_sysinfo.ufsdirblk = CPU_STATS(cp, sys.ufsdirblk); 3371 cso->cpu_sysinfo.ufsipage = CPU_STATS(cp, sys.ufsipage); 3372 cso->cpu_sysinfo.ufsinopage = CPU_STATS(cp, sys.ufsinopage); 3373 cso->cpu_sysinfo.inodeovf = 0; 3374 cso->cpu_sysinfo.fileovf = 0; 3375 cso->cpu_sysinfo.procovf = CPU_STATS(cp, sys.procovf); 3376 cso->cpu_sysinfo.intrthread = 0; 3377 for (i = 0; i < LOCK_LEVEL - 1; i++) 3378 cso->cpu_sysinfo.intrthread += CPU_STATS(cp, sys.intr[i]); 3379 cso->cpu_sysinfo.intrblk = CPU_STATS(cp, sys.intrblk); 3380 cso->cpu_sysinfo.idlethread = CPU_STATS(cp, sys.idlethread); 3381 cso->cpu_sysinfo.inv_swtch = CPU_STATS(cp, sys.inv_swtch); 3382 cso->cpu_sysinfo.nthreads = CPU_STATS(cp, sys.nthreads); 3383 cso->cpu_sysinfo.cpumigrate = CPU_STATS(cp, sys.cpumigrate); 3384 cso->cpu_sysinfo.xcalls = CPU_STATS(cp, sys.xcalls); 3385 cso->cpu_sysinfo.mutex_adenters = CPU_STATS(cp, sys.mutex_adenters); 3386 cso->cpu_sysinfo.rw_rdfails = CPU_STATS(cp, sys.rw_rdfails); 3387 cso->cpu_sysinfo.rw_wrfails = CPU_STATS(cp, sys.rw_wrfails); 3388 cso->cpu_sysinfo.modload = CPU_STATS(cp, sys.modload); 3389 cso->cpu_sysinfo.modunload = CPU_STATS(cp, sys.modunload); 3390 
cso->cpu_sysinfo.bawrite = CPU_STATS(cp, sys.bawrite); 3391 cso->cpu_sysinfo.rw_enters = 0; 3392 cso->cpu_sysinfo.win_uo_cnt = 0; 3393 cso->cpu_sysinfo.win_uu_cnt = 0; 3394 cso->cpu_sysinfo.win_so_cnt = 0; 3395 cso->cpu_sysinfo.win_su_cnt = 0; 3396 cso->cpu_sysinfo.win_suo_cnt = 0; 3397 3398 cso->cpu_syswait.iowait = CPU_STATS(cp, sys.iowait); 3399 cso->cpu_syswait.swap = 0; 3400 cso->cpu_syswait.physio = 0; 3401 3402 cso->cpu_vminfo.pgrec = CPU_STATS(cp, vm.pgrec); 3403 cso->cpu_vminfo.pgfrec = CPU_STATS(cp, vm.pgfrec); 3404 cso->cpu_vminfo.pgin = CPU_STATS(cp, vm.pgin); 3405 cso->cpu_vminfo.pgpgin = CPU_STATS(cp, vm.pgpgin); 3406 cso->cpu_vminfo.pgout = CPU_STATS(cp, vm.pgout); 3407 cso->cpu_vminfo.pgpgout = CPU_STATS(cp, vm.pgpgout); 3408 cso->cpu_vminfo.zfod = CPU_STATS(cp, vm.zfod); 3409 cso->cpu_vminfo.dfree = CPU_STATS(cp, vm.dfree); 3410 cso->cpu_vminfo.scan = CPU_STATS(cp, vm.scan); 3411 cso->cpu_vminfo.rev = CPU_STATS(cp, vm.rev); 3412 cso->cpu_vminfo.hat_fault = CPU_STATS(cp, vm.hat_fault); 3413 cso->cpu_vminfo.as_fault = CPU_STATS(cp, vm.as_fault); 3414 cso->cpu_vminfo.maj_fault = CPU_STATS(cp, vm.maj_fault); 3415 cso->cpu_vminfo.cow_fault = CPU_STATS(cp, vm.cow_fault); 3416 cso->cpu_vminfo.prot_fault = CPU_STATS(cp, vm.prot_fault); 3417 cso->cpu_vminfo.softlock = CPU_STATS(cp, vm.softlock); 3418 cso->cpu_vminfo.kernel_asflt = CPU_STATS(cp, vm.kernel_asflt); 3419 cso->cpu_vminfo.pgrrun = CPU_STATS(cp, vm.pgrrun); 3420 cso->cpu_vminfo.execpgin = CPU_STATS(cp, vm.execpgin); 3421 cso->cpu_vminfo.execpgout = CPU_STATS(cp, vm.execpgout); 3422 cso->cpu_vminfo.execfree = CPU_STATS(cp, vm.execfree); 3423 cso->cpu_vminfo.anonpgin = CPU_STATS(cp, vm.anonpgin); 3424 cso->cpu_vminfo.anonpgout = CPU_STATS(cp, vm.anonpgout); 3425 cso->cpu_vminfo.anonfree = CPU_STATS(cp, vm.anonfree); 3426 cso->cpu_vminfo.fspgin = CPU_STATS(cp, vm.fspgin); 3427 cso->cpu_vminfo.fspgout = CPU_STATS(cp, vm.fspgout); 3428 cso->cpu_vminfo.fsfree = CPU_STATS(cp, vm.fsfree); 3429 3430 return (0); 3431 }
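/*
 * Example usage of the cpu_setup callback interface above (a sketch only;
 * my_cpu_setup and its per-CPU state are hypothetical, not an existing
 * consumer):
 *
 *	static int
 *	my_cpu_setup(cpu_setup_t what, int id, void *arg)
 *	{
 *		switch (what) {
 *		case CPU_CONFIG:
 *			(allocate per-CPU state for cpu id)
 *			break;
 *		case CPU_UNCONFIG:
 *			(tear that state down again)
 *			break;
 *		default:
 *			break;
 *		}
 *		return (0);
 *	}
 *
 *	mutex_enter(&cpu_lock);
 *	register_cpu_setup_func(my_cpu_setup, NULL);
 *	mutex_exit(&cpu_lock);
 */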