150
151 /*
152 * Can be raised to suppress further weakbinding requests, which are instead
153 * satisfied by disabling preemption. Must be raised/lowered under cpu_lock,
154 * while individual thread weakbinding synchronization is done under thread
155 * lock.
156 */
157 int weakbindingbarrier;
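/*
 * A minimal sketch of raising and lowering the barrier (hypothetical helper
 * names; assumes only the rules stated above: cpu_lock held, and the raised
 * value made visible to threads doing weakbinding under their thread lock):
 */
static void
example_weakbinding_suppress(void)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	weakbindingbarrier = 1;		/* new weakbind requests now just disable preemption */
	membar_producer();		/* let other CPUs observe the raised barrier */
}

static void
example_weakbinding_allow(void)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	weakbindingbarrier = 0;
}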
158
159 /*
160 * Variables used in pause_cpus().
161 */
162 static volatile char safe_list[NCPU]; /* per-CPU pause state (PAUSE_*) */
163
164 static struct _cpu_pause_info {
165 int cp_spl; /* spl saved in pause_cpus() */
166 volatile int cp_go; /* Go signal sent after all ready */
167 int cp_count; /* # of CPUs to pause */
168 ksema_t cp_sem; /* synch pause_cpus & cpu_pause */
169 kthread_id_t cp_paused; /* thread that currently has CPUs paused */
170 } cpu_pause_info;
171
172 static kmutex_t pause_free_mutex;
173 static kcondvar_t pause_free_cv;
174
175 void *(*cpu_pause_func)(void *) = NULL;
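/*
 * A minimal sketch of how a client such as cpr_suspend_cpus() is expected to
 * use the hook above (hypothetical wrapper; per the comment in cpu_pause(),
 * the handler runs once on each paused CPU with its pause-thread index):
 */
static void
example_pause_with_hook(void *(*handler)(void *))
{
	mutex_enter(&cpu_lock);		/* pause_cpus() asserts cpu_lock is held */
	cpu_pause_func = handler;
	pause_cpus(NULL);
	/* ... work done while every other CPU spins in mach_cpu_pause() ... */
	start_cpus();
	cpu_pause_func = NULL;
	mutex_exit(&cpu_lock);
}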
176
177
178 static struct cpu_sys_stats_ks_data {
179 kstat_named_t cpu_ticks_idle;
180 kstat_named_t cpu_ticks_user;
181 kstat_named_t cpu_ticks_kernel;
182 kstat_named_t cpu_ticks_wait;
183 kstat_named_t cpu_nsec_idle;
184 kstat_named_t cpu_nsec_user;
185 kstat_named_t cpu_nsec_kernel;
186 kstat_named_t cpu_nsec_dtrace;
187 kstat_named_t cpu_nsec_intr;
188 kstat_named_t cpu_load_intr;
189 kstat_named_t wait_ticks_io;
190 kstat_named_t dtrace_probes;
191 kstat_named_t bread;
192 kstat_named_t bwrite;
193 kstat_named_t lread;
194 kstat_named_t lwrite;
195 kstat_named_t phread;
196 kstat_named_t phwrite;
775 membar_enter(); /* make sure stores are flushed */
776 sema_v(&cpi->cp_sem); /* signal requesting thread */
777
778 /*
779 * Wait here until all pause threads are running. That
780 * indicates that it's safe to do the spl. Until
781 * cpu_pause_info.cp_go is set, we don't want to spl
782 * because that might block clock interrupts needed
783 * to preempt threads on other CPUs.
784 */
785 while (cpi->cp_go == 0)
786 ;
787 /*
788 * Even though we are at the highest disp prio, we need
789 * to block out all interrupts below LOCK_LEVEL so that
790 * an intr doesn't come in, wake up a thread, and call
791 * setbackdq/setfrontdq.
792 */
793 s = splhigh();
794 /*
795 * If cpu_pause_func() has been set, call it with the pause-thread
796 * index as its argument; currently it is only used by
797 * cpr_suspend_cpus(). The function supplies the code to execute
798 * on the "paused" CPUs when a machine comes out of a sleep state
799 * in which the CPUs were powered off (it could also be used for
800 * hotplugging CPUs).
801 */
802 if (cpu_pause_func != NULL)
803 (*cpu_pause_func)((void *)lindex);
804
805 mach_cpu_pause(safe);
806
807 splx(s);
808 /*
809 * Waiting is at an end. Switch out of cpu_pause
810 * loop and resume useful work.
811 */
812 swtch();
813 }
814
815 mutex_enter(&pause_free_mutex);
816 *safe = PAUSE_DEAD;
817 cv_broadcast(&pause_free_cv);
818 mutex_exit(&pause_free_mutex);
819 }
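/*
 * The PAUSE_DEAD broadcast above pairs with a waiter of roughly this shape
 * (a sketch of what cpu_pause_free(), called from the CPU-deletion path
 * below, presumably has to do; hypothetical wrapper name):
 */
static void
example_wait_pause_dead(int cpun)
{
	mutex_enter(&pause_free_mutex);
	while (safe_list[cpun] != PAUSE_DEAD)
		cv_wait(&pause_free_cv, &pause_free_mutex);
	mutex_exit(&pause_free_mutex);
}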
820
821 /*
822 * Allow the cpus to start running again.
823 */
971 }
972
973
974 /*
975 * Pause all of the CPUs except the one we are on by creating a high
976 * priority thread bound to those CPUs.
977 *
978 * Note that one must be extremely careful regarding code
979 * executed while CPUs are paused. Since a CPU may be paused
980 * while a thread scheduling on that CPU is holding an adaptive
981 * lock, code executed with CPUs paused must not acquire adaptive
982 * (or low-level spin) locks. Also, such code must not block,
983 * since the thread that is supposed to initiate the wakeup may
984 * never run.
985 *
986 * With a few exceptions, the restrictions on code executed with CPUs
987 * paused match those for code executed at high-level interrupt
988 * context.
989 */
990 void
991 pause_cpus(cpu_t *off_cp)
992 {
993 processorid_t cpu_id;
994 int i;
995 struct _cpu_pause_info *cpi = &cpu_pause_info;
996
997 ASSERT(MUTEX_HELD(&cpu_lock));
998 ASSERT(cpi->cp_paused == NULL);
999 cpi->cp_count = 0;
1000 cpi->cp_go = 0;
1001 for (i = 0; i < NCPU; i++)
1002 safe_list[i] = PAUSE_IDLE;
1003 kpreempt_disable();
1004
1005 /*
1006 * If running on the cpu that is going offline, get off it.
1007 * This is so that it won't be necessary to rechoose a CPU
1008 * when done.
1009 */
1010 if (CPU == off_cp)
1011 cpu_id = off_cp->cpu_next_part->cpu_id;
1012 else
1013 cpu_id = CPU->cpu_id;
1014 affinity_set(cpu_id);
1015
1016 /*
1017 * Start the pause threads and record how many were started
1018 */
1019 cpi->cp_count = cpu_pause_start(cpu_id);
1020
1021 /*
1022 * Now wait for all CPUs to be running the pause thread.
1023 */
1024 while (cpi->cp_count > 0) {
1189 {
1190 int error = 0;
1191
1192 /*
1193 * Handle on-line request.
1194 * This code must put the new CPU on the active list before
1195 * starting it because it will not be paused, and will start
1196 * using the active list immediately. The real start occurs
1197 * when the CPU_QUIESCED flag is turned off.
1198 */
1199
1200 ASSERT(MUTEX_HELD(&cpu_lock));
1201
1202 /*
1203 * Put all the cpus into a known safe place.
1204 * No mutexes can be entered while CPUs are paused.
1205 */
1206 error = mp_cpu_start(cp); /* arch-dep hook */
1207 if (error == 0) {
1208 pg_cpupart_in(cp, cp->cpu_part);
1209 pause_cpus(NULL);
1210 cpu_add_active_internal(cp);
1211 if (cp->cpu_flags & CPU_FAULTED) {
1212 cp->cpu_flags &= ~CPU_FAULTED;
1213 mp_cpu_faulted_exit(cp);
1214 }
1215 cp->cpu_flags &= ~(CPU_QUIESCED | CPU_OFFLINE | CPU_FROZEN |
1216 CPU_SPARE);
1217 CPU_NEW_GENERATION(cp);
1218 start_cpus();
1219 cpu_stats_kstat_create(cp);
1220 cpu_create_intrstat(cp);
1221 lgrp_kstat_create(cp);
1222 cpu_state_change_notify(cp->cpu_id, CPU_ON);
1223 cpu_intr_enable(cp); /* arch-dep hook */
1224 cpu_state_change_notify(cp->cpu_id, CPU_INTR_ON);
1225 cpu_set_state(cp);
1226 cyclic_online(cp);
1227 /*
1228 * This has to be called only after cyclic_online(). This
1229 * function uses cyclics.
1388 }
1389 cyclic_off = 1;
1390 }
1391
1392 /*
1393 * Call mp_cpu_stop() to perform any special operations
1394 * needed for this machine architecture to offline a CPU.
1395 */
1396 if (error == 0)
1397 error = mp_cpu_stop(cp); /* arch-dep hook */
1398
1399 /*
1400 * If that all worked, take the CPU offline and decrement
1401 * ncpus_online.
1402 */
1403 if (error == 0) {
1404 /*
1405 * Put all the cpus into a known safe place.
1406 * No mutexes can be entered while CPUs are paused.
1407 */
1408 pause_cpus(cp);
1409 /*
1410 * Repeat the operation, if necessary, to make sure that
1411 * all outstanding low-level interrupts run to completion
1412 * before we set the CPU_QUIESCED flag. It's also possible
1413 * that a thread has weakbound itself to the cpu despite our raising
1414 * cpu_inmotion above, since it may have loaded that
1415 * value before the barrier became visible (this would have
1416 * to be the thread that was on the target cpu at the time
1417 * we raised the barrier).
1418 */
1419 if ((!no_quiesce && cp->cpu_intr_actv != 0) ||
1420 (*bound_func)(cp, 1)) {
1421 start_cpus();
1422 (void) mp_cpu_start(cp);
1423 goto again;
1424 }
1425 ncp = cp->cpu_next_part;
1426 cpu_lpl = cp->cpu_lpl;
1427 ASSERT(cpu_lpl != NULL);
1428
1741 /*
1742 * Note: most users of the cpu_list will grab the
1743 * cpu_lock to ensure that it isn't modified. However,
1744 * certain users can't or won't do that. To allow this
1745 * we pause the other cpus. Users who walk the list
1746 * without cpu_lock must disable kernel preemption
1747 * to ensure that the list isn't modified underneath
1748 * them. Also, any cached pointers to cpu structures
1749 * must be revalidated by checking that the cpu_next
1750 * pointer has not been set to NULL. This check must
1751 * be done with the cpu_lock held or kernel preemption
1752 * disabled. This check relies upon the fact that
1753 * old cpu structures are not freed or cleared after
1754 * they are removed from the cpu_list.
1755 *
1756 * Note that the clock code walks the cpu list dereferencing
1757 * the cpu_part pointer, so we need to initialize it before
1758 * adding the cpu to the list.
1759 */
1760 cp->cpu_part = &cp_default;
1761 (void) pause_cpus(NULL);
1762 cp->cpu_next = cpu_list;
1763 cp->cpu_prev = cpu_list->cpu_prev;
1764 cpu_list->cpu_prev->cpu_next = cp;
1765 cpu_list->cpu_prev = cp;
1766 start_cpus();
1767
1768 for (seqid = 0; CPU_IN_SET(cpu_seqid_inuse, seqid); seqid++)
1769 continue;
1770 CPUSET_ADD(cpu_seqid_inuse, seqid);
1771 cp->cpu_seqid = seqid;
1772
1773 if (seqid > max_cpu_seqid_ever)
1774 max_cpu_seqid_ever = seqid;
1775
1776 ASSERT(ncpus < max_ncpus);
1777 ncpus++;
1778 cp->cpu_cache_offset = KMEM_CPU_CACHE_OFFSET(cp->cpu_seqid);
1779 cpu[cp->cpu_id] = cp;
1780 CPUSET_ADD(cpu_available, cp->cpu_id);
1781 cpu_seq[cp->cpu_seqid] = cp;
1836 */
1837 cpu_pause_free(cp);
1838 CPUSET_DEL(cpu_available, cp->cpu_id);
1839 cpu[cp->cpu_id] = NULL;
1840 cpu_seq[cp->cpu_seqid] = NULL;
1841
1842 /*
1843 * The clock thread and mutex_vector_enter cannot hold the
1844 * cpu_lock while traversing the cpu list; therefore, we pause
1845 * all other threads by pausing the other cpus. These, and any
1846 * other routines holding cpu pointers while possibly sleeping
1847 * must be sure to call kpreempt_disable before processing the
1848 * list and be sure to check that the cpu has not been deleted
1849 * after any sleeps (check cp->cpu_next != NULL). We guarantee
1850 * to keep the deleted cpu structure around.
1851 *
1852 * Note that this MUST be done AFTER cpu_available
1853 * has been updated so that we don't waste time
1854 * trying to pause the cpu we're trying to delete.
1855 */
1856 (void) pause_cpus(NULL);
1857
1858 cpnext = cp->cpu_next;
1859 cp->cpu_prev->cpu_next = cp->cpu_next;
1860 cp->cpu_next->cpu_prev = cp->cpu_prev;
1861 if (cp == cpu_list)
1862 cpu_list = cpnext;
1863
1864 /*
1865 * Signals that the cpu has been deleted (see above).
1866 */
1867 cp->cpu_next = NULL;
1868 cp->cpu_prev = NULL;
1869
1870 start_cpus();
1871
1872 CPUSET_DEL(cpu_seqid_inuse, cp->cpu_seqid);
1873 ncpus--;
1874 lgrp_config(LGRP_CONFIG_CPU_DEL, (uintptr_t)cp, 0);
1875
1876 pool_pset_mod = gethrtime();
1908 if (pp->cp_ncpus == 1) {
1909 cp_numparts_nonempty++;
1910 ASSERT(cp_numparts_nonempty != 0);
1911 }
1912
1913 pg_cpu_active(cp);
1914 lgrp_config(LGRP_CONFIG_CPU_ONLINE, (uintptr_t)cp, 0);
1915
1916 bzero(&cp->cpu_loadavg, sizeof (cp->cpu_loadavg));
1917 }
1918
1919 /*
1920 * Add a CPU to the list of active CPUs.
1921 * This is called from machine-dependent layers when a new CPU is started.
1922 */
1923 void
1924 cpu_add_active(cpu_t *cp)
1925 {
1926 pg_cpupart_in(cp, cp->cpu_part);
1927
1928 pause_cpus(NULL);
1929 cpu_add_active_internal(cp);
1930 start_cpus();
1931
1932 cpu_stats_kstat_create(cp);
1933 cpu_create_intrstat(cp);
1934 lgrp_kstat_create(cp);
1935 cpu_state_change_notify(cp->cpu_id, CPU_INIT);
1936 }
1937
1938
1939 /*
1940 * Remove a CPU from the list of active CPUs.
1941 * This routine must not get any locks, because other CPUs are paused.
1942 */
1943 /* ARGSUSED */
1944 static void
1945 cpu_remove_active(cpu_t *cp)
1946 {
1947 cpupart_t *pp = cp->cpu_part;
1948
|
150
151 /*
152 * Can be raised to suppress further weakbinding requests, which are instead
153 * satisfied by disabling preemption. Must be raised/lowered under cpu_lock,
154 * while individual thread weakbinding synchronization is done under thread
155 * lock.
156 */
157 int weakbindingbarrier;
158
159 /*
160 * Variables used in pause_cpus().
161 */
162 static volatile char safe_list[NCPU]; /* per-CPU pause state (PAUSE_*) */
163
164 static struct _cpu_pause_info {
165 int cp_spl; /* spl saved in pause_cpus() */
166 volatile int cp_go; /* Go signal sent after all ready */
167 int cp_count; /* # of CPUs to pause */
168 ksema_t cp_sem; /* synch pause_cpus & cpu_pause */
169 kthread_id_t cp_paused; /* thread that currently has CPUs paused */
170 void *(*cp_func)(void *); /* callback run on each paused CPU */
171 } cpu_pause_info;
172
173 static kmutex_t pause_free_mutex;
174 static kcondvar_t pause_free_cv;
175
176
177 static struct cpu_sys_stats_ks_data {
178 kstat_named_t cpu_ticks_idle;
179 kstat_named_t cpu_ticks_user;
180 kstat_named_t cpu_ticks_kernel;
181 kstat_named_t cpu_ticks_wait;
182 kstat_named_t cpu_nsec_idle;
183 kstat_named_t cpu_nsec_user;
184 kstat_named_t cpu_nsec_kernel;
185 kstat_named_t cpu_nsec_dtrace;
186 kstat_named_t cpu_nsec_intr;
187 kstat_named_t cpu_load_intr;
188 kstat_named_t wait_ticks_io;
189 kstat_named_t dtrace_probes;
190 kstat_named_t bread;
191 kstat_named_t bwrite;
192 kstat_named_t lread;
193 kstat_named_t lwrite;
194 kstat_named_t phread;
195 kstat_named_t phwrite;
774 membar_enter(); /* make sure stores are flushed */
775 sema_v(&cpi->cp_sem); /* signal requesting thread */
776
777 /*
778 * Wait here until all pause threads are running. That
779 * indicates that it's safe to do the spl. Until
780 * cpu_pause_info.cp_go is set, we don't want to spl
781 * because that might block clock interrupts needed
782 * to preempt threads on other CPUs.
783 */
784 while (cpi->cp_go == 0)
785 ;
786 /*
787 * Even though we are at the highest disp prio, we need
788 * to block out all interrupts below LOCK_LEVEL so that
789 * an intr doesn't come in, wake up a thread, and call
790 * setbackdq/setfrontdq.
791 */
792 s = splhigh();
793 /*
794 * If cp_func has been set, call it with the pause-thread index
795 * as its argument; currently it is only used by
796 * cpr_suspend_cpus(). The function supplies the code to execute
797 * on the "paused" CPUs when a machine comes out of a sleep state
798 * in which the CPUs were powered off (it could also be used
799 * for hotplugging CPUs).
800 */
801 if (cpi->cp_func != NULL)
802 (*cpi->cp_func)((void *)lindex);
803
804 mach_cpu_pause(safe);
805
806 splx(s);
807 /*
808 * Waiting is at an end. Switch out of cpu_pause
809 * loop and resume useful work.
810 */
811 swtch();
812 }
813
814 mutex_enter(&pause_free_mutex);
815 *safe = PAUSE_DEAD;
816 cv_broadcast(&pause_free_cv);
817 mutex_exit(&pause_free_mutex);
818 }
819
820 /*
821 * Allow the cpus to start running again.
822 */
970 }
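/*
 * A minimal caller-side sketch of the pattern required by the comment on
 * pause_cpus() below (hypothetical wrapper; a non-NULL func is run on each
 * paused CPU by its pause thread):
 */
static void
example_pause_callers(void *(*func)(void *))
{
	mutex_enter(&cpu_lock);		/* pause_cpus() asserts cpu_lock is held */
	pause_cpus(NULL, func);
	/*
	 * Only lock-free, non-blocking work may happen here: no adaptive
	 * (or low-level spin) locks and nothing that can sleep, since the
	 * thread that would do the wakeup may itself never run.
	 */
	start_cpus();
	mutex_exit(&cpu_lock);
}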
971
972
973 /*
974 * Pause all of the CPUs except the one we are on by creating a high
975 * priority thread bound to those CPUs. If func is non-NULL, it is
976 * called on each paused CPU by that CPU's pause thread (see cpu_pause()).
977 * Note that one must be extremely careful regarding code
978 * executed while CPUs are paused. Since a CPU may be paused
979 * while a thread scheduling on that CPU is holding an adaptive
980 * lock, code executed with CPUs paused must not acquire adaptive
981 * (or low-level spin) locks. Also, such code must not block,
982 * since the thread that is supposed to initiate the wakeup may
983 * never run.
984 *
985 * With a few exceptions, the restrictions on code executed with CPUs
986 * paused match those for code executed at high-level interrupt
987 * context.
988 */
989 void
990 pause_cpus(cpu_t *off_cp, void *(*func)(void *))
991 {
992 processorid_t cpu_id;
993 int i;
994 struct _cpu_pause_info *cpi = &cpu_pause_info;
995
996 ASSERT(MUTEX_HELD(&cpu_lock));
997 ASSERT(cpi->cp_paused == NULL);
998 cpi->cp_count = 0;
999 cpi->cp_go = 0;
1000 for (i = 0; i < NCPU; i++)
1001 safe_list[i] = PAUSE_IDLE;
1002 kpreempt_disable();
1003
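/*
 * Record the callback for the pause threads; cpu_pause() invokes it on
 * each paused CPU before parking in mach_cpu_pause().
 */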
1004 cpi->cp_func = func;
1005
1006 /*
1007 * If running on the cpu that is going offline, get off it.
1008 * This is so that it won't be necessary to rechoose a CPU
1009 * when done.
1010 */
1011 if (CPU == off_cp)
1012 cpu_id = off_cp->cpu_next_part->cpu_id;
1013 else
1014 cpu_id = CPU->cpu_id;
1015 affinity_set(cpu_id);
1016
1017 /*
1018 * Start the pause threads and record how many were started
1019 */
1020 cpi->cp_count = cpu_pause_start(cpu_id);
1021
1022 /*
1023 * Now wait for all CPUs to be running the pause thread.
1024 */
1025 while (cpi->cp_count > 0) {
1190 {
1191 int error = 0;
1192
1193 /*
1194 * Handle on-line request.
1195 * This code must put the new CPU on the active list before
1196 * starting it because it will not be paused, and will start
1197 * using the active list immediately. The real start occurs
1198 * when the CPU_QUIESCED flag is turned off.
1199 */
1200
1201 ASSERT(MUTEX_HELD(&cpu_lock));
1202
1203 /*
1204 * Put all the cpus into a known safe place.
1205 * No mutexes can be entered while CPUs are paused.
1206 */
1207 error = mp_cpu_start(cp); /* arch-dep hook */
1208 if (error == 0) {
1209 pg_cpupart_in(cp, cp->cpu_part);
1210 pause_cpus(NULL, NULL);
1211 cpu_add_active_internal(cp);
1212 if (cp->cpu_flags & CPU_FAULTED) {
1213 cp->cpu_flags &= ~CPU_FAULTED;
1214 mp_cpu_faulted_exit(cp);
1215 }
1216 cp->cpu_flags &= ~(CPU_QUIESCED | CPU_OFFLINE | CPU_FROZEN |
1217 CPU_SPARE);
1218 CPU_NEW_GENERATION(cp);
1219 start_cpus();
1220 cpu_stats_kstat_create(cp);
1221 cpu_create_intrstat(cp);
1222 lgrp_kstat_create(cp);
1223 cpu_state_change_notify(cp->cpu_id, CPU_ON);
1224 cpu_intr_enable(cp); /* arch-dep hook */
1225 cpu_state_change_notify(cp->cpu_id, CPU_INTR_ON);
1226 cpu_set_state(cp);
1227 cyclic_online(cp);
1228 /*
1229 * This has to be called only after cyclic_online(). This
1230 * function uses cyclics.
1389 }
1390 cyclic_off = 1;
1391 }
1392
1393 /*
1394 * Call mp_cpu_stop() to perform any special operations
1395 * needed for this machine architecture to offline a CPU.
1396 */
1397 if (error == 0)
1398 error = mp_cpu_stop(cp); /* arch-dep hook */
1399
1400 /*
1401 * If that all worked, take the CPU offline and decrement
1402 * ncpus_online.
1403 */
1404 if (error == 0) {
1405 /*
1406 * Put all the cpus into a known safe place.
1407 * No mutexes can be entered while CPUs are paused.
1408 */
1409 pause_cpus(cp, NULL);
1410 /*
1411 * Repeat the operation, if necessary, to make sure that
1412 * all outstanding low-level interrupts run to completion
1413 * before we set the CPU_QUIESCED flag. It's also possible
1414 * that a thread has weakbound itself to the cpu despite our raising
1415 * cpu_inmotion above, since it may have loaded that
1416 * value before the barrier became visible (this would have
1417 * to be the thread that was on the target cpu at the time
1418 * we raised the barrier).
1419 */
1420 if ((!no_quiesce && cp->cpu_intr_actv != 0) ||
1421 (*bound_func)(cp, 1)) {
1422 start_cpus();
1423 (void) mp_cpu_start(cp);
1424 goto again;
1425 }
1426 ncp = cp->cpu_next_part;
1427 cpu_lpl = cp->cpu_lpl;
1428 ASSERT(cpu_lpl != NULL);
1429
1742 /*
1743 * Note: most users of the cpu_list will grab the
1744 * cpu_lock to ensure that it isn't modified. However,
1745 * certain users can't or won't do that. To allow this
1746 * we pause the other cpus. Users who walk the list
1747 * without cpu_lock must disable kernel preemption
1748 * to ensure that the list isn't modified underneath
1749 * them. Also, any cached pointers to cpu structures
1750 * must be revalidated by checking that the cpu_next
1751 * pointer has not been set to NULL. This check must
1752 * be done with the cpu_lock held or kernel preemption
1753 * disabled. This check relies upon the fact that
1754 * old cpu structures are not freed or cleared after
1755 * they are removed from the cpu_list.
1756 *
1757 * Note that the clock code walks the cpu list dereferencing
1758 * the cpu_part pointer, so we need to initialize it before
1759 * adding the cpu to the list.
1760 */
1761 cp->cpu_part = &cp_default;
1762 (void) pause_cpus(NULL, NULL);
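/*
 * Link the new cpu into the circular, doubly linked cpu_list,
 * immediately before the current head (i.e., at the tail).
 */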
1763 cp->cpu_next = cpu_list;
1764 cp->cpu_prev = cpu_list->cpu_prev;
1765 cpu_list->cpu_prev->cpu_next = cp;
1766 cpu_list->cpu_prev = cp;
1767 start_cpus();
1768
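/* Allocate the lowest CPU sequence id not already in use. */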
1769 for (seqid = 0; CPU_IN_SET(cpu_seqid_inuse, seqid); seqid++)
1770 continue;
1771 CPUSET_ADD(cpu_seqid_inuse, seqid);
1772 cp->cpu_seqid = seqid;
1773
1774 if (seqid > max_cpu_seqid_ever)
1775 max_cpu_seqid_ever = seqid;
1776
1777 ASSERT(ncpus < max_ncpus);
1778 ncpus++;
1779 cp->cpu_cache_offset = KMEM_CPU_CACHE_OFFSET(cp->cpu_seqid);
1780 cpu[cp->cpu_id] = cp;
1781 CPUSET_ADD(cpu_available, cp->cpu_id);
1782 cpu_seq[cp->cpu_seqid] = cp;
1837 */
1838 cpu_pause_free(cp);
1839 CPUSET_DEL(cpu_available, cp->cpu_id);
1840 cpu[cp->cpu_id] = NULL;
1841 cpu_seq[cp->cpu_seqid] = NULL;
1842
1843 /*
1844 * The clock thread and mutex_vector_enter cannot hold the
1845 * cpu_lock while traversing the cpu list; therefore, we pause
1846 * all other threads by pausing the other cpus. These, and any
1847 * other routines holding cpu pointers while possibly sleeping
1848 * must be sure to call kpreempt_disable before processing the
1849 * list and be sure to check that the cpu has not been deleted
1850 * after any sleeps (check cp->cpu_next != NULL). We guarantee
1851 * to keep the deleted cpu structure around.
1852 *
1853 * Note that this MUST be done AFTER cpu_available
1854 * has been updated so that we don't waste time
1855 * trying to pause the cpu we're trying to delete.
1856 */
1857 (void) pause_cpus(NULL, NULL);
1858
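/* Unlink cp from the circular, doubly linked cpu_list. */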
1859 cpnext = cp->cpu_next;
1860 cp->cpu_prev->cpu_next = cp->cpu_next;
1861 cp->cpu_next->cpu_prev = cp->cpu_prev;
1862 if (cp == cpu_list)
1863 cpu_list = cpnext;
1864
1865 /*
1866 * Signals that the cpu has been deleted (see above).
1867 */
1868 cp->cpu_next = NULL;
1869 cp->cpu_prev = NULL;
1870
1871 start_cpus();
1872
1873 CPUSET_DEL(cpu_seqid_inuse, cp->cpu_seqid);
1874 ncpus--;
1875 lgrp_config(LGRP_CONFIG_CPU_DEL, (uintptr_t)cp, 0);
1876
1877 pool_pset_mod = gethrtime();
1909 if (pp->cp_ncpus == 1) {
1910 cp_numparts_nonempty++;
1911 ASSERT(cp_numparts_nonempty != 0);
1912 }
1913
1914 pg_cpu_active(cp);
1915 lgrp_config(LGRP_CONFIG_CPU_ONLINE, (uintptr_t)cp, 0);
1916
1917 bzero(&cp->cpu_loadavg, sizeof (cp->cpu_loadavg));
1918 }
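/*
 * A sketch of the lock-free cpu_list walk that the comments above describe
 * (hypothetical walker; real users include the clock code and
 * mutex_vector_enter()):
 */
static void
example_walk_cpu_list(void (*visit)(cpu_t *))
{
	cpu_t *cp;

	kpreempt_disable();	/* prevents pause_cpus() from completing, so the list is stable */
	cp = cpu_list;
	do {
		visit(cp);
		cp = cp->cpu_next;
	} while (cp != cpu_list);
	kpreempt_enable();
}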
1919
1920 /*
1921 * Add a CPU to the list of active CPUs.
1922 * This is called from machine-dependent layers when a new CPU is started.
1923 */
1924 void
1925 cpu_add_active(cpu_t *cp)
1926 {
1927 pg_cpupart_in(cp, cp->cpu_part);
1928
1929 pause_cpus(NULL, NULL);
1930 cpu_add_active_internal(cp);
1931 start_cpus();
1932
1933 cpu_stats_kstat_create(cp);
1934 cpu_create_intrstat(cp);
1935 lgrp_kstat_create(cp);
1936 cpu_state_change_notify(cp->cpu_id, CPU_INIT);
1937 }
1938
1939
1940 /*
1941 * Remove a CPU from the list of active CPUs.
1942 * This routine must not get any locks, because other CPUs are paused.
1943 */
1944 /* ARGSUSED */
1945 static void
1946 cpu_remove_active(cpu_t *cp)
1947 {
1948 cpupart_t *pp = cp->cpu_part;
1949
|