		}
	}

	/*
	 * Before we actually start changing data structures, notify
	 * the cyclic subsystem that we want to move this CPU out of its
	 * partition.
	 */
	if (!cyclic_move_out(cp)) {
		/*
		 * This CPU must be the last CPU in a processor set with
		 * a bound cyclic.
		 */
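		/*
		 * Roll back: tell the PG subsystem the CPU is leaving
		 * newpp and re-entering oldpp, reversing the notifications
		 * made before this point.
		 */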
		cpu_state_change_notify(cp->cpu_id, CPU_CPUPART_IN);
		pg_cpupart_out(cp, newpp);
		pg_cpupart_in(cp, oldpp);
		cpu_inmotion = NULL;
		return (EBUSY);
	}

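	/*
	 * The second argument to pause_cpus() optionally names a pause
	 * handler; passing NULL selects the default handler.
	 */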
	pause_cpus(cp, NULL);

	if (move_threads) {
		/*
		 * The thread on cpu before the pause thread may have read
		 * cpu_inmotion before we raised the barrier above.  Check
		 * again.
		 */
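		/*
		 * The second argument to disp_bound_threads() presumably
		 * indicates that the thread list is stable (CPUs are
		 * paused), so it can be walked without further locking.
		 */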
		if (disp_bound_threads(cp, 1)) {
			start_cpus();
			goto again;
		}
	}

	/*
	 * Now that CPUs are paused, let the PG subsystem perform
	 * any necessary data structure updates.
	 */
	pg_cpupart_move(cp, oldpp, newpp);

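/*
 * Illustrative sketch, not part of cpupart.c: the move above follows a
 * publish-then-verify pattern.  cpu_inmotion is published before the
 * pause barrier, so a thread that sampled it just before the barrier may
 * still have bound itself to the CPU; the bound-thread check is therefore
 * repeated once every other CPU is paused, and the operation retries if
 * the recheck fails.  All declarations below are invented stand-ins, and
 * the pause-handler type is assumed.
 */
#include <stddef.h>

typedef struct cpu cpu_t;			/* opaque stand-in */

extern void pause_cpus(cpu_t *, void *(*)(void *));
extern void start_cpus(void);
extern int disp_bound_threads(cpu_t *, int);
extern cpu_t *cpu_inmotion;

static void
move_cpu_sketch(cpu_t *cp)
{
	for (;;) {
		cpu_inmotion = cp;	/* publish intent */
		pause_cpus(cp, NULL);	/* raise the barrier */
		if (disp_bound_threads(cp, 1)) {
			/* a thread bound itself in the window; retry */
			start_cpus();
			continue;
		}
		break;			/* no bound threads; safe to move */
	}
	/* ... perform the move, then start_cpus() ... */
}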
/* ... */

	pp->cp_gen = 0;
	DISP_LOCK_INIT(&pp->cp_kp_queue.disp_lock);
	*psid = CPTOPS(pp->cp_id);
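	/*
	 * The partition-wide kernel-preemption dispatch queue is sized to
	 * cover every global dispatch priority.
	 */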
	disp_kp_alloc(&pp->cp_kp_queue, v.v_nglobpris);
	cpupart_kstat_create(pp);
	cpupart_lpl_initialize(pp);

	bitset_init(&pp->cp_cmt_pgs);

	/*
	 * Initialize and size the partition's bitset of halted CPUs.
	 */
	bitset_init_fanout(&pp->cp_haltset, cp_haltset_fanout);
	bitset_resize(&pp->cp_haltset, max_ncpus);

	/*
	 * Pause all CPUs while changing the partition list, to make sure
	 * the clock thread (which traverses the list without holding
	 * cpu_lock) isn't running.
	 */
	pause_cpus(NULL, NULL);
	pp->cp_next = cp_list_head;
	pp->cp_prev = cp_list_head->cp_prev;
	cp_list_head->cp_prev->cp_next = pp;
	cp_list_head->cp_prev = pp;
	start_cpus();
	mutex_exit(&cpu_lock);

	return (0);
}
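/*
 * Illustrative sketch, not part of cpupart.c: the pointer surgery above
 * is the standard tail insert on a circular, doubly-linked list, done
 * under pause_cpus() because the clock thread walks the list without
 * taking cpu_lock.  cpupart_destroy() below performs the matching
 * unlink.  The node type is invented for illustration.
 */
typedef struct node {
	struct node *next;
	struct node *prev;
} node_t;

/* Insert np immediately before head, i.e. at the tail of the ring. */
static void
ring_insert_tail(node_t *head, node_t *np)
{
	np->next = head;
	np->prev = head->prev;
	head->prev->next = np;
	head->prev = np;
}

/* Unlink np; the caller must first exclude any lock-free readers. */
static void
ring_remove(node_t *np)
{
	np->prev->next = np->next;
	np->next->prev = np->prev;
}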

/*
 * Move threads from the specified partition to cp_default.  If
 * `unbind_all' is set, move all threads; otherwise move only soft-bound
 * threads.
 */
static int
cpupart_unbind_threads(cpupart_t *pp, boolean_t unbind_all)
{
	void *projbuf, *zonebuf;
	kthread_t *t;
	proc_t *p;
/* ... */

	/*
	 * Reset the pointers in any offline processors so they won't
	 * try to rejoin the destroyed partition when they're brought
	 * back online.
	 */
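	/*
	 * The cpu list is circular, so starting at the current CPU and
	 * following cpu_next until it wraps around visits every CPU,
	 * offline ones included.
	 */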
	first_cp = cp = CPU;
	do {
		if (cp->cpu_part == pp) {
			ASSERT(cp->cpu_flags & CPU_OFFLINE);
			cp->cpu_part = newpp;
		}
		cp = cp->cpu_next;
	} while (cp != first_cp);

	/*
	 * Pause all CPUs while changing the partition list, to make sure
	 * the clock thread (which traverses the list without holding
	 * cpu_lock) isn't running.
	 */
	pause_cpus(NULL, NULL);
	pp->cp_prev->cp_next = pp->cp_next;
	pp->cp_next->cp_prev = pp->cp_prev;
	if (cp_list_head == pp)
		cp_list_head = pp->cp_next;
	start_cpus();

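	/*
	 * Lower cp_id_next so the destroyed partition's id can be reused
	 * by a later cpupart_create().
	 */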
	if (cp_id_next > pp->cp_id)
		cp_id_next = pp->cp_id;

	if (pp->cp_kstat)
		kstat_delete(pp->cp_kstat);

	cp_numparts--;

	disp_kp_free(&pp->cp_kp_queue);

	cpupart_lpl_teardown(pp);

	kmem_free(pp, sizeof (cpupart_t));
	mutex_exit(&cpu_lock);