XXXX pass in cpu_pause_func via pause_cpus
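The only functional difference between the two listings below (the first shows the code before the change, the second after) is the second argument now passed to pause_cpus(). Judging from the synopsis, the pause handler that used to be wired up through a global cpu_pause_func hook is now handed to pause_cpus() directly, with NULL selecting the default behaviour. A minimal user-space mock of that shape (pause_cpus_mock and cpu_pause_func_t are illustrative stand-ins, not the kernel declarations):

#include <stdio.h>
#include <stddef.h>

/* Stand-in for the type of the pause handler a caller may supply. */
typedef void *(*cpu_pause_func_t)(void *);

/* Default behaviour when no handler is supplied. */
static void *
default_pause(void *arg)
{
	(void) arg;
	(void) printf("parked in the default pause loop\n");
	return (NULL);
}

/* New-style interface: the handler rides along as the second argument. */
static void
pause_cpus_mock(void *off_cp, cpu_pause_func_t func)
{
	(void) off_cp;
	if (func == NULL)
		func = default_pause;
	(void) func(NULL);
}

int
main(void)
{
	/* What the updated call sites in this review do. */
	pause_cpus_mock(NULL, NULL);
	return (0);
}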


 379          * Memory must be allocated out of pause/start_cpus() scope because
 380          * kmem_zalloc() can't be called with KM_SLEEP flag within that scope.
 381          */
 382         if (cpu_idle_cb_curr == cpu_idle_cb_max) {
 383                 cnt_new = cpu_idle_cb_max + CPU_IDLE_ARRAY_CAPACITY_INC;
 384                 buf_new = (char *)kmem_zalloc(cnt_new *
 385                     sizeof (cpu_idle_cb_item_t), KM_SLEEP);
 386         }
 387 
 388         /* Try to acquire cpu_lock if not held yet. */
 389         if (!MUTEX_HELD(&cpu_lock)) {
 390                 mutex_enter(&cpu_lock);
 391                 unlock = 1;
 392         }
 393         /*
 394          * Pause all other CPUs (and let them run pause thread).
 395          * It's guaranteed that no other threads will access cpu_idle_cb_array
 396          * after pause_cpus().
 397          */
 398         if (!cpus_paused()) {
 399                 pause_cpus(NULL);
 400                 unpause = 1;
 401         }
 402 
 403         /* Copy content to new buffer if needed. */
 404         if (buf_new != NULL) {
 405                 buf_old = (char *)cpu_idle_cb_array;
 406                 cnt_old = cpu_idle_cb_max;
 407                 if (buf_old != NULL) {
 408                         ASSERT(cnt_old != 0);
 409                         bcopy(cpu_idle_cb_array, buf_new,
 410                             sizeof (cpu_idle_cb_item_t) * cnt_old);
 411                 }
 412                 cpu_idle_cb_array = (cpu_idle_cb_item_t *)buf_new;
 413                 cpu_idle_cb_max = cnt_new;
 414         }
 415 
 416         /* Insert into array according to priority. */
 417         ASSERT(cpu_idle_cb_curr < cpu_idle_cb_max);
 418         for (i = cpu_idle_cb_curr; i > 0; i--) {
 419                 if (cpu_idle_cb_array[i - 1].impl->priority >= cip->priority) {
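The hunk above breaks off inside the insertion loop, right after the priority test. The visible shape (walk backwards, stop once an existing entry's priority is >= the new callback's) is a standard insertion into an array kept in descending priority order; a stand-alone sketch of that pattern with illustrative types, not the exact kernel code:

/* Illustrative stand-in for cpu_idle_cb_item_t. */
typedef struct cb_item {
	int	priority;
} cb_item_t;

/*
 * Keep the array sorted by descending priority: walk from the tail,
 * shifting lower-priority entries one slot to the right, and stop at
 * the first entry whose priority is >= the new item's.
 */
void
insert_by_priority(cb_item_t *array, int *curr, cb_item_t item)
{
	int i;

	for (i = *curr; i > 0; i--) {
		if (array[i - 1].priority >= item.priority)
			break;			/* insertion point found */
		array[i] = array[i - 1];	/* shift one slot right */
	}
	array[i] = item;
	(*curr)++;
}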


 445 static void
 446 cpu_idle_remove_callback(cpu_idle_cb_impl_t *cip)
 447 {
 448         int i, found = 0;
 449         int unlock = 0, unpause = 0;
 450         cpu_idle_cb_state_t *sp;
 451 
 452         ASSERT(MUTEX_HELD(&cpu_idle_cb_lock));
 453 
 454         /* Try to acquire cpu_lock if not held yet. */
 455         if (!MUTEX_HELD(&cpu_lock)) {
 456                 mutex_enter(&cpu_lock);
 457                 unlock = 1;
 458         }
 459         /*
 460          * Pause all other CPUs.
 461          * It's guaranteed that no other threads will access cpu_idle_cb_array
 462          * after pause_cpus().
 463          */
 464         if (!cpus_paused()) {
 465                 pause_cpus(NULL);
 466                 unpause = 1;
 467         }
 468 
 469         /* Remove cip from array. */
 470         for (i = 0; i < cpu_idle_cb_curr; i++) {
 471                 if (found == 0) {
 472                         if (cpu_idle_cb_array[i].impl == cip) {
 473                                 found = 1;
 474                         }
 475                 } else {
 476                         cpu_idle_cb_array[i - 1] = cpu_idle_cb_array[i];
 477                 }
 478         }
 479         ASSERT(found != 0);
 480         cpu_idle_cb_curr--;
 481 
 482         /*
 483          * Reset property ready flag for all CPUs if no registered callback
 484          * left because cpu_idle_enter/exit will stop updating property if
 485          * there's no callback registered.
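Both routines use the same idiom for quiescing the system: take cpu_lock and pause the other CPUs only if the caller has not already done so, and record which of those two steps this function performed itself so only those are undone on the way out. The release half falls outside the hunks shown; the sketch below is an assumed reconstruction of the overall shape (with_cpus_quiesced is a hypothetical wrapper, the other symbols are the kernel's own), not the literal code:

/*
 * Conditional acquire/undo idiom, sketched as a fragment of kernel-style
 * code (not a complete compilation unit).
 */
static void
with_cpus_quiesced(void (*op)(void *), void *arg)
{
	int unlock = 0, unpause = 0;

	if (!MUTEX_HELD(&cpu_lock)) {		/* acquire only if needed... */
		mutex_enter(&cpu_lock);
		unlock = 1;			/* ...and remember we did */
	}
	if (!cpus_paused()) {
		pause_cpus(NULL, NULL);		/* new two-argument form */
		unpause = 1;
	}

	op(arg);				/* touch cpu_idle_cb_array safely */

	if (unpause)				/* undo only our own steps */
		start_cpus();
	if (unlock)
		mutex_exit(&cpu_lock);
}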




 379          * Memory must be allocated out of pause/start_cpus() scope because
 380          * kmem_zalloc() can't be called with KM_SLEEP flag within that scope.
 381          */
 382         if (cpu_idle_cb_curr == cpu_idle_cb_max) {
 383                 cnt_new = cpu_idle_cb_max + CPU_IDLE_ARRAY_CAPACITY_INC;
 384                 buf_new = (char *)kmem_zalloc(cnt_new *
 385                     sizeof (cpu_idle_cb_item_t), KM_SLEEP);
 386         }
 387 
 388         /* Try to acquire cpu_lock if not held yet. */
 389         if (!MUTEX_HELD(&cpu_lock)) {
 390                 mutex_enter(&cpu_lock);
 391                 unlock = 1;
 392         }
 393         /*
 394          * Pause all other CPUs (and let them run pause thread).
 395          * It's guaranteed that no other threads will access cpu_idle_cb_array
 396          * after pause_cpus().
 397          */
 398         if (!cpus_paused()) {
 399                 pause_cpus(NULL, NULL);
 400                 unpause = 1;
 401         }
 402 
 403         /* Copy content to new buffer if needed. */
 404         if (buf_new != NULL) {
 405                 buf_old = (char *)cpu_idle_cb_array;
 406                 cnt_old = cpu_idle_cb_max;
 407                 if (buf_old != NULL) {
 408                         ASSERT(cnt_old != 0);
 409                         bcopy(cpu_idle_cb_array, buf_new,
 410                             sizeof (cpu_idle_cb_item_t) * cnt_old);
 411                 }
 412                 cpu_idle_cb_array = (cpu_idle_cb_item_t *)buf_new;
 413                 cpu_idle_cb_max = cnt_new;
 414         }
 415 
 416         /* Insert into array according to priority. */
 417         ASSERT(cpu_idle_cb_curr < cpu_idle_cb_max);
 418         for (i = cpu_idle_cb_curr; i > 0; i--) {
 419                 if (cpu_idle_cb_array[i - 1].impl->priority >= cip->priority) {
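The allocation dance at the top of this hunk exists because a KM_SLEEP allocation may block, and blocking is not permitted while the other CPUs are parked by pause_cpus(); so the new buffer is obtained first, and only the non-blocking copy-and-swap happens inside the paused window. A self-contained user-space illustration of the same pattern (ensure_capacity, pause_all and resume_all are hypothetical stand-ins for the kernel sequence):

#include <stdlib.h>
#include <string.h>

typedef struct item { int priority; } item_t;

static item_t	*cb_array;	/* stands in for cpu_idle_cb_array */
static int	cb_max;		/* capacity */
static int	cb_curr;	/* entries in use */

static void pause_all(void)  { /* pause_cpus() stand-in */ }
static void resume_all(void) { /* start_cpus() stand-in */ }

int
ensure_capacity(int inc)
{
	item_t *buf_new = NULL, *buf_old = NULL;
	int cnt_new = 0;

	/* Step 1: the blocking allocation happens before anything pauses. */
	if (cb_curr == cb_max) {
		cnt_new = cb_max + inc;
		buf_new = calloc(cnt_new, sizeof (item_t));
		if (buf_new == NULL)
			return (-1);
	}

	/*
	 * Step 2: with everything quiesced, only the copy and the pointer
	 * swap are performed; nothing in this window can block.
	 */
	pause_all();
	if (buf_new != NULL) {
		buf_old = cb_array;
		if (buf_old != NULL)
			memcpy(buf_new, buf_old, cb_max * sizeof (item_t));
		cb_array = buf_new;
		cb_max = cnt_new;
	}
	resume_all();

	/* Step 3: the old buffer is released only after resuming. */
	free(buf_old);
	return (0);
}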


 445 static void
 446 cpu_idle_remove_callback(cpu_idle_cb_impl_t *cip)
 447 {
 448         int i, found = 0;
 449         int unlock = 0, unpause = 0;
 450         cpu_idle_cb_state_t *sp;
 451 
 452         ASSERT(MUTEX_HELD(&cpu_idle_cb_lock));
 453 
 454         /* Try to acquire cpu_lock if not held yet. */
 455         if (!MUTEX_HELD(&cpu_lock)) {
 456                 mutex_enter(&cpu_lock);
 457                 unlock = 1;
 458         }
 459         /*
 460          * Pause all other CPUs.
 461          * It's guaranteed that no other threads will access cpu_idle_cb_array
 462          * after pause_cpus().
 463          */
 464         if (!cpus_paused()) {
 465                 pause_cpus(NULL, NULL);
 466                 unpause = 1;
 467         }
 468 
 469         /* Remove cip from array. */
 470         for (i = 0; i < cpu_idle_cb_curr; i++) {
 471                 if (found == 0) {
 472                         if (cpu_idle_cb_array[i].impl == cip) {
 473                                 found = 1;
 474                         }
 475                 } else {
 476                         cpu_idle_cb_array[i - 1] = cpu_idle_cb_array[i];
 477                 }
 478         }
 479         ASSERT(found != 0);
 480         cpu_idle_cb_curr--;
 481 
 482         /*
 483          * Reset property ready flag for all CPUs if no registered callback
 484          * left because cpu_idle_enter/exit will stop updating property if
 485          * there's no callback registered.
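For completeness, the removal path compacts the array in a single pass: nothing moves until the target entry is found, after which every later entry slides one slot down and the count is decremented. A stand-alone rendering with illustrative types (the kernel code ASSERTs the entry is present instead of returning a status):

/* Illustrative stand-in for cpu_idle_cb_item_t. */
typedef struct cb_item {
	void	*impl;
} cb_item_t;

/* Returns 1 and shrinks the array if impl was present, 0 otherwise. */
int
remove_item(cb_item_t *array, int *curr, void *impl)
{
	int i, found = 0;

	for (i = 0; i < *curr; i++) {
		if (found == 0) {
			if (array[i].impl == impl)
				found = 1;		/* slot i is now free */
		} else {
			array[i - 1] = array[i];	/* slide down */
		}
	}
	if (found)
		(*curr)--;
	return (found);
}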