Print this page
5042 stop using deprecated atomic functions


   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 /*
  22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
  23  * Use is subject to license terms.
  24  */
  25 
  26 #pragma ident   "%Z%%M% %I%     %E% SMI"
  27 
  28 #include <sys/types.h>
  29 #include <sys/param.h>
  30 #include <sys/cmn_err.h>
  31 #include <sys/mutex.h>
  32 #include <sys/systm.h>
  33 #include <sys/sysmacros.h>
  34 #include <sys/machsystm.h>
  35 #include <sys/archsystm.h>
  36 #include <sys/x_call.h>
  37 #include <sys/promif.h>
  38 #include <sys/prom_isa.h>
  39 #include <sys/privregs.h>
  40 #include <sys/vmem.h>
  41 #include <sys/atomic.h>
  42 #include <sys/panic.h>
  43 #include <sys/rwlock.h>
  44 #include <sys/reboot.h>
  45 #include <sys/kdi.h>
  46 #include <sys/kdi_machimpl.h>
  47 


 256                 if (panicstr)
 257                         return; /* just return if we are currently panicking */
 258 
 259                 if (CPU_IN_SET(cpu_ready_set, cpuid) && cp->cpu_m.mutex_ready) {
 260                         /*
  261                          * Disable preemption, and reload the current CPU.  We
 262                          * can't move from a mutex_ready cpu to a non-ready cpu
 263                          * so we don't need to re-check cp->cpu_m.mutex_ready.
 264                          */
 265                         kpreempt_disable();
 266                         cp = CPU;
 267                         ASSERT(cp->cpu_m.mutex_ready);
 268 
 269                         /*
 270                          * Try the lock.  If we don't get the lock, re-enable
 271                          * preemption and see if we should sleep.  If we are
 272                          * already the lock holder, remove the effect of the
 273                          * previous kpreempt_disable() before returning since
 274                          * preemption was disabled by an earlier kern_preprom.
 275                          */
 276                         prcp = casptr((void *)&prom_cpu, NULL, cp);
 277                         if (prcp == NULL ||
 278                             (prcp == cp && prom_thread == curthread)) {
 279                                 if (prcp == cp)
 280                                         kpreempt_enable();
 281                                 break;
 282                         }
 283 
 284                         kpreempt_enable();
 285 
 286                         /*
 287                          * We have to be very careful here since both prom_cpu
 288                          * and prcp->cpu_m.mutex_ready can be changed at any
 289                          * time by a non mutex_ready cpu holding the lock.
 290                          * If the owner is mutex_ready, holding prom_mutex
 291                          * prevents kern_postprom() from completing.  If the
 292                          * owner isn't mutex_ready, we only know it will clear
 293                          * prom_cpu before changing cpu_m.mutex_ready, so we
 294                          * issue a membar after checking mutex_ready and then
 295                          * re-verify that prom_cpu is still held by the same
 296                          * cpu before actually proceeding to cv_wait().
 297                          */
 298                         mutex_enter(&prom_mutex);
 299                         prcp = prom_cpu;
 300                         if (prcp != NULL && prcp->cpu_m.mutex_ready != 0) {
 301                                 membar_consumer();
 302                                 if (prcp == prom_cpu)
 303                                         cv_wait(&prom_cv, &prom_mutex);
 304                         }
 305                         mutex_exit(&prom_mutex);
 306 
 307                 } else {
 308                         /*
 309                          * If we are not yet mutex_ready, just attempt to grab
 310                          * the lock.  If we get it or already hold it, break.
 311                          */
 312                         ASSERT(getpil() == PIL_MAX);
 313                         prcp = casptr((void *)&prom_cpu, NULL, cp);
 314                         if (prcp == NULL || prcp == cp)
 315                                 break;
 316                 }
 317         }
 318 
 319         /*
 320          * We now hold the prom_cpu lock.  Increment the hold count by one
 321          * and assert our current state before returning to the caller.
 322          */
 323         atomic_add_32(&prom_holdcnt, 1);
 324         ASSERT(prom_holdcnt >= 1);
 325         prom_thread = curthread;
 326 }
 327 
 328 /*
 329  * Drop the prom lock if it is held by the current CPU.  If the lock is held
 330  * recursively, return without clearing prom_cpu.  If the hold count is now
 331  * zero, clear prom_cpu and cv_signal any waiting CPU.
 332  */
 333 void




   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 /*
  22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
  23  * Use is subject to license terms.
  24  */
  25 


  26 #include <sys/types.h>
  27 #include <sys/param.h>
  28 #include <sys/cmn_err.h>
  29 #include <sys/mutex.h>
  30 #include <sys/systm.h>
  31 #include <sys/sysmacros.h>
  32 #include <sys/machsystm.h>
  33 #include <sys/archsystm.h>
  34 #include <sys/x_call.h>
  35 #include <sys/promif.h>
  36 #include <sys/prom_isa.h>
  37 #include <sys/privregs.h>
  38 #include <sys/vmem.h>
  39 #include <sys/atomic.h>
  40 #include <sys/panic.h>
  41 #include <sys/rwlock.h>
  42 #include <sys/reboot.h>
  43 #include <sys/kdi.h>
  44 #include <sys/kdi_machimpl.h>
  45 


 254                 if (panicstr)
 255                         return; /* just return if we are currently panicking */
 256 
 257                 if (CPU_IN_SET(cpu_ready_set, cpuid) && cp->cpu_m.mutex_ready) {
 258                         /*
  259                          * Disable preemption, and reload the current CPU.  We
 260                          * can't move from a mutex_ready cpu to a non-ready cpu
 261                          * so we don't need to re-check cp->cpu_m.mutex_ready.
 262                          */
 263                         kpreempt_disable();
 264                         cp = CPU;
 265                         ASSERT(cp->cpu_m.mutex_ready);
 266 
 267                         /*
 268                          * Try the lock.  If we don't get the lock, re-enable
 269                          * preemption and see if we should sleep.  If we are
 270                          * already the lock holder, remove the effect of the
 271                          * previous kpreempt_disable() before returning since
 272                          * preemption was disabled by an earlier kern_preprom.
 273                          */
 274                         prcp = atomic_cas_ptr((void *)&prom_cpu, NULL, cp);
 275                         if (prcp == NULL ||
 276                             (prcp == cp && prom_thread == curthread)) {
 277                                 if (prcp == cp)
 278                                         kpreempt_enable();
 279                                 break;
 280                         }
 281 
 282                         kpreempt_enable();
 283 
 284                         /*
 285                          * We have to be very careful here since both prom_cpu
 286                          * and prcp->cpu_m.mutex_ready can be changed at any
 287                          * time by a non mutex_ready cpu holding the lock.
 288                          * If the owner is mutex_ready, holding prom_mutex
 289                          * prevents kern_postprom() from completing.  If the
 290                          * owner isn't mutex_ready, we only know it will clear
 291                          * prom_cpu before changing cpu_m.mutex_ready, so we
 292                          * issue a membar after checking mutex_ready and then
 293                          * re-verify that prom_cpu is still held by the same
 294                          * cpu before actually proceeding to cv_wait().
 295                          */
 296                         mutex_enter(&prom_mutex);
 297                         prcp = prom_cpu;
 298                         if (prcp != NULL && prcp->cpu_m.mutex_ready != 0) {
 299                                 membar_consumer();
 300                                 if (prcp == prom_cpu)
 301                                         cv_wait(&prom_cv, &prom_mutex);
 302                         }
 303                         mutex_exit(&prom_mutex);
 304 
 305                 } else {
 306                         /*
 307                          * If we are not yet mutex_ready, just attempt to grab
 308                          * the lock.  If we get it or already hold it, break.
 309                          */
 310                         ASSERT(getpil() == PIL_MAX);
 311                         prcp = atomic_cas_ptr((void *)&prom_cpu, NULL, cp);
 312                         if (prcp == NULL || prcp == cp)
 313                                 break;
 314                 }
 315         }
 316 
 317         /*
 318          * We now hold the prom_cpu lock.  Increment the hold count by one
 319          * and assert our current state before returning to the caller.
 320          */
 321         atomic_add_32(&prom_holdcnt, 1);
 322         ASSERT(prom_holdcnt >= 1);
 323         prom_thread = curthread;
 324 }
 325 
 326 /*
 327  * Drop the prom lock if it is held by the current CPU.  If the lock is held
 328  * recursively, return without clearing prom_cpu.  If the hold count is now
 329  * zero, clear prom_cpu and cv_signal any waiting CPU.
 330  */
 331 void