Print this page
5042 stop using deprecated atomic functions

*** 553,563 ****
  	for (;;) {
  		old = eqp->eq_pend;
  		eep->eqe_prev = old;
  		membar_producer();
  
! 		if (casptr(&eqp->eq_pend, old, eep) == old)
  			break;
  	}
  
  	atomic_add_64(&eqp->eq_kstat.eqk_dispatched.value.ui64, 1);
  
--- 553,563 ----
  	for (;;) {
  		old = eqp->eq_pend;
  		eep->eqe_prev = old;
  		membar_producer();
  
! 		if (atomic_cas_ptr(&eqp->eq_pend, old, eep) == old)
  			break;
  	}
  
  	atomic_add_64(&eqp->eq_kstat.eqk_dispatched.value.ui64, 1);
  
*** 594,612 ****
  	 * the first element on the pending list and then attempt to compare-
  	 * and-swap NULL to the pending list.  We use membar_producer() to
  	 * make sure that eq_ptail will be visible to errorq_panic() below
  	 * before the pending list is NULLed out.  This section is labeled
  	 * case (1) for errorq_panic, below.  If eq_ptail is not yet set (1A)
! 	 * eq_pend has all the pending errors.  If casptr fails or has not
! 	 * been called yet (1B), eq_pend still has all the pending errors.
! 	 * If casptr succeeds (1C), eq_ptail has all the pending errors.
  	 */
  	while ((eep = eqp->eq_pend) != NULL) {
  		eqp->eq_ptail = eep;
  		membar_producer();
  
! 		if (casptr(&eqp->eq_pend, eep, NULL) == eep)
  			break;
  	}
  
  	/*
  	 * If no errors were pending, assert that eq_ptail is set to NULL,
--- 594,613 ----
  	 * the first element on the pending list and then attempt to compare-
  	 * and-swap NULL to the pending list.  We use membar_producer() to
  	 * make sure that eq_ptail will be visible to errorq_panic() below
  	 * before the pending list is NULLed out.  This section is labeled
  	 * case (1) for errorq_panic, below.  If eq_ptail is not yet set (1A)
! 	 * eq_pend has all the pending errors.  If atomic_cas_ptr fails or
! 	 * has not been called yet (1B), eq_pend still has all the pending
! 	 * errors.  If atomic_cas_ptr succeeds (1C), eq_ptail has all the
! 	 * pending errors.
  	 */
  	while ((eep = eqp->eq_pend) != NULL) {
  		eqp->eq_ptail = eep;
  		membar_producer();
  
! 		if (atomic_cas_ptr(&eqp->eq_pend, eep, NULL) == eep)
  			break;
  	}
  
  	/*
  	 * If no errors were pending, assert that eq_ptail is set to NULL,
*** 748,764 ****
  			continue; /* do not drain this queue on this pass */
  
  		loggedtmp = eqp->eq_kstat.eqk_logged.value.ui64;
  
  		/*
! 		 * In case (1B) above, eq_ptail may be set but the casptr may
! 		 * not have been executed yet or may have failed.  Either way,
! 		 * we must log errors in chronological order.  So we search
! 		 * the pending list for the error pointed to by eq_ptail.  If
! 		 * it is found, we know that all subsequent errors are also
! 		 * still on the pending list, so just NULL out eq_ptail and let
! 		 * errorq_drain(), below, take care of the logging.
  		 */
  		for (eep = eqp->eq_pend; eep != NULL; eep = eep->eqe_prev) {
  			if (eep == eqp->eq_ptail) {
  				ASSERT(eqp->eq_phead == NULL);
  				eqp->eq_ptail = NULL;
--- 749,766 ----
  			continue; /* do not drain this queue on this pass */
  
  		loggedtmp = eqp->eq_kstat.eqk_logged.value.ui64;
  
  		/*
! 		 * In case (1B) above, eq_ptail may be set but the
! 		 * atomic_cas_ptr may not have been executed yet or may have
! 		 * failed.  Either way, we must log errors in chronological
! 		 * order.  So we search the pending list for the error
! 		 * pointed to by eq_ptail.  If it is found, we know that all
! 		 * subsequent errors are also still on the pending list, so
! 		 * just NULL out eq_ptail and let errorq_drain(), below,
! 		 * take care of the logging.
  		 */
  		for (eep = eqp->eq_pend; eep != NULL; eep = eep->eqe_prev) {
  			if (eep == eqp->eq_ptail) {
  				ASSERT(eqp->eq_phead == NULL);
  				eqp->eq_ptail = NULL;
*** 788,799 ****
  	 * eq_phead will be set to the oldest error on the processing
  	 * list.  We log each error and return it to the free pool.
  	 *
  	 * Unlike errorq_drain(), we don't need to worry about updating
  	 * eq_phead because errorq_panic() will be called at most once.
! 	 * However, we must use casptr to update the freelist in case
! 	 * errors are still being enqueued during panic.
  	 */
  	for (eep = eqp->eq_phead; eep != NULL; eep = nep) {
  		eqp->eq_func(eqp->eq_private, eep->eqe_data, eep);
  		eqp->eq_kstat.eqk_logged.value.ui64++;
  
--- 790,802 ----
  	 * eq_phead will be set to the oldest error on the processing
  	 * list.  We log each error and return it to the free pool.
  	 *
  	 * Unlike errorq_drain(), we don't need to worry about updating
  	 * eq_phead because errorq_panic() will be called at most once.
! 	 * However, we must use atomic_cas_ptr to update the
! 	 * freelist in case errors are still being enqueued during
! 	 * panic.
  	 */
  	for (eep = eqp->eq_phead; eep != NULL; eep = nep) {
  		eqp->eq_func(eqp->eq_private, eep->eqe_data, eep);
  		eqp->eq_kstat.eqk_logged.value.ui64++;
  
*** 912,922 ****
  	for (;;) {
  		old = eqp->eq_pend;
  		eqep->eqe_prev = old;
  		membar_producer();
  
! 		if (casptr(&eqp->eq_pend, old, eqep) == old)
  			break;
  	}
  
  	atomic_add_64(&eqp->eq_kstat.eqk_committed.value.ui64, 1);
  
--- 915,925 ----
  	for (;;) {
  		old = eqp->eq_pend;
  		eqep->eqe_prev = old;
  		membar_producer();
  
! 		if (atomic_cas_ptr(&eqp->eq_pend, old, eqep) == old)
  			break;
  	}
  
  	atomic_add_64(&eqp->eq_kstat.eqk_committed.value.ui64, 1);
  