Print this page
5045 use atomic_{inc,dec}_* instead of atomic_add_*
@@ -1234,11 +1234,11 @@
* Here we should add dl_cnt before posting the recv, because
* we have to make sure dl_cnt is updated before the
* corresponding ibd_rc_process_rx() is called.
*/
ASSERT(state->rc_srq_rwqe_list.dl_cnt < state->rc_srq_size);
- atomic_add_32(&state->rc_srq_rwqe_list.dl_cnt, 1);
+ atomic_inc_32(&state->rc_srq_rwqe_list.dl_cnt);
if (ibt_post_srq(state->rc_srq_hdl, &rwqe->w_rwr, 1, NULL) !=
IBT_SUCCESS) {
atomic_dec_32(&state->rc_srq_rwqe_list.dl_cnt);
DPRINT(40, "ibd_rc_post_srq : ibt_post_srq() failed");
return (DDI_FAILURE);
@@ -1256,11 +1256,11 @@
/*
 * Here we should add dl_cnt before posting the recv, because we
 * have to make sure dl_cnt has already been updated before the
 * corresponding ibd_rc_process_rx() is called.
 */
- atomic_add_32(&chan->rx_wqe_list.dl_cnt, 1);
+ atomic_inc_32(&chan->rx_wqe_list.dl_cnt);
if (ibt_post_recv(chan->chan_hdl, &rwqe->w_rwr, 1, NULL) !=
IBT_SUCCESS) {
atomic_dec_32(&chan->rx_wqe_list.dl_cnt);
DPRINT(40, "ibd_rc_post_rwqe : failed in ibt_post_recv()");
return (DDI_FAILURE);
@@ -1497,15 +1497,14 @@
/*
 * Record how many rwqes have been occupied by the upper
 * network layer.
 */
if (state->rc_enable_srq) {
- atomic_add_32(&state->rc_srq_rwqe_list.
- dl_bufs_outstanding, 1);
+ atomic_inc_32(
+ &state->rc_srq_rwqe_list.dl_bufs_outstanding);
} else {
- atomic_add_32(&chan->rx_wqe_list.
- dl_bufs_outstanding, 1);
+ atomic_inc_32(&chan->rx_wqe_list.dl_bufs_outstanding);
}
mp = rwqe->rwqe_im_mblk;
} else {
atomic_add_64(&state->rc_rcv_copy_byte, wc->wc_bytes_xfer);
atomic_inc_64(&state->rc_rcv_copy_pkt);
@@ -1667,11 +1666,11 @@
*/
if (ibd_rc_post_rwqe(chan, rwqe) == DDI_FAILURE) {
ibd_rc_free_rwqe(chan, rwqe);
return;
}
- atomic_add_32(&chan->rx_wqe_list.dl_bufs_outstanding, -1);
+ atomic_dec_32(&chan->rx_wqe_list.dl_bufs_outstanding);
}
/*
* Common code for interrupt handling as well as for polling
* for all completed wqe's while detaching.