3115 mblk_t *bp;
3116 mcp_kreq_ether_send_t *req;
3117 struct myri10ge_tx_copybuf *cp;
3118 caddr_t rptr, ptr;
3119 int mblen, count, cum_len, mss_resid, tx_req, pkt_size_tmp;
3120 int resid, avail, idx, hdr_size_tmp, tx_boundary;
3121 int rdma_count;
3122 uint32_t seglen, len, boundary, low, high_swapped;
3123 uint16_t pseudo_hdr_offset = htons(mss);
3124 uint8_t flags;
3125
3126 tx_boundary = mgp->tx_boundary;
3127 hdr_size_tmp = hdr_size;
3128 resid = tx_boundary;
3129 count = 1;
3130 mutex_enter(&tx->lock);
3131
3132 /* check to see if the slots are really there */
3133 avail = tx->mask - (tx->req - tx->done);
3134 if (unlikely(avail <= MYRI10GE_MAX_SEND_DESC_TSO)) {
3135 atomic_add_32(&tx->stall, 1);
3136 mutex_exit(&tx->lock);
3137 return (EBUSY);
3138 }
3139
3140 /* copy */
3141 cum_len = -hdr_size;
3142 count = 0;
3143 req = req_list;
3144 idx = tx->mask & tx->req;
3145 cp = &tx->cp[idx];
3146 low = ntohl(cp->dma.low);
3147 ptr = cp->va;
3148 cp->len = 0;
3149 if (mss) {
3150 int payload = pkt_size - hdr_size;
3151 uint16_t opackets = (payload / mss) + ((payload % mss) != 0);
3152 tx->info[idx].ostat.opackets = opackets;
3153 tx->info[idx].ostat.obytes = (opackets - 1) * hdr_size
3154 + pkt_size;
3155 }
3356 max_segs = MXGEFW_MAX_SEND_DESC;
3357 mss = 0;
3358 }
3359 req = req_list;
3360 cksum_offset = 0;
3361 pseudo_hdr_offset = 0;
3362
3363 /* leave an extra slot to keep the ring from wrapping */
3364 avail = tx->mask - (tx->req - tx->done);
3365
3366 /*
3367 * If we have > MXGEFW_MAX_SEND_DESC, then any over-length
3368 * message will need to be pulled up in order to fit.
3369 * Otherwise, we are low on transmit descriptors, it is
3370 * probably better to stall and try again rather than pullup a
3371 * message to fit.
3372 */
3373
3374 if (avail < max_segs) {
3375 err = EBUSY;
3376 atomic_add_32(&tx->stall_early, 1);
3377 goto stall;
3378 }
3379
3380 /* find out how long the frame is and how many segments it is */
3381 count = 0;
3382 odd_flag = 0;
3383 pkt_size = 0;
3384 flags = (MXGEFW_FLAGS_NO_TSO | MXGEFW_FLAGS_FIRST);
3385 for (bp = mp; bp != NULL; bp = bp->b_cont) {
3386 dblk_t *dbp;
3387 mblen = MBLKL(bp);
3388 if (mblen == 0) {
3389 /*
3390 * we can't simply skip over 0-length mblks
3391 * because the hardware can't deal with them,
3392 * and we could leak them.
3393 */
3394 MYRI10GE_ATOMIC_SLICE_STAT_INC(xmit_zero_len);
3395 err = EIO;
3396 goto pullup;
3621 } else {
3622 myri10ge_tx_stat(&tx_info[0].stat,
3623 (struct ether_header *)(void *)mp->b_rptr, 1, pkt_size);
3624 }
3625 mutex_enter(&tx->lock);
3626
3627 /* check to see if the slots are really there */
3628 avail = tx->mask - (tx->req - tx->done);
3629 if (unlikely(avail <= count)) {
3630 mutex_exit(&tx->lock);
3631 err = 0;
3632 goto late_stall;
3633 }
3634
3635 myri10ge_send_locked(tx, req_list, tx_info, count);
3636 mutex_exit(&tx->lock);
3637 return (DDI_SUCCESS);
3638
3639 late_stall:
3640 try_pullup = 0;
3641 atomic_add_32(&tx->stall_late, 1);
3642
3643 abort_with_handles:
3644 /* unbind and free handles from previous mblks */
3645 for (i = 0; i < count; i++) {
3646 bp = tx_info[i].m;
3647 tx_info[i].m = 0;
3648 if (bp) {
3649 dma_handle = tx_info[i].handle;
3650 (void) ddi_dma_unbind_handle(dma_handle->h);
3651 dma_handle->next = handles;
3652 handles = dma_handle;
3653 tx_info[i].handle = NULL;
3654 tx_info[i].m = NULL;
3655 }
3656 }
3657 myri10ge_free_tx_handle_slist(tx, handles);
3658 pullup:
3659 if (try_pullup) {
3660 err = myri10ge_pullup(ss, mp);
3661 if (err != DDI_SUCCESS && try_pullup == 2) {
3662 /* drop */
3663 MYRI10GE_ATOMIC_SLICE_STAT_INC(xmit_err);
3664 freemsg(mp);
3665 return (0);
3666 }
3667 try_pullup = 0;
3668 goto again;
3669 }
3670
3671 stall:
3672 if (err != 0) {
3673 if (err == EBUSY) {
3674 atomic_add_32(&tx->stall, 1);
3675 } else {
3676 MYRI10GE_ATOMIC_SLICE_STAT_INC(xmit_err);
3677 }
3678 }
3679 return (err);
3680 }
3681
3682 static mblk_t *
3683 myri10ge_send_wrapper(void *arg, mblk_t *mp)
3684 {
3685 struct myri10ge_slice_state *ss = arg;
3686 int err = 0;
3687 mcp_kreq_ether_send_t *req_list;
3688 #if defined(__i386)
3689 /*
3690 * We need about 2.5KB of scratch space to handle transmits.
3691 * i86pc has only 8KB of kernel stack space, so we malloc the
3692 * scratch space there rather than keeping it on the stack.
3693 */
3694 size_t req_size, tx_info_size;
|
3115 mblk_t *bp;
3116 mcp_kreq_ether_send_t *req;
3117 struct myri10ge_tx_copybuf *cp;
3118 caddr_t rptr, ptr;
3119 int mblen, count, cum_len, mss_resid, tx_req, pkt_size_tmp;
3120 int resid, avail, idx, hdr_size_tmp, tx_boundary;
3121 int rdma_count;
3122 uint32_t seglen, len, boundary, low, high_swapped;
3123 uint16_t pseudo_hdr_offset = htons(mss);
3124 uint8_t flags;
3125
3126 tx_boundary = mgp->tx_boundary;
3127 hdr_size_tmp = hdr_size;
3128 resid = tx_boundary;
3129 count = 1;
3130 mutex_enter(&tx->lock);
3131
3132 /* check to see if the slots are really there */
3133 avail = tx->mask - (tx->req - tx->done);
3134 if (unlikely(avail <= MYRI10GE_MAX_SEND_DESC_TSO)) {
3135 atomic_inc_32(&tx->stall);
3136 mutex_exit(&tx->lock);
3137 return (EBUSY);
3138 }
3139
3140 /* copy */
3141 cum_len = -hdr_size;
3142 count = 0;
3143 req = req_list;
3144 idx = tx->mask & tx->req;
3145 cp = &tx->cp[idx];
3146 low = ntohl(cp->dma.low);
3147 ptr = cp->va;
3148 cp->len = 0;
3149 if (mss) {
3150 int payload = pkt_size - hdr_size;
3151 uint16_t opackets = (payload / mss) + ((payload % mss) != 0);
3152 tx->info[idx].ostat.opackets = opackets;
3153 tx->info[idx].ostat.obytes = (opackets - 1) * hdr_size
3154 + pkt_size;
3155 }
3356 max_segs = MXGEFW_MAX_SEND_DESC;
3357 mss = 0;
3358 }
3359 req = req_list;
3360 cksum_offset = 0;
3361 pseudo_hdr_offset = 0;
3362
3363 /* leave an extra slot to keep the ring from wrapping */
3364 avail = tx->mask - (tx->req - tx->done);
3365
3366 /*
3367 * If we have > MXGEFW_MAX_SEND_DESC, then any over-length
3368 * message will need to be pulled up in order to fit.
3369 * Otherwise, we are low on transmit descriptors, it is
3370 * probably better to stall and try again rather than pullup a
3371 * message to fit.
3372 */
3373
3374 if (avail < max_segs) {
3375 err = EBUSY;
3376 atomic_inc_32(&tx->stall_early);
3377 goto stall;
3378 }
3379
3380 /* find out how long the frame is and how many segments it is */
3381 count = 0;
3382 odd_flag = 0;
3383 pkt_size = 0;
3384 flags = (MXGEFW_FLAGS_NO_TSO | MXGEFW_FLAGS_FIRST);
3385 for (bp = mp; bp != NULL; bp = bp->b_cont) {
3386 dblk_t *dbp;
3387 mblen = MBLKL(bp);
3388 if (mblen == 0) {
3389 /*
3390 * we can't simply skip over 0-length mblks
3391 * because the hardware can't deal with them,
3392 * and we could leak them.
3393 */
3394 MYRI10GE_ATOMIC_SLICE_STAT_INC(xmit_zero_len);
3395 err = EIO;
3396 goto pullup;
3621 } else {
3622 myri10ge_tx_stat(&tx_info[0].stat,
3623 (struct ether_header *)(void *)mp->b_rptr, 1, pkt_size);
3624 }
3625 mutex_enter(&tx->lock);
3626
3627 /* check to see if the slots are really there */
3628 avail = tx->mask - (tx->req - tx->done);
3629 if (unlikely(avail <= count)) {
3630 mutex_exit(&tx->lock);
3631 err = 0;
3632 goto late_stall;
3633 }
3634
3635 myri10ge_send_locked(tx, req_list, tx_info, count);
3636 mutex_exit(&tx->lock);
3637 return (DDI_SUCCESS);
3638
3639 late_stall:
3640 try_pullup = 0;
3641 atomic_inc_32(&tx->stall_late);
3642
3643 abort_with_handles:
3644 /* unbind and free handles from previous mblks */
3645 for (i = 0; i < count; i++) {
3646 bp = tx_info[i].m;
3647 tx_info[i].m = 0;
3648 if (bp) {
3649 dma_handle = tx_info[i].handle;
3650 (void) ddi_dma_unbind_handle(dma_handle->h);
3651 dma_handle->next = handles;
3652 handles = dma_handle;
3653 tx_info[i].handle = NULL;
3654 tx_info[i].m = NULL;
3655 }
3656 }
3657 myri10ge_free_tx_handle_slist(tx, handles);
3658 pullup:
3659 if (try_pullup) {
3660 err = myri10ge_pullup(ss, mp);
3661 if (err != DDI_SUCCESS && try_pullup == 2) {
3662 /* drop */
3663 MYRI10GE_ATOMIC_SLICE_STAT_INC(xmit_err);
3664 freemsg(mp);
3665 return (0);
3666 }
3667 try_pullup = 0;
3668 goto again;
3669 }
3670
3671 stall:
3672 if (err != 0) {
3673 if (err == EBUSY) {
3674 atomic_inc_32(&tx->stall);
3675 } else {
3676 MYRI10GE_ATOMIC_SLICE_STAT_INC(xmit_err);
3677 }
3678 }
3679 return (err);
3680 }
3681
3682 static mblk_t *
3683 myri10ge_send_wrapper(void *arg, mblk_t *mp)
3684 {
3685 struct myri10ge_slice_state *ss = arg;
3686 int err = 0;
3687 mcp_kreq_ether_send_t *req_list;
3688 #if defined(__i386)
3689 /*
3690 * We need about 2.5KB of scratch space to handle transmits.
3691 * i86pc has only 8KB of kernel stack space, so we malloc the
3692 * scratch space there rather than keeping it on the stack.
3693 */
3694 size_t req_size, tx_info_size;
|