5042 stop using deprecated atomic functions
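This change replaces the deprecated cas32() with atomic_cas_32() from
<sys/atomic.h> in the driver's two compare-and-swap loops; the semantics
are identical, as atomic_cas_32() stores the new value only when the
target still holds the expected old value and always returns the value
it found there. For reference, a minimal standalone sketch of the
retry-loop pattern involved (the identifiers below are illustrative,
not part of the change):

    #include <sys/atomic.h>

    /*
     * Atomically take n units from *count_p, refusing (and
     * returning 0) rather than draining the counter to zero;
     * returns the new count on success.
     */
    static uint32_t
    reserve_sketch(volatile uint32_t *count_p, uint32_t n)
    {
            uint32_t oldval, newval;

            do {
                    oldval = *count_p;
                    if (oldval <= n)
                            return (0);     /* no resources left */
                    newval = oldval - n;
            } while (atomic_cas_32(count_p, oldval, newval) != oldval);

            return (newval);
    }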
--- old/usr/src/uts/common/io/rge/rge_rxtx.c
+++ new/usr/src/uts/common/io/rge/rge_rxtx.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25
26 26 #include "rge.h"
27 27
28 28 #define U32TOPTR(x) ((void *)(uintptr_t)(uint32_t)(x))
29 29 #define PTRTOU32(x) ((uint32_t)(uintptr_t)(void *)(x))
30 30
31 31 /*
32 32 * ========== RX side routines ==========
33 33 */
34 34
35 35 #define RGE_DBG RGE_DBG_RECV /* debug flag for this code */
36 36
37 37 static uint32_t rge_atomic_reserve(uint32_t *count_p, uint32_t n);
38 38 #pragma inline(rge_atomic_reserve)
39 39
40 40 static uint32_t
41 41 rge_atomic_reserve(uint32_t *count_p, uint32_t n)
42 42 {
43 43 uint32_t oldval;
44 44 uint32_t newval;
45 45
46 46 /* ATOMICALLY */
47 47 do {
48 48 oldval = *count_p;
49 49 newval = oldval - n;
50 50 if (oldval <= n)
51 51 return (0); /* no resources left */
52 - } while (cas32(count_p, oldval, newval) != oldval);
52 + } while (atomic_cas_32(count_p, oldval, newval) != oldval);
53 53
54 54 return (newval);
55 55 }
56 56
57 57 /*
58 58 * Atomically increment a counter
59 59 */
60 60 static void rge_atomic_renounce(uint32_t *count_p, uint32_t n);
61 61 #pragma inline(rge_atomic_renounce)
62 62
63 63 static void
64 64 rge_atomic_renounce(uint32_t *count_p, uint32_t n)
65 65 {
66 66 uint32_t oldval;
67 67 uint32_t newval;
68 68
69 69 /* ATOMICALLY */
70 70 do {
71 71 oldval = *count_p;
72 72 newval = oldval + n;
73 - } while (cas32(count_p, oldval, newval) != oldval);
73 + } while (atomic_cas_32(count_p, oldval, newval) != oldval);
74 74 }
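A side note, not part of this change: rge_atomic_renounce() performs an
unconditional add, so the same effect is available from the stock
atomic_add_32() primitive in <sys/atomic.h>, with no explicit retry
loop. A sketch:

    #include <sys/atomic.h>

    /* unconditional atomic increment, equivalent to the loop above */
    atomic_add_32(count_p, (int32_t)n);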
75 75
76 76 /*
77 77 * Callback code invoked from STREAMs when the recv data buffer is free
78 78 * for recycling.
79 79 */
80 80 void
81 81 rge_rx_recycle(caddr_t arg)
82 82 {
83 83 rge_t *rgep;
84 84 dma_buf_t *rx_buf;
85 85 sw_rbd_t *free_srbdp;
86 86 uint32_t slot_recy;
87 87
88 88 rx_buf = (dma_buf_t *)arg;
89 89 rgep = (rge_t *)rx_buf->private;
90 90
91 91 /*
92 92 * In rge_unattach() and rge_attach(), this callback function will
93 93 * also be called to free mp in rge_fini_rings() and rge_init_rings().
94 94 * In that situation, we shouldn't do the desballoc() below;
95 95 * otherwise, there'll be a memory leak.
96 96 */
97 97 if (rgep->rge_mac_state == RGE_MAC_UNATTACH ||
98 98 rgep->rge_mac_state == RGE_MAC_ATTACH)
99 99 return;
100 100
101 101 /*
102 102 * Re-wrap the data buffer in a fresh mblk
103 103 * and put it back on the free ring
104 104 */
105 105 rx_buf->mp = desballoc(DMA_VPTR(rx_buf->pbuf),
106 106 rgep->rxbuf_size, 0, &rx_buf->rx_recycle);
107 107 if (rx_buf->mp == NULL) {
108 108 rge_problem(rgep, "rge_rx_recycle: desballoc() failed");
109 109 return;
110 110 }
111 111 mutex_enter(rgep->rc_lock);
112 112 slot_recy = rgep->rc_next;
113 113 free_srbdp = &rgep->free_srbds[slot_recy];
114 114
115 115 ASSERT(free_srbdp->rx_buf == NULL);
116 116 free_srbdp->rx_buf = rx_buf;
117 117 rgep->rc_next = NEXT(slot_recy, RGE_BUF_SLOTS);
118 118 rge_atomic_renounce(&rgep->rx_free, 1);
119 119 if (rgep->rx_bcopy && rgep->rx_free == RGE_BUF_SLOTS)
120 120 rgep->rx_bcopy = B_FALSE;
121 121 ASSERT(rgep->rx_free <= RGE_BUF_SLOTS);
122 122
123 123 mutex_exit(rgep->rc_lock);
124 124 }
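For context on the recycle path above: rx_buf->rx_recycle is a STREAMS
frtn_t, and desballoc() wraps an externally supplied buffer in an mblk
whose eventual freeing invokes that callback rather than releasing the
memory. A sketch of how such a callback is typically wired up, assuming
buffer-setup code like the driver's allocation path (which is not part
of this webrev):

    #include <sys/stream.h>

    /*
     * Hypothetical wiring: when the stack frees the mblk, STREAMS
     * calls free_func(free_arg), i.e. rge_rx_recycle((caddr_t)rx_buf),
     * which re-wraps the DMA buffer and returns it to the free ring.
     */
    rx_buf->rx_recycle.free_func = rge_rx_recycle;
    rx_buf->rx_recycle.free_arg = (caddr_t)rx_buf;
    rx_buf->mp = desballoc(DMA_VPTR(rx_buf->pbuf),
        rgep->rxbuf_size, 0, &rx_buf->rx_recycle);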
125 125
126 126 static int rge_rx_refill(rge_t *rgep, uint32_t slot);
127 127 #pragma inline(rge_rx_refill)
128 128
129 129 static int
130 130 rge_rx_refill(rge_t *rgep, uint32_t slot)
131 131 {
132 132 dma_buf_t *free_buf;
133 133 rge_bd_t *hw_rbd_p;
134 134 sw_rbd_t *srbdp;
135 135 uint32_t free_slot;
136 136
137 137 srbdp = &rgep->sw_rbds[slot];
138 138 hw_rbd_p = &rgep->rx_ring[slot];
139 139 free_slot = rgep->rf_next;
140 140 free_buf = rgep->free_srbds[free_slot].rx_buf;
141 141 if (free_buf != NULL) {
142 142 srbdp->rx_buf = free_buf;
143 143 rgep->free_srbds[free_slot].rx_buf = NULL;
144 144 hw_rbd_p->host_buf_addr = RGE_BSWAP_32(rgep->head_room +
145 145 free_buf->pbuf.cookie.dmac_laddress);
146 146 hw_rbd_p->host_buf_addr_hi =
147 147 RGE_BSWAP_32(free_buf->pbuf.cookie.dmac_laddress >> 32);
148 148 rgep->rf_next = NEXT(free_slot, RGE_BUF_SLOTS);
149 149 return (1);
150 150 } else {
151 151 /*
152 152 * This situation shouldn't happen
153 153 */
154 154 rge_problem(rgep, "rge_rx_refill: free buffer %d is NULL",
155 155 free_slot);
156 156 rgep->rx_bcopy = B_TRUE;
157 157 return (0);
158 158 }
159 159 }
160 160
161 161 static mblk_t *rge_receive_packet(rge_t *rgep, uint32_t slot);
162 162 #pragma inline(rge_receive_packet)
163 163
164 164 static mblk_t *
165 165 rge_receive_packet(rge_t *rgep, uint32_t slot)
166 166 {
167 167 rge_bd_t *hw_rbd_p;
168 168 sw_rbd_t *srbdp;
169 169 uchar_t *dp;
170 170 mblk_t *mp;
171 171 uint8_t *rx_ptr;
172 172 uint32_t rx_status;
173 173 uint_t packet_len;
174 174 uint_t minsize;
175 175 uint_t maxsize;
176 176 uint32_t proto;
177 177 uint32_t pflags;
178 178 struct ether_vlan_header *ehp;
179 179 uint16_t vtag = 0;
180 180
181 181 hw_rbd_p = &rgep->rx_ring[slot];
182 182 srbdp = &rgep->sw_rbds[slot];
183 183
184 184 /*
185 185 * Read receive status
186 186 */
187 187 rx_status = RGE_BSWAP_32(hw_rbd_p->flags_len) & RBD_FLAGS_MASK;
188 188
189 189 /*
190 190 * Handle error packet
191 191 */
192 192 if (!(rx_status & BD_FLAG_PKT_END)) {
193 193 RGE_DEBUG(("rge_receive_packet: not a complete packet"));
194 194 return (NULL);
195 195 }
196 196 if (rx_status & RBD_FLAG_ERROR) {
197 197 if (rx_status & RBD_FLAG_CRC_ERR)
198 198 rgep->stats.crc_err++;
199 199 if (rx_status & RBD_FLAG_RUNT)
200 200 rgep->stats.in_short++;
201 201 /*
202 202 * Set chip_error flag to reset chip:
203 203 * (suggested in Realtek programming guide.)
204 204 */
205 205 RGE_DEBUG(("rge_receive_packet: error packet, status = %x",
206 206 rx_status));
207 207 mutex_enter(rgep->genlock);
208 208 rgep->rge_chip_state = RGE_CHIP_ERROR;
209 209 mutex_exit(rgep->genlock);
210 210 return (NULL);
211 211 }
212 212
213 213 /*
214 214 * Handle size error packet
215 215 */
216 216 packet_len = RGE_BSWAP_32(hw_rbd_p->flags_len) & RBD_LEN_MASK;
217 217 packet_len -= ETHERFCSL;
218 218 minsize = ETHERMIN;
219 219 pflags = RGE_BSWAP_32(hw_rbd_p->vlan_tag);
220 220 if (pflags & RBD_VLAN_PKT)
221 221 minsize -= VLAN_TAGSZ;
222 222 maxsize = rgep->ethmax_size;
223 223 if (packet_len < minsize || packet_len > maxsize) {
224 224 RGE_DEBUG(("rge_receive_packet: len err = %d", packet_len));
225 225 return (NULL);
226 226 }
227 227
228 228 DMA_SYNC(srbdp->rx_buf->pbuf, DDI_DMA_SYNC_FORKERNEL);
229 229 if (rgep->rx_bcopy || packet_len <= RGE_RECV_COPY_SIZE ||
230 230 !rge_atomic_reserve(&rgep->rx_free, 1)) {
231 231 /*
232 232 * Allocate buffer to receive this good packet
233 233 */
234 234 mp = allocb(packet_len + RGE_HEADROOM, 0);
235 235 if (mp == NULL) {
236 236 RGE_DEBUG(("rge_receive_packet: allocate buffer fail"));
237 237 rgep->stats.no_rcvbuf++;
238 238 return (NULL);
239 239 }
240 240
241 241 /*
242 242 * Copy the data found into the new cluster
243 243 */
244 244 rx_ptr = DMA_VPTR(srbdp->rx_buf->pbuf);
245 245 mp->b_rptr = dp = mp->b_rptr + RGE_HEADROOM;
246 246 bcopy(rx_ptr + rgep->head_room, dp, packet_len);
247 247 mp->b_wptr = dp + packet_len;
248 248 } else {
249 249 mp = srbdp->rx_buf->mp;
250 250 mp->b_rptr += rgep->head_room;
251 251 mp->b_wptr = mp->b_rptr + packet_len;
252 252 mp->b_next = mp->b_cont = NULL;
253 253 /*
254 254 * Refill the current receive BD's buffer;
255 255 * if that fails, just keep the mp.
256 256 */
257 257 if (!rge_rx_refill(rgep, slot))
258 258 return (NULL);
259 259 }
260 260 rgep->stats.rbytes += packet_len;
261 261 rgep->stats.rpackets++;
262 262
263 263 /*
264 264 * VLAN packet ?
265 265 */
266 266 if (pflags & RBD_VLAN_PKT)
267 267 vtag = pflags & RBD_VLAN_TAG;
268 268 if (vtag) {
269 269 vtag = TCI_CHIP2OS(vtag);
270 270 /*
271 271 * As the h/w strips the VLAN tag from the incoming packet, we
272 272 * need to insert the VLAN tag before sending the packet upstream.
273 273 */
274 274 (void) memmove(mp->b_rptr - VLAN_TAGSZ, mp->b_rptr,
275 275 2 * ETHERADDRL);
276 276 mp->b_rptr -= VLAN_TAGSZ;
277 277 ehp = (struct ether_vlan_header *)mp->b_rptr;
278 278 ehp->ether_tpid = htons(ETHERTYPE_VLAN);
279 279 ehp->ether_tci = htons(vtag);
280 280 rgep->stats.rbytes += VLAN_TAGSZ;
281 281 }
282 282
283 283 /*
284 284 * Check h/w checksum offload status
285 285 */
286 286 pflags = 0;
287 287 proto = rx_status & RBD_FLAG_PROTOCOL;
288 288 if ((proto == RBD_FLAG_TCP && !(rx_status & RBD_TCP_CKSUM_ERR)) ||
289 289 (proto == RBD_FLAG_UDP && !(rx_status & RBD_UDP_CKSUM_ERR)))
290 290 pflags |= HCK_FULLCKSUM_OK;
291 291 if (proto != RBD_FLAG_NONE_IP && !(rx_status & RBD_IP_CKSUM_ERR))
292 292 pflags |= HCK_IPV4_HDRCKSUM_OK;
293 293 if (pflags != 0) {
294 294 mac_hcksum_set(mp, 0, 0, 0, 0, pflags);
295 295 }
296 296
297 297 return (mp);
298 298 }
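The VLAN re-insertion in rge_receive_packet() above is easier to follow
with the byte layout spelled out (illustrative comment only):

    /*
     * Before:  | dst (6) | src (6) | type/len (2) | payload ... |
     * After:   | dst (6) | src (6) | tpid (2) | tci (2) | type/len ...
     *
     * memmove() copies the two 6-byte MAC addresses back by
     * VLAN_TAGSZ (4) bytes into the headroom, b_rptr moves back by
     * the same amount, and ether_tpid/ether_tci are then written
     * into the 4-byte gap that opens before the original type/len
     * field.
     */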
299 299
300 300 /*
301 301 * Accept the packets received in rx ring.
302 302 *
303 303 * Returns a chain of mblks containing the received data, to be
304 304 * passed up to mac_rx().
305 305 * The routine returns only when a complete scan has been performed
306 306 * without finding any packets to receive.
307 307 * This function must SET the OWN bit of BD to indicate the packets
308 308 * it has accepted from the ring.
309 309 */
310 310 static mblk_t *rge_receive_ring(rge_t *rgep);
311 311 #pragma inline(rge_receive_ring)
312 312
313 313 static mblk_t *
314 314 rge_receive_ring(rge_t *rgep)
315 315 {
316 316 rge_bd_t *hw_rbd_p;
317 317 mblk_t *head;
318 318 mblk_t **tail;
319 319 mblk_t *mp;
320 320 uint32_t slot;
321 321
322 322 ASSERT(mutex_owned(rgep->rx_lock));
323 323
324 324 /*
325 325 * Sync (all) the receive ring descriptors
326 326 * before accepting the packets they describe
327 327 */
328 328 DMA_SYNC(rgep->rx_desc, DDI_DMA_SYNC_FORKERNEL);
329 329 slot = rgep->rx_next;
330 330 hw_rbd_p = &rgep->rx_ring[slot];
331 331 head = NULL;
332 332 tail = &head;
333 333
334 334 while (!(hw_rbd_p->flags_len & RGE_BSWAP_32(BD_FLAG_HW_OWN))) {
335 335 if ((mp = rge_receive_packet(rgep, slot)) != NULL) {
336 336 *tail = mp;
337 337 tail = &mp->b_next;
338 338 }
339 339
340 340 /*
341 341 * Clear RBD flags
342 342 */
343 343 hw_rbd_p->flags_len =
344 344 RGE_BSWAP_32(rgep->rxbuf_size - rgep->head_room);
345 345 HW_RBD_INIT(hw_rbd_p, slot);
346 346 slot = NEXT(slot, RGE_RECV_SLOTS);
347 347 hw_rbd_p = &rgep->rx_ring[slot];
348 348 }
349 349
350 350 rgep->rx_next = slot;
351 351 return (head);
352 352 }
353 353
354 354 /*
355 355 * Receive all ready packets.
356 356 */
357 357 void rge_receive(rge_t *rgep);
358 358 #pragma no_inline(rge_receive)
359 359
360 360 void
361 361 rge_receive(rge_t *rgep)
362 362 {
363 363 mblk_t *mp;
364 364
365 365 mutex_enter(rgep->rx_lock);
366 366 mp = rge_receive_ring(rgep);
367 367 mutex_exit(rgep->rx_lock);
368 368
369 369 if (mp != NULL)
370 370 mac_rx(rgep->mh, NULL, mp);
371 371 }
372 372
373 373
374 374 #undef RGE_DBG
375 375 #define RGE_DBG RGE_DBG_SEND /* debug flag for this code */
376 376
377 377
378 378 /*
379 379 * ========== Send-side recycle routines ==========
380 380 */
381 381 static uint32_t rge_send_claim(rge_t *rgep);
382 382 #pragma inline(rge_send_claim)
383 383
384 384 static uint32_t
385 385 rge_send_claim(rge_t *rgep)
386 386 {
387 387 uint32_t slot;
388 388 uint32_t next;
389 389
390 390 mutex_enter(rgep->tx_lock);
391 391 slot = rgep->tx_next;
392 392 next = NEXT(slot, RGE_SEND_SLOTS);
393 393 rgep->tx_next = next;
394 394 rgep->tx_flow++;
395 395 mutex_exit(rgep->tx_lock);
396 396
397 397 /*
398 398 * We check that our invariants still hold:
399 399 * + the slot and next indexes are in range
400 400 * + the slot must not be the last one (i.e. the *next*
401 401 * index must not match the next-recycle index), 'cos
402 402 * there must always be at least one free slot in a ring
403 403 */
404 404 ASSERT(slot < RGE_SEND_SLOTS);
405 405 ASSERT(next < RGE_SEND_SLOTS);
406 406 ASSERT(next != rgep->tc_next);
407 407
408 408 return (slot);
409 409 }
410 410
411 411 /*
412 412 * We don't want to call this function after every successful
413 413 * h/w transmit completion in the ISR. Instead, we call it from
414 414 * rge_send() when few or no free tx BDs remain.
415 415 */
416 416 void rge_send_recycle(rge_t *rgep);
417 417 #pragma inline(rge_send_recycle)
418 418
419 419 void
420 420 rge_send_recycle(rge_t *rgep)
421 421 {
422 422 rge_bd_t *hw_sbd_p;
423 423 uint32_t tc_tail;
424 424 uint32_t tc_head;
425 425 uint32_t n;
426 426
427 427 mutex_enter(rgep->tc_lock);
428 428 tc_head = rgep->tc_next;
429 429 tc_tail = rgep->tc_tail;
430 430 if (tc_head == tc_tail)
431 431 goto resched;
432 432
433 433 do {
434 434 tc_tail = LAST(tc_tail, RGE_SEND_SLOTS);
435 435 hw_sbd_p = &rgep->tx_ring[tc_tail];
436 436 if (tc_tail == tc_head) {
437 437 if (hw_sbd_p->flags_len &
438 438 RGE_BSWAP_32(BD_FLAG_HW_OWN)) {
439 439 /*
440 440 * Recycled nothing: bump the watchdog counter,
441 441 * thus guaranteeing that it's nonzero
442 442 * (watchdog activated).
443 443 */
444 444 if (rgep->watchdog == 0)
445 445 rgep->watchdog = 1;
446 446 mutex_exit(rgep->tc_lock);
447 447 return;
448 448 }
449 449 break;
450 450 }
451 451 } while (hw_sbd_p->flags_len & RGE_BSWAP_32(BD_FLAG_HW_OWN));
452 452
453 453 /*
454 454 * Recycled something :-)
455 455 */
456 456 rgep->tc_next = NEXT(tc_tail, RGE_SEND_SLOTS);
457 457 n = rgep->tc_next - tc_head;
458 458 if (rgep->tc_next < tc_head)
459 459 n += RGE_SEND_SLOTS;
460 460 rge_atomic_renounce(&rgep->tx_free, n);
461 461 rgep->watchdog = 0;
462 462 ASSERT(rgep->tx_free <= RGE_SEND_SLOTS);
463 463
464 464 resched:
465 465 mutex_exit(rgep->tc_lock);
466 466 if (rgep->resched_needed &&
467 467 rgep->rge_mac_state == RGE_MAC_STARTED) {
468 468 rgep->resched_needed = B_FALSE;
469 469 mac_tx_update(rgep->mh);
470 470 }
471 471 }
472 472
473 473 /*
474 474 * Send a message by copying it into a preallocated (and premapped) buffer
475 475 */
476 476 static void rge_send_copy(rge_t *rgep, mblk_t *mp, uint16_t tci);
477 477 #pragma inline(rge_send_copy)
478 478
479 479 static void
480 480 rge_send_copy(rge_t *rgep, mblk_t *mp, uint16_t tci)
481 481 {
482 482 rge_bd_t *hw_sbd_p;
483 483 sw_sbd_t *ssbdp;
484 484 mblk_t *bp;
485 485 char *txb;
486 486 uint32_t slot;
487 487 size_t totlen;
488 488 size_t mblen;
489 489 uint32_t pflags;
490 490 struct ether_header *ethhdr;
491 491 struct ip *ip_hdr;
492 492
493 493 /*
494 494 * IMPORTANT:
495 495 * Up to the point where it claims a place, a send_msg()
496 496 * routine can indicate failure by returning B_FALSE. Once it's
497 497 * claimed a place, it mustn't fail.
498 498 *
499 499 * In this version, there's no setup to be done here, and there's
500 500 * nothing that can fail, so we can go straight to claiming our
501 501 * already-reserved place on the train.
502 502 *
503 503 * This is the point of no return!
504 504 */
505 505 slot = rge_send_claim(rgep);
506 506 ssbdp = &rgep->sw_sbds[slot];
507 507
508 508 /*
509 509 * Copy the data into a pre-mapped buffer, which avoids the
510 510 * overhead (and complication) of mapping/unmapping STREAMS
511 511 * buffers and keeping hold of them until the DMA has completed.
512 512 *
513 513 * Because all buffers are the same size, and larger than the
514 514 * longest single valid message, we don't have to bother about
515 515 * splitting the message across multiple buffers either.
516 516 */
517 517 txb = DMA_VPTR(ssbdp->pbuf);
518 518 totlen = 0;
519 519 bp = mp;
520 520 if (tci != 0) {
521 521 /*
522 522 * Do not copy the vlan tag
523 523 */
524 524 bcopy(bp->b_rptr, txb, 2 * ETHERADDRL);
525 525 txb += 2 * ETHERADDRL;
526 526 totlen += 2 * ETHERADDRL;
527 527 mblen = MBLKL(bp);
528 528 ASSERT(mblen >= 2 * ETHERADDRL + VLAN_TAGSZ);
529 529 mblen -= 2 * ETHERADDRL + VLAN_TAGSZ;
530 530 if ((totlen += mblen) <= rgep->ethmax_size) {
531 531 bcopy(bp->b_rptr + 2 * ETHERADDRL + VLAN_TAGSZ,
532 532 txb, mblen);
533 533 txb += mblen;
534 534 }
535 535 bp = bp->b_cont;
536 536 rgep->stats.obytes += VLAN_TAGSZ;
537 537 }
538 538 for (; bp != NULL; bp = bp->b_cont) {
539 539 mblen = MBLKL(bp);
540 540 if ((totlen += mblen) <= rgep->ethmax_size) {
541 541 bcopy(bp->b_rptr, txb, mblen);
542 542 txb += mblen;
543 543 }
544 544 }
545 545 rgep->stats.obytes += totlen;
546 546 rgep->stats.tx_pre_ismax = rgep->stats.tx_cur_ismax;
547 547 if (totlen == rgep->ethmax_size)
548 548 rgep->stats.tx_cur_ismax = B_TRUE;
549 549 else
550 550 rgep->stats.tx_cur_ismax = B_FALSE;
551 551
552 552 /*
553 553 * We've reached the end of the chain, and we should have
554 554 * collected no more than ethmax_size bytes into our buffer.
555 555 */
556 556 ASSERT(bp == NULL);
557 557 ASSERT(totlen <= rgep->ethmax_size);
558 558 DMA_SYNC(ssbdp->pbuf, DDI_DMA_SYNC_FORDEV);
559 559
560 560 /*
561 561 * Update the hardware send buffer descriptor flags
562 562 */
563 563 hw_sbd_p = &rgep->tx_ring[slot];
564 564 ASSERT(hw_sbd_p == ssbdp->desc.mem_va);
565 565 hw_sbd_p->flags_len = RGE_BSWAP_32(totlen & SBD_LEN_MASK);
566 566 if (tci != 0) {
567 567 tci = TCI_OS2CHIP(tci);
568 568 hw_sbd_p->vlan_tag = RGE_BSWAP_32(tci);
569 569 hw_sbd_p->vlan_tag |= RGE_BSWAP_32(SBD_VLAN_PKT);
570 570 } else {
571 571 hw_sbd_p->vlan_tag = 0;
572 572 }
573 573
574 574 /*
575 575 * h/w checksum offload flags
576 576 */
577 577 mac_hcksum_get(mp, NULL, NULL, NULL, NULL, &pflags);
578 578 if (pflags & HCK_FULLCKSUM) {
579 579 ASSERT(totlen >= sizeof (struct ether_header) +
580 580 sizeof (struct ip));
581 581 ethhdr = (struct ether_header *)(DMA_VPTR(ssbdp->pbuf));
582 582 /*
583 583 * Is the packet an IP(v4) packet?
584 584 */
585 585 if (ntohs(ethhdr->ether_type) == ETHERTYPE_IP) {
586 586 ip_hdr = (struct ip *)
587 587 ((uint8_t *)DMA_VPTR(ssbdp->pbuf) +
588 588 sizeof (struct ether_header));
589 589 if (ip_hdr->ip_p == IPPROTO_TCP)
590 590 hw_sbd_p->flags_len |=
591 591 RGE_BSWAP_32(SBD_FLAG_TCP_CKSUM);
592 592 else if (ip_hdr->ip_p == IPPROTO_UDP)
593 593 hw_sbd_p->flags_len |=
594 594 RGE_BSWAP_32(SBD_FLAG_UDP_CKSUM);
595 595 }
596 596 }
597 597 if (pflags & HCK_IPV4_HDRCKSUM)
598 598 hw_sbd_p->flags_len |= RGE_BSWAP_32(SBD_FLAG_IP_CKSUM);
599 599
600 600 HW_SBD_SET(hw_sbd_p, slot);
601 601
602 602 /*
603 603 * We're done.
604 604 * The message can be freed right away, as we've already
605 605 * copied the contents ...
606 606 */
607 607 freemsg(mp);
608 608 }
609 609
610 610 static boolean_t
611 611 rge_send(rge_t *rgep, mblk_t *mp)
612 612 {
613 613 struct ether_vlan_header *ehp;
614 614 uint16_t tci;
615 615
616 616 ASSERT(mp->b_next == NULL);
617 617
618 618 /*
619 619 * Try to reserve a place in the transmit ring.
620 620 */
621 621 if (!rge_atomic_reserve(&rgep->tx_free, 1)) {
622 622 RGE_DEBUG(("rge_send: no free slots"));
623 623 rgep->stats.defer++;
624 624 rgep->resched_needed = B_TRUE;
625 625 return (B_FALSE);
626 626 }
627 627
628 628 /*
629 629 * Determine if the packet is VLAN tagged.
630 630 */
631 631 ASSERT(MBLKL(mp) >= sizeof (struct ether_header));
632 632 tci = 0;
633 633 ehp = (struct ether_vlan_header *)mp->b_rptr;
634 634 if (ehp->ether_tpid == htons(ETHERTYPE_VLAN))
635 635 tci = ntohs(ehp->ether_tci);
636 636
637 637 /*
638 638 * We've reserved a place :-)
639 639 * These ASSERTions check that our invariants still hold:
640 640 * there must still be at least one free place
641 641 * there must be at least one place NOT free (ours!)
642 642 */
643 643 ASSERT(rgep->tx_free < RGE_SEND_SLOTS);
644 644 rge_send_copy(rgep, mp, tci);
645 645
646 646 /*
647 647 * Trigger chip h/w transmit ...
648 648 */
649 649 mutex_enter(rgep->tx_lock);
650 650 if (--rgep->tx_flow == 0) {
651 651 DMA_SYNC(rgep->tx_desc, DDI_DMA_SYNC_FORDEV);
652 652 rgep->tc_tail = rgep->tx_next;
653 653 }
654 654 rgep->stats.opackets++;
655 655 mutex_exit(rgep->tx_lock);
656 656
657 657 return (B_TRUE);
658 658 }
659 659
660 660 uint_t
661 661 rge_reschedule(caddr_t arg1, caddr_t arg2)
662 662 {
663 663 rge_t *rgep;
664 664
665 665 rgep = (rge_t *)arg1;
666 666 _NOTE(ARGUNUSED(arg2))
667 667
668 668 rge_send_recycle(rgep);
669 669
670 670 if (rgep->chipid.is_pcie && rgep->tx_free != RGE_SEND_SLOTS) {
671 671 /*
672 672 * It's been observed that on current Realtek PCI-E chips, a
673 673 * tx request for the second fragment of an upper-layer packet
674 674 * is ignored if a hardware transmission is already in
675 675 * progress, and it will not be processed once the tx engine
676 676 * goes idle. So one workaround is to re-issue the request
677 677 * if there are untransmitted packets remaining after a tx
678 678 * interrupt occurs.
679 679 */
680 680 rge_tx_trigger(rgep);
681 681 }
682 682
683 683 return (DDI_INTR_CLAIMED);
684 684 }
685 685
686 686 /*
687 687 * rge_m_tx() - send a chain of packets
688 688 */
689 689 mblk_t *
690 690 rge_m_tx(void *arg, mblk_t *mp)
691 691 {
692 692 rge_t *rgep = arg; /* private device info */
693 693 mblk_t *next;
694 694 mblk_t *mp_org = mp;
695 695
696 696 ASSERT(mp != NULL);
697 697
698 698 rw_enter(rgep->errlock, RW_READER);
699 699 if ((rgep->rge_mac_state != RGE_MAC_STARTED) ||
700 700 (rgep->rge_chip_state != RGE_CHIP_RUNNING) ||
701 701 (rgep->param_link_up != LINK_STATE_UP)) {
702 702 rw_exit(rgep->errlock);
703 703 RGE_DEBUG(("rge_m_tx: tx doesn't work"));
704 704 freemsgchain(mp);
705 705 return (NULL);
706 706 }
707 707
708 708 while (mp != NULL) {
709 709 next = mp->b_next;
710 710 mp->b_next = NULL;
711 711
712 712 if (!rge_send(rgep, mp)) {
713 713 mp->b_next = next;
714 714 break;
715 715 }
716 716
717 717 mp = next;
718 718 }
719 719 if (mp != mp_org) {
720 720 rge_tx_trigger(rgep);
721 721 }
722 722 rw_exit(rgep->errlock);
723 723
724 724 return (mp);
725 725 }