Print this page
XXXX introduce drv_sectohz
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/bnxe/bnxe_rx.c
+++ new/usr/src/uts/common/io/bnxe/bnxe_rx.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2014 QLogic Corporation
24 24 * The contents of this file are subject to the terms of the
25 25 * QLogic End User License (the "License").
26 26 * You may not use this file except in compliance with the License.
27 27 *
28 28 * You can obtain a copy of the License at
29 29 * http://www.qlogic.com/Resources/Documents/DriverDownloadHelp/
30 30 * QLogic_End_User_Software_License.txt
31 31 * See the License for the specific language governing permissions
32 32 * and limitations under the License.
33 33 */
34 34
35 35 /*
36 36 * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
37 37 */
38 38
39 39 #include "bnxe.h"
40 40
41 41
42 42 ddi_dma_attr_t bnxeRxDmaAttrib =
43 43 {
44 44 DMA_ATTR_V0, /* dma_attr_version */
45 45 0, /* dma_attr_addr_lo */
46 46 0xffffffffffffffff, /* dma_attr_addr_hi */
47 47 0xffffffffffffffff, /* dma_attr_count_max */
48 48 BNXE_DMA_ALIGNMENT, /* dma_attr_align */
49 49 0xffffffff, /* dma_attr_burstsizes */
50 50 1, /* dma_attr_minxfer */
51 51 0xffffffffffffffff, /* dma_attr_maxxfer */
52 52 0xffffffffffffffff, /* dma_attr_seg */
53 53 1, /* dma_attr_sgllen */
54 54 1, /* dma_attr_granular */
55 55 0, /* dma_attr_flags */
56 56 };
57 57
58 58
/*
 * Credit the bytes of the reclaimed packets back to the LM, move the
 * reclaimed descriptors onto the rx chain's free queue, and ask the LM to
 * post available buffers back to hardware chain 'idx'.  pReclaimList is
 * emptied on return.
 */
static void BnxeRxPostBuffers(um_device_t * pUM,
                              int idx,
                              s_list_t * pReclaimList)
{
    lm_rx_chain_t * pLmRxChain = &LM_RXQ(&pUM->lm_dev, idx);
    u32_t returnedBytes = 0;
    lm_packet_t * pLmPkt;

    /* return bytes from reclaimed list to LM */
    pLmPkt = (lm_packet_t *)s_list_peek_head(pReclaimList);
    while (pLmPkt)
    {
        returnedBytes += pLmPkt->size;
        pLmPkt = (lm_packet_t *)s_list_next_entry(&pLmPkt->link);
    }

    BNXE_LOCK_ENTER_RX(pUM, idx);

    /* track the low-water mark of the active descriptor queue */
    if (pUM->rxq[idx].rxLowWater > s_list_entry_cnt(&pLmRxChain->active_descq))
    {
        pUM->rxq[idx].rxLowWater = s_list_entry_cnt(&pLmRxChain->active_descq);
    }

    lm_return_packet_bytes(&pUM->lm_dev, idx, returnedBytes);

    s_list_add_tail(&pLmRxChain->common.free_descq, pReclaimList);
    s_list_clear(pReclaimList);

#if 0
    /*
     * Don't post buffers if we don't have too many free buffers and there are a
     * lot of buffers already posted.
     */
    if (lm_bd_chain_avail_bds(&pLmRxChain->bd_chain) < 32)
    {
        BNXE_LOCK_EXIT_RX(pUM, idx);
        return;
    }

    /*
     * Don't post buffers if there aren't really that many to post yet.
     */
    if (s_list_entry_cnt(&pLmRxChain->common.free_descq) < 32)
    {
        BNXE_LOCK_EXIT_RX(pUM, idx);
        return;
    }
#endif

    lm_post_buffers(&pUM->lm_dev, idx, NULL, 0);

    BNXE_LOCK_EXIT_RX(pUM, idx);
}
112 112
113 113
114 114 static u32_t BnxeRxPktDescrSize(um_device_t * pUM)
115 115 {
116 116 u32_t descSize;
117 117
118 118 (void)pUM;
119 119
120 120 descSize = sizeof(um_rxpacket_t) + SIZEOF_SIG;
121 121
122 122 return ALIGN_VALUE_TO_WORD_BOUNDARY(descSize);
123 123 }
124 124
125 125
126 126 static void BnxeRxPktDescrFree(um_device_t * pUM,
127 127 um_rxpacket_t * pRxPkt)
128 128 {
129 129 u32_t descSize;
130 130 caddr_t pMem;
131 131
132 132 BnxeDbgBreakIfFastPath(pUM, SIG(pRxPkt) != L2PACKET_RX_SIG);
133 133
134 134 descSize = BnxeRxPktDescrSize(pUM);
135 135 pMem = (caddr_t)pRxPkt - SIZEOF_SIG;
136 136
137 137 kmem_free(pMem, descSize);
138 138 }
139 139
140 140
/*
 * Free routine for rx buffers loaned up the stack (installed via desballoc()
 * in BnxeRxRingProcess()).  Invoked when the upper layer frees the mblk that
 * references one of our DMA buffers.  If the buffer's signature matches the
 * current plumb, the descriptor is queued for recycling back to the rx ring;
 * otherwise it belongs to a previous plumb and is torn down completely.
 */
static void BnxeRxPktFree(char * free_arg)
{
    um_rxpacket_t * pRxPkt = (um_rxpacket_t *)free_arg;
    um_device_t *   pUM    = (um_device_t *)pRxPkt->pUM;
    int             idx    = pRxPkt->idx;
    s_list_t        doneRxQ;

    if (pUM->magic != BNXE_MAGIC)
    {
        /*
         * Oh my!  The free_arg data got corrupted.  Log a message and leak this
         * packet.  We don't decrement the 'up in the stack count' since we
         * can't be sure this packet really was a packet we previously sent up.
         */
        BnxeLogWarn(NULL, "ERROR freeing packet - UM is invalid! (%p)", pRxPkt);
        return;
    }

    if (pUM->rxBufSignature[LM_CHAIN_IDX_CLI(&pUM->lm_dev, idx)] !=
        pRxPkt->signature)
    {
        /*
         * The stack is freeing a packet that was from a previous plumb of
         * the interface.  Release its DMA resources and descriptor now.
         */
        pRxPkt->lm_pkt.u1.rx.mem_phys[0].as_u64 = 0;
        pRxPkt->rx_info.mem_virt = NULL;
        pRxPkt->rx_info.mem_size = 0;

        ddi_dma_unbind_handle(pRxPkt->dmaHandle);
        ddi_dma_mem_free(&pRxPkt->dmaAccHandle);
        ddi_dma_free_handle(&pRxPkt->dmaHandle);

        BnxeRxPktDescrFree(pUM, pRxPkt);
    }
    else
    {
        s_list_clear(&doneRxQ);

        BNXE_LOCK_ENTER_DONERX(pUM, idx);

        s_list_push_tail(&pUM->rxq[idx].doneRxQ,
                         &((lm_packet_t *)pRxPkt)->link);

        /* post packets when a bunch are ready */
        if (s_list_entry_cnt(&pUM->rxq[idx].doneRxQ) >= pUM->devParams.maxRxFree)
        {
            doneRxQ = pUM->rxq[idx].doneRxQ;
            s_list_clear(&pUM->rxq[idx].doneRxQ);
        }

        BNXE_LOCK_EXIT_DONERX(pUM, idx);

        /* batch collected outside the lock: repost to the hardware */
        if (s_list_entry_cnt(&doneRxQ))
        {
            BnxeRxPostBuffers(pUM, idx, &doneRxQ);
        }
    }

    atomic_dec_32(&pUM->rxq[idx].rxBufUpInStack);
}
202 202
203 203
/*
 * Wait (up to roughly five seconds per chain, polling once per second) for
 * the client identified by cliIdx to return all rx buffers it is holding,
 * as tracked by rxBufUpInStack.  Returns B_TRUE if all buffers came back,
 * B_FALSE if some are still outstanding (a warning is logged).
 */
boolean_t BnxeWaitForPacketsFromClient(um_device_t * pUM,
                                       int           cliIdx)
{
    int i, idx, cnt=0, tot=0;

    switch (cliIdx)
    {
    case LM_CLI_IDX_FCOE:

        for (i = 0; i < 5; i++)
        {
            if ((cnt = pUM->rxq[FCOE_CID(&pUM->lm_dev)].rxBufUpInStack) == 0)
            {
                break;
            }

            /* twiddle our thumbs for one second */
            delay(drv_sectohz(1));
        }

        if (cnt)
        {
            BnxeLogWarn(pUM, "%d packets still held by FCoE (chain %d)!",
                        cnt, FCOE_CID(&pUM->lm_dev));
            return B_FALSE;
        }

        break;

    case LM_CLI_IDX_NDIS:

        tot = 0;

        LM_FOREACH_RSS_IDX(&pUM->lm_dev, idx)
        {
            for (i = 0; i < 5; i++)
            {
                if ((cnt = pUM->rxq[idx].rxBufUpInStack) == 0)
                {
                    break;
                }

                /* twiddle our thumbs for one second */
                delay(drv_sectohz(1));
            }

            tot += cnt;
        }

        if (tot)
        {
            /*
             * NOTE(review): 'idx' here is the loop variable left over after
             * LM_FOREACH_RSS_IDX finished, so the chain number in this
             * message may not identify the chain actually holding packets.
             */
            BnxeLogWarn(pUM, "%d packets still held by the stack (chain %d)!",
                        tot, idx);
            return B_FALSE;
        }

        break;

    default:

        BnxeLogWarn(pUM, "ERROR: Invalid cliIdx for BnxeWaitForPacketsFromClient (%d)", cliIdx);
        break;
    }

    return B_TRUE;
}
270 270
271 271
/*
 * Process packets received on rx ring 'idx' and deliver them up the stack.
 *
 * Newly received packets are pulled from the LM and appended after any
 * packets deferred on waitRxQ from a previous poll.  Small packets (below
 * rxCopyThreshold) and packets arriving while descriptors are scarce are
 * copied into freshly allocated mblks; all others are loaned up the stack
 * via desballoc() with BnxeRxPktFree() as the free routine.  Descriptors
 * whose data was copied, or that failed/were discarded, are reposted to the
 * hardware before returning.
 *
 * When polling is TRUE at most numBytes worth of packets are processed and
 * the resulting mblk chain is returned to the caller (leftovers go back to
 * the front of waitRxQ).  When polling is FALSE the chain is delivered
 * directly (FCoE callback, or mac_rx/mac_rx_ring) and NULL is returned.
 */
/* numBytes is only valid when polling is TRUE */
mblk_t * BnxeRxRingProcess(um_device_t * pUM,
                           int           idx,
                           boolean_t     polling,
                           int           numBytes)
{
    RxQueue *       pRxQ;
    lm_rx_chain_t * pLmRxChain;
    u32_t           activeDescqCount;
    boolean_t       forceCopy;
    um_rxpacket_t * pRxPkt;
    lm_packet_t *   pLmPkt;
    u32_t           pktLen;
    boolean_t       dataCopied;
    u32_t           notCopiedCount;
    mblk_t *        pMblk;
    int             ofldFlags;
    mblk_t *        head = NULL;
    mblk_t *        tail = NULL;
    s_list_t        rxList;
    s_list_t        reclaimList;
    int             procBytes = 0;
    s_list_t        tmpList;
    sp_cqes_info    sp_cqes;
    u32_t           pktsRxed;

    pRxQ = &pUM->rxq[idx];

    s_list_clear(&tmpList);

    /* get the list of packets received */
    BNXE_LOCK_ENTER_RX(pUM, idx);

    pktsRxed = lm_get_packets_rcvd(&pUM->lm_dev, idx, &tmpList, &sp_cqes);

    /* grab any waiting packets */
    rxList = pRxQ->waitRxQ;
    s_list_clear(&pRxQ->waitRxQ);

    /* put any new packets at the end of the queue */
    s_list_add_tail(&rxList, &tmpList);

    BNXE_LOCK_EXIT_RX(pUM, idx);

    /* now complete the ramrods */
    lm_complete_ramrods(&pUM->lm_dev, &sp_cqes);

    if (s_list_entry_cnt(&rxList) == 0)
    {
        return NULL;
    }

    s_list_clear(&reclaimList);
    notCopiedCount = 0;

    pLmRxChain = &LM_RXQ(&pUM->lm_dev, idx);

    activeDescqCount = s_list_entry_cnt(&pLmRxChain->active_descq);

    /*
     * Force copying (rather than loaning buffers) when the number of
     * descriptors still posted drops below 1/8 of the configured count.
     */
    forceCopy = (activeDescqCount <
                 (pUM->lm_dev.params.l2_rx_desc_cnt[LM_CHAIN_IDX_CLI(&pUM->lm_dev, idx)] >> 3));

    /* send the packets up the stack */
    while (1)
    {
        pRxPkt = (um_rxpacket_t *)s_list_pop_head(&rxList);
        if (pRxPkt == NULL)
        {
            break;
        }

        pLmPkt = &(pRxPkt->lm_pkt);

        if (pLmPkt->status != LM_STATUS_SUCCESS)
        {
            /* XXX increment error stat? */
            s_list_push_tail(&reclaimList, &pLmPkt->link);
            continue;
        }

        pktLen = pLmPkt->size;

        if (polling == TRUE)
        {
            /* When polling an rx ring we can only process up to numBytes */
            if ((procBytes + pktLen) <= numBytes)
            {
                /* continue to process this packet */
                procBytes += pktLen;
            }
            else
            {
                /* put this packet not processed back on the list (front) */
                s_list_push_head(&rxList, &pRxPkt->lm_pkt.link);
                break;
            }
        }

        (void)ddi_dma_sync(pRxPkt->dmaHandle,
                           0,
                           pktLen,
                           DDI_DMA_SYNC_FORKERNEL);

        if (pUM->fmCapabilities &&
            BnxeCheckDmaHandle(pRxPkt->dmaHandle) != DDI_FM_OK)
        {
            ddi_fm_service_impact(pUM->pDev, DDI_SERVICE_DEGRADED);
        }

        dataCopied = B_FALSE;

        if (forceCopy ||
            (pUM->devParams.rxCopyThreshold &&
             (pktLen < pUM->devParams.rxCopyThreshold)))
        {
            if ((pMblk = allocb(pktLen, BPRI_MED)) == NULL)
            {
                pRxQ->rxDiscards++;
                s_list_push_tail(&reclaimList, &pLmPkt->link);
                continue;
            }

            /* copy the packet into the new mblk */
            bcopy((pRxPkt->rx_info.mem_virt + BNXE_DMA_RX_OFFSET),
                  pMblk->b_rptr, pktLen);
            pMblk->b_wptr = (pMblk->b_rptr + pktLen);
            dataCopied = B_TRUE;

            pRxQ->rxCopied++;

            goto BnxeRxRingProcess_sendup;
        }

        if ((activeDescqCount == 0) && (s_list_entry_cnt(&rxList) == 0))
        {
            /*
             * If the hardware is out of receive buffers and we are on the last
             * receive packet then drop the packet.  We do this because we might
             * not be able to allocate any new receive buffers before the ISR
             * completes.  If this happens, the driver will enter an infinite
             * interrupt loop where the hardware is requesting rx buffers the
             * driver cannot allocate.  To prevent a system livelock we leave
             * one buffer perpetually available.  Note that we do this after
             * giving the double copy code a chance to claim the packet.
             */

            /* FIXME
             * Make sure to add one more to the rx packet descriptor count
             * before allocating them.
             */

            pRxQ->rxDiscards++;
            s_list_push_tail(&reclaimList, &pLmPkt->link);
            continue;
        }

        /*
         * If we got here then the packet wasn't copied so we need to create a
         * new mblk_t which references the lm_packet_t buffer.
         */

        pRxPkt->freeRtn.free_func = BnxeRxPktFree;
        pRxPkt->freeRtn.free_arg  = (char *)pRxPkt;
        pRxPkt->pUM               = (void *)pUM;
        pRxPkt->idx               = idx;

        if ((pMblk = desballoc((pRxPkt->rx_info.mem_virt + BNXE_DMA_RX_OFFSET),
                               pktLen,
                               BPRI_MED,
                               &pRxPkt->freeRtn)) == NULL)
        {
            pRxQ->rxDiscards++;
            s_list_push_tail(&reclaimList, &pLmPkt->link);
            continue;
        }

        pMblk->b_wptr = (pMblk->b_rptr + pktLen);

BnxeRxRingProcess_sendup:

        /*
         * Check if the checksum was offloaded so we can pass the result to
         * the stack.
         */
        ofldFlags = 0;

        if ((pUM->devParams.enabled_oflds & LM_OFFLOAD_RX_IP_CKSUM) &&
            (pRxPkt->rx_info.flags & LM_RX_FLAG_IP_CKSUM_IS_GOOD))
        {
            ofldFlags |= HCK_IPV4_HDRCKSUM_OK;
        }

        if (((pUM->devParams.enabled_oflds & LM_OFFLOAD_RX_TCP_CKSUM) &&
             (pRxPkt->rx_info.flags & LM_RX_FLAG_TCP_CKSUM_IS_GOOD)) ||
            ((pUM->devParams.enabled_oflds & LM_OFFLOAD_RX_UDP_CKSUM) &&
             (pRxPkt->rx_info.flags & LM_RX_FLAG_UDP_CKSUM_IS_GOOD)))
        {
            ofldFlags |= HCK_FULLCKSUM_OK;
        }

        if (ofldFlags != 0)
        {
            mac_hcksum_set(pMblk, 0, 0, 0, 0, ofldFlags);
        }

        /*
         * If the packet data was copied into a new recieve buffer then put this
         * descriptor in a list to be reclaimed later.  If not, then increment a
         * counter so we can track how many of our descriptors are held by the
         * stack.
         */
        if (dataCopied == B_TRUE)
        {
            s_list_push_tail(&reclaimList, &pLmPkt->link);
        }
        else
        {
            notCopiedCount++;
        }

        if (head == NULL)
        {
            head = pMblk;
        }
        else
        {
            tail->b_next = pMblk;
        }

        tail = pMblk;
        tail->b_next = NULL;

#if 0
        BnxeDumpPkt(pUM,
                    (BNXE_FCOE(pUM) && (idx == FCOE_CID(&pUM->lm_dev))) ?
                        "<- FCoE L2 RX <-" : "<- L2 RX <-",
                    pMblk, B_TRUE);
#endif
    }

    if (head)
    {
        if (notCopiedCount)
        {
            /* track all non-copied packets that will be held by the stack */
            atomic_add_32(&pUM->rxq[idx].rxBufUpInStack, notCopiedCount);
        }

        /* pass the mblk chain up the stack */
        if (polling == FALSE)
        {

            /* XXX NEED TO ADD STATS FOR RX PATH UPCALLS */

            if (BNXE_FCOE(pUM) && (idx == FCOE_CID(&pUM->lm_dev)))
            {
                /* XXX verify fcoe frees all packets on success or error */
                if (pUM->fcoe.pDev && pUM->fcoe.bind.cliIndicateRx)
                {
                    pUM->fcoe.bind.cliIndicateRx(pUM->fcoe.pDev, head);
                }
                else
                {
                    /* FCoE isn't bound?  Reclaim the chain... */
                    freemsgchain(head);
                    head = NULL;
                }
            }
            else
            {
#if defined(BNXE_RINGS) && (defined(__S11) || defined(__S12))
                mac_rx_ring(pUM->pMac,
                            pUM->rxq[idx].ringHandle,
                            head,
                            pUM->rxq[idx].genNumber);
#else
                mac_rx(pUM->pMac,
                       pUM->macRxResourceHandles[idx],
                       head);
#endif
            }
        }
    }

    if ((polling == TRUE) && s_list_entry_cnt(&rxList))
    {
        /* put the packets not processed back on the list (front) */
        BNXE_LOCK_ENTER_RX(pUM, idx);
        s_list_add_head(&pRxQ->waitRxQ, &rxList);
        BNXE_LOCK_EXIT_RX(pUM, idx);
    }

    if (s_list_entry_cnt(&reclaimList))
    {
        BnxeRxPostBuffers(pUM, idx, &reclaimList);
    }

    return (polling == TRUE) ? head : NULL;
}
571 571
572 572
573 573 /*
574 574 * Dumping packets simply moves all packets from the waiting queue to the free
575 575 * queue. Note that the packets are not posted back to the LM.
576 576 */
577 577 static void BnxeRxRingDump(um_device_t * pUM,
578 578 int idx)
579 579 {
580 580 s_list_t tmpList;
581 581
582 582 BNXE_LOCK_ENTER_RX(pUM, idx);
583 583
584 584 tmpList = pUM->rxq[idx].waitRxQ;
585 585 s_list_clear(&pUM->rxq[idx].waitRxQ);
586 586
587 587 s_list_add_tail(&LM_RXQ(&pUM->lm_dev, idx).common.free_descq, &tmpList);
588 588
589 589 BNXE_LOCK_EXIT_RX(pUM, idx);
590 590 }
591 591
592 592
593 593 /*
594 594 * Aborting packets stops all rx processing by dumping the currently waiting
595 595 * packets and aborting all the rx descriptors currently posted in the LM.
596 596 */
597 597 static void BnxeRxPktsAbortIdx(um_device_t * pUM,
598 598 int idx)
599 599 {
600 600 BnxeRxRingDump(pUM, idx);
601 601
602 602 BNXE_LOCK_ENTER_RX(pUM, idx);
603 603 lm_abort(&pUM->lm_dev, ABORT_OP_RX_CHAIN, idx);
604 604 BNXE_LOCK_EXIT_RX(pUM, idx);
605 605 }
606 606
607 607
608 608 void BnxeRxPktsAbort(um_device_t * pUM,
609 609 int cliIdx)
610 610 {
611 611 int idx;
612 612
613 613 switch (cliIdx)
614 614 {
615 615 case LM_CLI_IDX_FCOE:
616 616
617 617 BnxeRxPktsAbortIdx(pUM, FCOE_CID(&pUM->lm_dev));
618 618 break;
619 619
620 620 case LM_CLI_IDX_NDIS:
621 621
622 622 LM_FOREACH_RSS_IDX(&pUM->lm_dev, idx)
623 623 {
624 624 BnxeRxPktsAbortIdx(pUM, idx);
625 625 }
626 626
627 627 break;
628 628
629 629 default:
630 630
631 631 BnxeLogWarn(pUM, "ERROR: Invalid cliIdx for BnxeRxPktsAbort (%d)", cliIdx);
632 632 break;
633 633 }
634 634 }
635 635
636 636
637 637 static int BnxeRxBufAlloc(um_device_t * pUM,
638 638 int idx,
639 639 um_rxpacket_t * pRxPkt)
640 640 {
641 641 ddi_dma_cookie_t cookie;
642 642 u32_t count;
643 643 size_t length;
644 644 int rc;
645 645
646 646 if ((rc = ddi_dma_alloc_handle(pUM->pDev,
647 647 &bnxeRxDmaAttrib,
648 648 DDI_DMA_DONTWAIT,
649 649 NULL,
650 650 &pRxPkt->dmaHandle)) != DDI_SUCCESS)
651 651 {
652 652 BnxeLogWarn(pUM, "Failed to alloc DMA handle for rx buffer");
653 653 return -1;
654 654 }
655 655
656 656 pRxPkt->rx_info.mem_size = MAX_L2_CLI_BUFFER_SIZE(&pUM->lm_dev, idx);
657 657
658 658 if ((rc = ddi_dma_mem_alloc(pRxPkt->dmaHandle,
659 659 pRxPkt->rx_info.mem_size,
660 660 &bnxeAccessAttribBUF,
661 661 DDI_DMA_STREAMING,
662 662 DDI_DMA_DONTWAIT,
663 663 NULL,
664 664 (caddr_t *)&pRxPkt->rx_info.mem_virt,
665 665 &length,
666 666 &pRxPkt->dmaAccHandle)) != DDI_SUCCESS)
667 667 {
668 668 BnxeLogWarn(pUM, "Failed to alloc DMA memory for rx buffer");
669 669 ddi_dma_free_handle(&pRxPkt->dmaHandle);
670 670 return -1;
671 671 }
672 672
673 673 if ((rc = ddi_dma_addr_bind_handle(pRxPkt->dmaHandle,
674 674 NULL,
675 675 (caddr_t)pRxPkt->rx_info.mem_virt,
676 676 pRxPkt->rx_info.mem_size,
677 677 DDI_DMA_READ | DDI_DMA_STREAMING,
678 678 DDI_DMA_DONTWAIT,
679 679 NULL,
680 680 &cookie,
681 681 &count)) != DDI_DMA_MAPPED)
682 682 {
683 683 BnxeLogWarn(pUM, "Failed to bind DMA address for rx buffer");
684 684 ddi_dma_mem_free(&pRxPkt->dmaAccHandle);
685 685 ddi_dma_free_handle(&pRxPkt->dmaHandle);
686 686 return -1;
687 687 }
688 688
689 689 pRxPkt->lm_pkt.u1.rx.mem_phys[0].as_u64 = cookie.dmac_laddress;
690 690
691 691 return 0;
692 692 }
693 693
694 694
/*
 * Post the free rx buffers for one chain to the hardware, serialized with
 * the chain's rx lock.  Always returns 0.
 */
static int BnxeRxPktsInitPostBuffersIdx(um_device_t * pUM,
                                        int idx)
{
    BNXE_LOCK_ENTER_RX(pUM, idx);
    lm_post_buffers(&pUM->lm_dev, idx, NULL, 0);
    BNXE_LOCK_EXIT_RX(pUM, idx);

    return 0;
}
704 704
705 705
706 706 int BnxeRxPktsInitPostBuffers(um_device_t * pUM,
707 707 int cliIdx)
708 708 {
709 709 int idx;
710 710
711 711 switch (cliIdx)
712 712 {
713 713 case LM_CLI_IDX_FCOE:
714 714
715 715 BnxeRxPktsInitPostBuffersIdx(pUM, FCOE_CID(&pUM->lm_dev));
716 716 break;
717 717
718 718 case LM_CLI_IDX_NDIS:
719 719
720 720 LM_FOREACH_RSS_IDX(&pUM->lm_dev, idx)
721 721 {
722 722 BnxeRxPktsInitPostBuffersIdx(pUM, idx);
723 723 }
724 724
725 725 break;
726 726
727 727 default:
728 728
729 729 BnxeLogWarn(pUM, "ERROR: Invalid cliIdx for BnxeRxPktsInit (%d)", cliIdx);
730 730 break;
731 731 }
732 732
733 733 return 0;
734 734 }
735 735
736 736
/*
 * Allocate and initialize the rx packet descriptors and DMA buffers for one
 * rx chain, placing them on the LM free queue.  (They are posted to the
 * hardware later, via BnxeRxPktsInitPostBuffers().)  Allocation failures are
 * logged but not fatal; the chain simply runs with fewer buffers.  Always
 * returns 0.
 */
static int BnxeRxPktsInitIdx(um_device_t * pUM,
                             int idx)
{
    lm_device_t *   pLM = &pUM->lm_dev;
    lm_rx_chain_t * pLmRxChain;
    um_rxpacket_t * pRxPkt;
    lm_packet_t *   pLmPkt;
    u8_t *          pTmp;
    int postCnt, i;

    BNXE_LOCK_ENTER_RX(pUM, idx);

    pLmRxChain = &LM_RXQ(pLM, idx);

    /* reset the per-chain queues and statistics for this plumb */
    s_list_clear(&pUM->rxq[idx].doneRxQ);
    pUM->rxq[idx].rxLowWater = pLM->params.l2_rx_desc_cnt[LM_CHAIN_IDX_CLI(pLM, idx)];
    pUM->rxq[idx].rxDiscards = 0;
    pUM->rxq[idx].rxCopied   = 0;

    s_list_clear(&pUM->rxq[idx].waitRxQ);

    /* allocate the packet descriptors */
    for (i = 0;
         i < pLM->params.l2_rx_desc_cnt[LM_CHAIN_IDX_CLI(pLM, idx)];
         i++)
    {
        if ((pTmp = kmem_zalloc(BnxeRxPktDescrSize(pUM),
                                KM_NOSLEEP)) == NULL)
        {
            BnxeLogWarn(pUM, "Failed to alloc an rx packet descriptor!!!");
            break; /* continue without error */
        }

        /* descriptor proper starts after the leading signature bytes */
        pRxPkt = (um_rxpacket_t *)(pTmp + SIZEOF_SIG);
        SIG(pRxPkt) = L2PACKET_RX_SIG;
        pRxPkt->signature = pUM->rxBufSignature[LM_CHAIN_IDX_CLI(pLM, idx)];

        pLmPkt = (lm_packet_t *)pRxPkt;
        pLmPkt->u1.rx.hash_val_ptr = &pRxPkt->hash_value;
        pLmPkt->l2pkt_rx_info      = &pRxPkt->rx_info;

        if (BnxeRxBufAlloc(pUM, idx, pRxPkt) != 0)
        {
            BnxeRxPktDescrFree(pUM, pRxPkt);
            break; /* continue without error */
        }

        s_list_push_tail(&pLmRxChain->common.free_descq, &pLmPkt->link);
    }

    postCnt = s_list_entry_cnt(&pLmRxChain->common.free_descq);

    if (postCnt != pLM->params.l2_rx_desc_cnt[LM_CHAIN_IDX_CLI(pLM, idx)])
    {
        BnxeLogWarn(pUM, "%d rx buffers requested and only %d allocated!!!",
                    pLM->params.l2_rx_desc_cnt[LM_CHAIN_IDX_CLI(pLM, idx)],
                    postCnt);
    }

    BNXE_LOCK_EXIT_RX(pUM, idx);

    return 0;
}
800 800
801 801
802 802 int BnxeRxPktsInit(um_device_t * pUM,
803 803 int cliIdx)
804 804 {
805 805 int idx;
806 806
807 807 /* set the rx buffer signature for this plumb */
808 808 atomic_swap_32(&pUM->rxBufSignature[cliIdx], (u32_t)ddi_get_time());
809 809
810 810 switch (cliIdx)
811 811 {
812 812 case LM_CLI_IDX_FCOE:
813 813
814 814 BnxeRxPktsInitIdx(pUM, FCOE_CID(&pUM->lm_dev));
815 815 break;
816 816
817 817 case LM_CLI_IDX_NDIS:
818 818
819 819 LM_FOREACH_RSS_IDX(&pUM->lm_dev, idx)
820 820 {
821 821 BnxeRxPktsInitIdx(pUM, idx);
822 822 }
823 823
824 824 break;
825 825
826 826 default:
827 827
828 828 BnxeLogWarn(pUM, "ERROR: Invalid cliIdx for BnxeRxPktsInit (%d)", cliIdx);
829 829 break;
830 830 }
831 831
832 832 return 0;
833 833 }
834 834
835 835
/*
 * Free all rx packet descriptors for one chain that the driver currently
 * holds: the LM free queue plus the local doneRxQ.  Buffers still loaned up
 * the stack are not touched here -- they are torn down individually by
 * BnxeRxPktFree() via the signature-mismatch path -- so a warning is logged
 * if any are outstanding.
 */
static void BnxeRxPktsFiniIdx(um_device_t * pUM,
                              int idx)
{
    lm_rx_chain_t * pLmRxChain;
    um_rxpacket_t * pRxPkt;
    s_list_t        tmpList;

    pLmRxChain = &LM_RXQ(&pUM->lm_dev, idx);

    s_list_clear(&tmpList);

    /* collect everything on the LM free queue... */
    BNXE_LOCK_ENTER_RX(pUM, idx);
    s_list_add_tail(&tmpList, &pLmRxChain->common.free_descq);
    s_list_clear(&pLmRxChain->common.free_descq);
    BNXE_LOCK_EXIT_RX(pUM, idx);

    /* ...plus everything waiting on the done queue */
    BNXE_LOCK_ENTER_DONERX(pUM, idx);
    s_list_add_tail(&tmpList, &pUM->rxq[idx].doneRxQ);
    s_list_clear(&pUM->rxq[idx].doneRxQ);
    BNXE_LOCK_EXIT_DONERX(pUM, idx);

    if (s_list_entry_cnt(&tmpList) !=
        pUM->lm_dev.params.l2_rx_desc_cnt[LM_CHAIN_IDX_CLI(&pUM->lm_dev, idx)])
    {
        BnxeLogWarn(pUM, "WARNING Missing RX packets (idx:%d) (%lu / %d - %u in stack)",
                    idx, s_list_entry_cnt(&tmpList),
                    pUM->lm_dev.params.l2_rx_desc_cnt[LM_CHAIN_IDX_CLI(&pUM->lm_dev, idx)],
                    pUM->rxq[idx].rxBufUpInStack);
    }

    /*
     * Back out all the packets in the "available for hardware use" queue.
     * Free the buffers associated with the descriptors as we go.
     */
    while (1)
    {
        pRxPkt = (um_rxpacket_t *)s_list_pop_head(&tmpList);
        if (pRxPkt == NULL)
        {
            break;
        }

        pRxPkt->lm_pkt.u1.rx.mem_phys[0].as_u64 = 0;
        pRxPkt->rx_info.mem_virt = NULL;
        pRxPkt->rx_info.mem_size = 0;

        ddi_dma_unbind_handle(pRxPkt->dmaHandle);
        ddi_dma_mem_free(&pRxPkt->dmaAccHandle);
        ddi_dma_free_handle(&pRxPkt->dmaHandle);

        BnxeRxPktDescrFree(pUM, pRxPkt);
    }
}
889 889
890 890
891 891 void BnxeRxPktsFini(um_device_t * pUM,
892 892 int cliIdx)
893 893 {
894 894 int idx;
895 895
896 896 /* reset the signature for this unplumb */
897 897 atomic_swap_32(&pUM->rxBufSignature[cliIdx], 0);
898 898
899 899 switch (cliIdx)
900 900 {
901 901 case LM_CLI_IDX_FCOE:
902 902
903 903 BnxeRxPktsFiniIdx(pUM, FCOE_CID(&pUM->lm_dev));
904 904 break;
905 905
906 906 case LM_CLI_IDX_NDIS:
907 907
908 908 LM_FOREACH_RSS_IDX(&pUM->lm_dev, idx)
909 909 {
910 910 BnxeRxPktsFiniIdx(pUM, idx);
911 911 }
912 912
913 913 break;
914 914
915 915 default:
916 916
917 917 BnxeLogWarn(pUM, "ERROR: Invalid cliIdx for BnxeRxPktsFini (%d)", cliIdx);
918 918 break;
919 919 }
920 920 }
921 921
↓ open down ↓ |
664 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX