Print this page
5045 use atomic_{inc,dec}_* instead of atomic_add_*
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/nxge/nxge_rxdma.c
+++ new/usr/src/uts/common/io/nxge/nxge_rxdma.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 #include <sys/nxge/nxge_impl.h>
28 28 #include <sys/nxge/nxge_rxdma.h>
29 29 #include <sys/nxge/nxge_hio.h>
30 30
31 31 #if !defined(_BIG_ENDIAN)
32 32 #include <npi_rx_rd32.h>
33 33 #endif
34 34 #include <npi_rx_rd64.h>
35 35 #include <npi_rx_wr64.h>
36 36
37 37 #define NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp) \
38 38 (rdcgrp + nxgep->pt_config.hw_config.def_mac_rxdma_grpid)
39 39 #define NXGE_ACTUAL_RDC(nxgep, rdc) \
40 40 (rdc + nxgep->pt_config.hw_config.start_rdc)
41 41
42 42 /*
43 43 * Globals: tunable parameters (/etc/system or adb)
44 44 *
45 45 */
46 46 extern uint32_t nxge_rbr_size;
47 47 extern uint32_t nxge_rcr_size;
48 48 extern uint32_t nxge_rbr_spare_size;
49 49 extern uint16_t nxge_rdc_buf_offset;
50 50
51 51 extern uint32_t nxge_mblks_pending;
52 52
53 53 /*
54 54 * Tunable to reduce the amount of time spent in the
55 55 * ISR doing Rx Processing.
56 56 */
57 57 extern uint32_t nxge_max_rx_pkts;
58 58
59 59 /*
60 60 * Tunables to manage the receive buffer blocks.
61 61 *
62 62 * nxge_rx_threshold_hi: copy all buffers.
63 63 * nxge_rx_bcopy_size_type: receive buffer block size type.
64 64 * nxge_rx_threshold_lo: copy only up to tunable block size type.
65 65 */
66 66 extern nxge_rxbuf_threshold_t nxge_rx_threshold_hi;
67 67 extern nxge_rxbuf_type_t nxge_rx_buf_size_type;
68 68 extern nxge_rxbuf_threshold_t nxge_rx_threshold_lo;
69 69
70 70 extern uint32_t nxge_cksum_offload;
71 71
72 72 static nxge_status_t nxge_map_rxdma(p_nxge_t, int);
73 73 static void nxge_unmap_rxdma(p_nxge_t, int);
74 74
75 75 static nxge_status_t nxge_rxdma_hw_start_common(p_nxge_t);
76 76
77 77 static nxge_status_t nxge_rxdma_hw_start(p_nxge_t, int);
78 78 static void nxge_rxdma_hw_stop(p_nxge_t, int);
79 79
80 80 static nxge_status_t nxge_map_rxdma_channel(p_nxge_t, uint16_t,
81 81 p_nxge_dma_common_t *, p_rx_rbr_ring_t *,
82 82 uint32_t,
83 83 p_nxge_dma_common_t *, p_rx_rcr_ring_t *,
84 84 p_rx_mbox_t *);
85 85 static void nxge_unmap_rxdma_channel(p_nxge_t, uint16_t,
86 86 p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t);
87 87
88 88 static nxge_status_t nxge_map_rxdma_channel_cfg_ring(p_nxge_t,
89 89 uint16_t,
90 90 p_nxge_dma_common_t *, p_rx_rbr_ring_t *,
91 91 p_rx_rcr_ring_t *, p_rx_mbox_t *);
92 92 static void nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t,
93 93 p_rx_rcr_ring_t, p_rx_mbox_t);
94 94
95 95 static nxge_status_t nxge_map_rxdma_channel_buf_ring(p_nxge_t,
96 96 uint16_t,
97 97 p_nxge_dma_common_t *,
98 98 p_rx_rbr_ring_t *, uint32_t);
99 99 static void nxge_unmap_rxdma_channel_buf_ring(p_nxge_t,
100 100 p_rx_rbr_ring_t);
101 101
102 102 static nxge_status_t nxge_rxdma_start_channel(p_nxge_t, uint16_t,
103 103 p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t);
104 104 static nxge_status_t nxge_rxdma_stop_channel(p_nxge_t, uint16_t);
105 105
106 106 static mblk_t *
107 107 nxge_rx_pkts(p_nxge_t, p_rx_rcr_ring_t, rx_dma_ctl_stat_t, int);
108 108
109 109 static void nxge_receive_packet(p_nxge_t,
110 110 p_rx_rcr_ring_t,
111 111 p_rcr_entry_t,
112 112 boolean_t *,
113 113 mblk_t **, mblk_t **);
114 114
115 115 nxge_status_t nxge_disable_rxdma_channel(p_nxge_t, uint16_t);
116 116
117 117 static p_rx_msg_t nxge_allocb(size_t, uint32_t, p_nxge_dma_common_t);
118 118 static void nxge_freeb(p_rx_msg_t);
119 119 static nxge_status_t nxge_rx_err_evnts(p_nxge_t, int, rx_dma_ctl_stat_t);
120 120
121 121 static nxge_status_t nxge_rxdma_handle_port_errors(p_nxge_t,
122 122 uint32_t, uint32_t);
123 123
124 124 static nxge_status_t nxge_rxbuf_index_info_init(p_nxge_t,
125 125 p_rx_rbr_ring_t);
126 126
127 127
128 128 static nxge_status_t
129 129 nxge_rxdma_fatal_err_recover(p_nxge_t, uint16_t);
130 130
131 131 nxge_status_t
132 132 nxge_rx_port_fatal_err_recover(p_nxge_t);
133 133
134 134 static void nxge_rxdma_databuf_free(p_rx_rbr_ring_t);
135 135
/*
 * nxge_init_rxdma_channels
 *
 *	Add every RXDMA channel of every populated logical receive group
 *	to its group, starting the common (FZC) receive hardware first
 *	when running in the service domain.  On any failure the function
 *	unwinds by removing every channel in every populated group.
 *
 * Arguments:
 *	nxgep
 *
 * Returns:
 *	NXGE_OK on success, NXGE_ERROR if the common hardware start or
 *	any channel add fails.
 */
nxge_status_t
nxge_init_rxdma_channels(p_nxge_t nxgep)
{
	nxge_grp_set_t *set = &nxgep->rx_set;
	int i, count, channel;
	nxge_grp_t *group;
	dc_map_t map;
	int dev_gindex;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_rxdma_channels"));

	/* Guest domains do not own the FZC registers; skip common start. */
	if (!isLDOMguest(nxgep)) {
		if (nxge_rxdma_hw_start_common(nxgep) != NXGE_OK) {
			cmn_err(CE_NOTE, "hw_start_common");
			return (NXGE_ERROR);
		}
	}

	/*
	 * NXGE_LOGICAL_GROUP_MAX > NXGE_MAX_RDC_GROUPS (8)
	 * We only have 8 hardware RDC tables, but we may have
	 * up to 16 logical (software-defined) groups of RDCS,
	 * if we make use of layer 3 & 4 hardware classification.
	 */
	for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
		if ((1 << i) & set->lg.map) {
			group = set->group[i];
			/* Translate the logical group to its device table. */
			dev_gindex =
			    nxgep->pt_config.hw_config.def_mac_rxdma_grpid + i;
			map = nxgep->pt_config.rdc_grps[dev_gindex].map;
			/* Add each channel present in the group's RDC map. */
			for (channel = 0; channel < NXGE_MAX_RDCS; channel++) {
				if ((1 << channel) & map) {
					if ((nxge_grp_dc_add(nxgep,
					    group, VP_BOUND_RX, channel)))
						goto init_rxdma_channels_exit;
				}
			}
		}
		/*
		 * NOTE(review): count is incremented even for bits not set
		 * in lg.map, so this terminates after lg.count iterations
		 * rather than after lg.count populated groups — confirm
		 * this is the intended early-out.
		 */
		if (++count == set->lg.count)
			break;
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels"));
	return (NXGE_OK);

init_rxdma_channels_exit:
	/* Unwind: remove every channel of every populated group. */
	for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
		if ((1 << i) & set->lg.map) {
			group = set->group[i];
			dev_gindex =
			    nxgep->pt_config.hw_config.def_mac_rxdma_grpid + i;
			map = nxgep->pt_config.rdc_grps[dev_gindex].map;
			for (channel = 0; channel < NXGE_MAX_RDCS; channel++) {
				if ((1 << channel) & map) {
					nxge_grp_dc_remove(nxgep,
					    VP_BOUND_RX, channel);
				}
			}
		}
		if (++count == set->lg.count)
			break;
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels"));
	return (NXGE_ERROR);
}
202 202
/*
 * nxge_init_rxdma_channel
 *
 *	Map and start a single RXDMA channel, creating its kstats on
 *	first use.  In a sun4v guest domain the channel's RCR ring is
 *	additionally bound to its interrupt via the HIO layer.
 *
 * Arguments:
 *	nxge
 *	channel		The channel to initialize.
 *
 * Returns:
 *	NXGE_OK, or the failing step's status; the channel is unmapped
 *	again on bind/start failure.
 */
nxge_status_t
nxge_init_rxdma_channel(p_nxge_t nxge, int channel)
{
	nxge_status_t status;

	NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_rxdma_channel"));

	status = nxge_map_rxdma(nxge, channel);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "<== nxge_init_rxdma: status 0x%x", status));
		return (status);
	}

#if defined(sun4v)
	if (isLDOMguest(nxge)) {
		/* set rcr_ring */
		p_rx_rcr_ring_t ring = nxge->rx_rcr_rings->rcr_rings[channel];

		/* Guests receive their RX interrupts through the HIO path. */
		status = nxge_hio_rxdma_bind_intr(nxge, ring, channel);
		if (status != NXGE_OK) {
			nxge_unmap_rxdma(nxge, channel);
			return (status);
		}
	}
#endif

	status = nxge_rxdma_hw_start(nxge, channel);
	if (status != NXGE_OK) {
		nxge_unmap_rxdma(nxge, channel);
	}

	/* Create the per-channel kstats only once per channel. */
	if (!nxge->statsp->rdc_ksp[channel])
		nxge_setup_rdc_kstats(nxge, channel);

	NXGE_DEBUG_MSG((nxge, MEM2_CTL,
	    "<== nxge_init_rxdma_channel: status 0x%x", status));

	return (status);
}
243 243
244 244 void
245 245 nxge_uninit_rxdma_channels(p_nxge_t nxgep)
246 246 {
247 247 nxge_grp_set_t *set = &nxgep->rx_set;
248 248 int rdc;
249 249
250 250 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channels"));
251 251
252 252 if (set->owned.map == 0) {
253 253 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
254 254 "nxge_uninit_rxdma_channels: no channels"));
255 255 return;
256 256 }
257 257
258 258 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
259 259 if ((1 << rdc) & set->owned.map) {
260 260 nxge_grp_dc_remove(nxgep, VP_BOUND_RX, rdc);
261 261 }
262 262 }
263 263
264 264 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_rxdma_channels"));
265 265 }
266 266
267 267 void
268 268 nxge_uninit_rxdma_channel(p_nxge_t nxgep, int channel)
269 269 {
270 270 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channel"));
271 271
272 272 if (nxgep->statsp->rdc_ksp[channel]) {
273 273 kstat_delete(nxgep->statsp->rdc_ksp[channel]);
274 274 nxgep->statsp->rdc_ksp[channel] = 0;
275 275 }
276 276
277 277 nxge_rxdma_hw_stop(nxgep, channel);
278 278 nxge_unmap_rxdma(nxgep, channel);
279 279
280 280 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uinit_rxdma_channel"));
281 281 }
282 282
283 283 nxge_status_t
284 284 nxge_reset_rxdma_channel(p_nxge_t nxgep, uint16_t channel)
285 285 {
286 286 npi_handle_t handle;
287 287 npi_status_t rs = NPI_SUCCESS;
288 288 nxge_status_t status = NXGE_OK;
289 289
290 290 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_reset_rxdma_channel"));
291 291
292 292 handle = NXGE_DEV_NPI_HANDLE(nxgep);
293 293 rs = npi_rxdma_cfg_rdc_reset(handle, channel);
294 294
295 295 if (rs != NPI_SUCCESS) {
296 296 status = NXGE_ERROR | rs;
297 297 }
298 298
299 299 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_reset_rxdma_channel"));
300 300
301 301 return (status);
302 302 }
303 303
304 304 void
305 305 nxge_rxdma_regs_dump_channels(p_nxge_t nxgep)
306 306 {
307 307 nxge_grp_set_t *set = &nxgep->rx_set;
308 308 int rdc;
309 309
310 310 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_regs_dump_channels"));
311 311
312 312 if (!isLDOMguest(nxgep)) {
313 313 npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep);
314 314 (void) npi_rxdma_dump_fzc_regs(handle);
315 315 }
316 316
317 317 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) {
318 318 NXGE_DEBUG_MSG((nxgep, TX_CTL,
319 319 "nxge_rxdma_regs_dump_channels: "
320 320 "NULL ring pointer(s)"));
321 321 return;
322 322 }
323 323
324 324 if (set->owned.map == 0) {
325 325 NXGE_DEBUG_MSG((nxgep, RX_CTL,
326 326 "nxge_rxdma_regs_dump_channels: no channels"));
327 327 return;
328 328 }
329 329
330 330 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
331 331 if ((1 << rdc) & set->owned.map) {
332 332 rx_rbr_ring_t *ring =
333 333 nxgep->rx_rbr_rings->rbr_rings[rdc];
334 334 if (ring) {
335 335 (void) nxge_dump_rxdma_channel(nxgep, rdc);
336 336 }
337 337 }
338 338 }
339 339
340 340 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_regs_dump"));
341 341 }
342 342
343 343 nxge_status_t
344 344 nxge_dump_rxdma_channel(p_nxge_t nxgep, uint8_t channel)
345 345 {
346 346 npi_handle_t handle;
347 347 npi_status_t rs = NPI_SUCCESS;
348 348 nxge_status_t status = NXGE_OK;
349 349
350 350 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_dump_rxdma_channel"));
351 351
352 352 handle = NXGE_DEV_NPI_HANDLE(nxgep);
353 353 rs = npi_rxdma_dump_rdc_regs(handle, channel);
354 354
355 355 if (rs != NPI_SUCCESS) {
356 356 status = NXGE_ERROR | rs;
357 357 }
358 358 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dump_rxdma_channel"));
359 359 return (status);
360 360 }
361 361
362 362 nxge_status_t
363 363 nxge_init_rxdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel,
364 364 p_rx_dma_ent_msk_t mask_p)
365 365 {
366 366 npi_handle_t handle;
367 367 npi_status_t rs = NPI_SUCCESS;
368 368 nxge_status_t status = NXGE_OK;
369 369
370 370 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
371 371 "<== nxge_init_rxdma_channel_event_mask"));
372 372
373 373 handle = NXGE_DEV_NPI_HANDLE(nxgep);
374 374 rs = npi_rxdma_event_mask(handle, OP_SET, channel, mask_p);
375 375 if (rs != NPI_SUCCESS) {
376 376 status = NXGE_ERROR | rs;
377 377 }
378 378
379 379 return (status);
380 380 }
381 381
382 382 nxge_status_t
383 383 nxge_init_rxdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel,
384 384 p_rx_dma_ctl_stat_t cs_p)
385 385 {
386 386 npi_handle_t handle;
387 387 npi_status_t rs = NPI_SUCCESS;
388 388 nxge_status_t status = NXGE_OK;
389 389
390 390 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
391 391 "<== nxge_init_rxdma_channel_cntl_stat"));
392 392
393 393 handle = NXGE_DEV_NPI_HANDLE(nxgep);
394 394 rs = npi_rxdma_control_status(handle, OP_SET, channel, cs_p);
395 395
396 396 if (rs != NPI_SUCCESS) {
397 397 status = NXGE_ERROR | rs;
398 398 }
399 399
400 400 return (status);
401 401 }
402 402
403 403 /*
404 404 * nxge_rxdma_cfg_rdcgrp_default_rdc
405 405 *
406 406 * Set the default RDC for an RDC Group (Table)
407 407 *
408 408 * Arguments:
409 409 * nxgep
410 410 * rdcgrp The group to modify
411 411 * rdc The new default RDC.
412 412 *
413 413 * Notes:
414 414 *
415 415 * NPI/NXGE function calls:
416 416 * npi_rxdma_cfg_rdc_table_default_rdc()
417 417 *
418 418 * Registers accessed:
419 419 * RDC_TBL_REG: FZC_ZCP + 0x10000
420 420 *
421 421 * Context:
422 422 * Service domain
423 423 */
424 424 nxge_status_t
425 425 nxge_rxdma_cfg_rdcgrp_default_rdc(
426 426 p_nxge_t nxgep,
427 427 uint8_t rdcgrp,
428 428 uint8_t rdc)
429 429 {
430 430 npi_handle_t handle;
431 431 npi_status_t rs = NPI_SUCCESS;
432 432 p_nxge_dma_pt_cfg_t p_dma_cfgp;
433 433 p_nxge_rdc_grp_t rdc_grp_p;
434 434 uint8_t actual_rdcgrp, actual_rdc;
435 435
436 436 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
437 437 " ==> nxge_rxdma_cfg_rdcgrp_default_rdc"));
438 438 p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
439 439
440 440 handle = NXGE_DEV_NPI_HANDLE(nxgep);
441 441
442 442 /*
443 443 * This has to be rewritten. Do we even allow this anymore?
444 444 */
445 445 rdc_grp_p = &p_dma_cfgp->rdc_grps[rdcgrp];
446 446 RDC_MAP_IN(rdc_grp_p->map, rdc);
447 447 rdc_grp_p->def_rdc = rdc;
448 448
449 449 actual_rdcgrp = NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp);
450 450 actual_rdc = NXGE_ACTUAL_RDC(nxgep, rdc);
451 451
452 452 rs = npi_rxdma_cfg_rdc_table_default_rdc(
453 453 handle, actual_rdcgrp, actual_rdc);
454 454
455 455 if (rs != NPI_SUCCESS) {
456 456 return (NXGE_ERROR | rs);
457 457 }
458 458 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
459 459 " <== nxge_rxdma_cfg_rdcgrp_default_rdc"));
460 460 return (NXGE_OK);
461 461 }
462 462
463 463 nxge_status_t
464 464 nxge_rxdma_cfg_port_default_rdc(p_nxge_t nxgep, uint8_t port, uint8_t rdc)
465 465 {
466 466 npi_handle_t handle;
467 467
468 468 uint8_t actual_rdc;
469 469 npi_status_t rs = NPI_SUCCESS;
470 470
471 471 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
472 472 " ==> nxge_rxdma_cfg_port_default_rdc"));
473 473
474 474 handle = NXGE_DEV_NPI_HANDLE(nxgep);
475 475 actual_rdc = rdc; /* XXX Hack! */
476 476 rs = npi_rxdma_cfg_default_port_rdc(handle, port, actual_rdc);
477 477
478 478
479 479 if (rs != NPI_SUCCESS) {
480 480 return (NXGE_ERROR | rs);
481 481 }
482 482 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
483 483 " <== nxge_rxdma_cfg_port_default_rdc"));
484 484
485 485 return (NXGE_OK);
486 486 }
487 487
488 488 nxge_status_t
489 489 nxge_rxdma_cfg_rcr_threshold(p_nxge_t nxgep, uint8_t channel,
490 490 uint16_t pkts)
491 491 {
492 492 npi_status_t rs = NPI_SUCCESS;
493 493 npi_handle_t handle;
494 494 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
495 495 " ==> nxge_rxdma_cfg_rcr_threshold"));
496 496 handle = NXGE_DEV_NPI_HANDLE(nxgep);
497 497
498 498 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, pkts);
499 499
500 500 if (rs != NPI_SUCCESS) {
501 501 return (NXGE_ERROR | rs);
502 502 }
503 503 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_threshold"));
504 504 return (NXGE_OK);
505 505 }
506 506
507 507 nxge_status_t
508 508 nxge_rxdma_cfg_rcr_timeout(p_nxge_t nxgep, uint8_t channel,
509 509 uint16_t tout, uint8_t enable)
510 510 {
511 511 npi_status_t rs = NPI_SUCCESS;
512 512 npi_handle_t handle;
513 513 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " ==> nxge_rxdma_cfg_rcr_timeout"));
514 514 handle = NXGE_DEV_NPI_HANDLE(nxgep);
515 515 if (enable == 0) {
516 516 rs = npi_rxdma_cfg_rdc_rcr_timeout_disable(handle, channel);
517 517 } else {
518 518 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel,
519 519 tout);
520 520 }
521 521
522 522 if (rs != NPI_SUCCESS) {
523 523 return (NXGE_ERROR | rs);
524 524 }
525 525 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_timeout"));
526 526 return (NXGE_OK);
527 527 }
528 528
/*
 * nxge_enable_rxdma_channel
 *
 *	Program one RDC with the ring configuration composed at init
 *	time (mailbox, RBR, RCR, buffer sizes, threshold/timeout) and
 *	bring the DMA engine up.
 *
 * Arguments:
 *	nxgep
 *	channel		The channel to enable.
 *	rbr_p		Receive Block Ring state for this channel.
 *	rcr_p		Receive Completion Ring state for this channel.
 *	mbox_p		Mailbox state for this channel.
 *
 * Returns:
 *	NXGE_OK, or NXGE_ERROR or'ed with the first failing NPI status.
 */
nxge_status_t
nxge_enable_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
    p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p)
{
	npi_handle_t		handle;
	rdc_desc_cfg_t		rdc_desc;
	p_rcrcfig_b_t		cfgb_p;
	npi_status_t		rs = NPI_SUCCESS;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel"));
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	/*
	 * Use configuration data composed at init time.
	 * Write to hardware the receive ring configurations.
	 */
	rdc_desc.mbox_enable = 1;
	rdc_desc.mbox_addr = mbox_p->mbox_addr;
	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_enable_rxdma_channel: mboxp $%p($%p)",
	    mbox_p->mbox_addr, rdc_desc.mbox_addr));

	rdc_desc.rbr_len = rbr_p->rbb_max;
	rdc_desc.rbr_addr = rbr_p->rbr_addr;

	/*
	 * Translate the driver's block-size code into the NPI page size.
	 * NOTE(review): no default case — other RBR_BKSIZE_* codes would
	 * leave page_size unset; confirm the codes above are exhaustive.
	 */
	switch (nxgep->rx_bksize_code) {
	case RBR_BKSIZE_4K:
		rdc_desc.page_size = SIZE_4KB;
		break;
	case RBR_BKSIZE_8K:
		rdc_desc.page_size = SIZE_8KB;
		break;
	case RBR_BKSIZE_16K:
		rdc_desc.page_size = SIZE_16KB;
		break;
	case RBR_BKSIZE_32K:
		rdc_desc.page_size = SIZE_32KB;
		break;
	}

	/* The three supported packet buffer sizes for this ring. */
	rdc_desc.size0 = rbr_p->npi_pkt_buf_size0;
	rdc_desc.valid0 = 1;

	rdc_desc.size1 = rbr_p->npi_pkt_buf_size1;
	rdc_desc.valid1 = 1;

	rdc_desc.size2 = rbr_p->npi_pkt_buf_size2;
	rdc_desc.valid2 = 1;

	rdc_desc.full_hdr = rcr_p->full_hdr_flag;
	rdc_desc.offset = rcr_p->sw_priv_hdr_len;

	rdc_desc.rcr_len = rcr_p->comp_size;
	rdc_desc.rcr_addr = rcr_p->rcr_addr;

	cfgb_p = &(rcr_p->rcr_cfgb);
	rdc_desc.rcr_threshold = cfgb_p->bits.ldw.pthres;
	/* For now, disable this timeout in a guest domain. */
	if (isLDOMguest(nxgep)) {
		rdc_desc.rcr_timeout = 0;
		rdc_desc.rcr_timeout_enable = 0;
	} else {
		rdc_desc.rcr_timeout = cfgb_p->bits.ldw.timeout;
		rdc_desc.rcr_timeout_enable = cfgb_p->bits.ldw.entout;
	}

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: "
	    "rbr_len qlen %d pagesize code %d rcr_len %d",
	    rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len));
	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: "
	    "size 0 %d size 1 %d size 2 %d",
	    rbr_p->npi_pkt_buf_size0, rbr_p->npi_pkt_buf_size1,
	    rbr_p->npi_pkt_buf_size2));

	/* RF (second-revision) NIU hardware takes an extra flag. */
	if (nxgep->niu_hw_type == NIU_HW_TYPE_RF)
		rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc,
		    &rdc_desc, B_TRUE);
	else
		rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc,
		    &rdc_desc, B_FALSE);
	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}

	/*
	 * Enable the timeout and threshold.
	 */
	rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel,
	    rdc_desc.rcr_threshold);
	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}

	rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel,
	    rdc_desc.rcr_timeout);
	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}

	if (!isLDOMguest(nxgep)) {
		/* Enable the DMA */
		rs = npi_rxdma_cfg_rdc_enable(handle, channel);
		if (rs != NPI_SUCCESS) {
			return (NXGE_ERROR | rs);
		}
	}

	/* Kick the DMA engine. */
	npi_rxdma_rdc_rbr_kick(handle, channel, rbr_p->rbb_max);

	if (!isLDOMguest(nxgep)) {
		/* Clear the rbr empty bit */
		(void) npi_rxdma_channel_rbr_empty_clear(handle, channel);
	}

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_enable_rxdma_channel"));

	return (NXGE_OK);
}
647 647
648 648 nxge_status_t
649 649 nxge_disable_rxdma_channel(p_nxge_t nxgep, uint16_t channel)
650 650 {
651 651 npi_handle_t handle;
652 652 npi_status_t rs = NPI_SUCCESS;
653 653
654 654 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_disable_rxdma_channel"));
655 655 handle = NXGE_DEV_NPI_HANDLE(nxgep);
656 656
657 657 /* disable the DMA */
658 658 rs = npi_rxdma_cfg_rdc_disable(handle, channel);
659 659 if (rs != NPI_SUCCESS) {
660 660 NXGE_DEBUG_MSG((nxgep, RX_CTL,
661 661 "<== nxge_disable_rxdma_channel:failed (0x%x)",
662 662 rs));
663 663 return (NXGE_ERROR | rs);
664 664 }
665 665
666 666 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_disable_rxdma_channel"));
667 667 return (NXGE_OK);
668 668 }
669 669
670 670 nxge_status_t
671 671 nxge_rxdma_channel_rcrflush(p_nxge_t nxgep, uint8_t channel)
672 672 {
673 673 npi_handle_t handle;
674 674 nxge_status_t status = NXGE_OK;
675 675
676 676 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
677 677 "<== nxge_init_rxdma_channel_rcrflush"));
678 678
679 679 handle = NXGE_DEV_NPI_HANDLE(nxgep);
680 680 npi_rxdma_rdc_rcr_flush(handle, channel);
681 681
682 682 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
683 683 "<== nxge_init_rxdma_channel_rcrflsh"));
684 684 return (status);
685 685
686 686 }
687 687
688 688 #define MID_INDEX(l, r) ((r + l + 1) >> 1)
689 689
690 690 #define TO_LEFT -1
691 691 #define TO_RIGHT 1
692 692 #define BOTH_RIGHT (TO_RIGHT + TO_RIGHT)
693 693 #define BOTH_LEFT (TO_LEFT + TO_LEFT)
694 694 #define IN_MIDDLE (TO_RIGHT + TO_LEFT)
695 695 #define NO_HINT 0xffffffff
696 696
/*
 * nxge_rxbuf_pp_to_vp
 *
 *	Translate a packet buffer's DVMA address, as reported by the
 *	hardware in a completion entry, back to the driver's kernel
 *	virtual address, the message-block index, and the offset within
 *	the buffer block.  A per-size "hint" caches the chunk most
 *	recently matched for each buffer size; on a hint miss the sorted
 *	chunk table is binary-searched.
 *
 * Arguments:
 *	nxgep
 *	rbr_p		The RBR whose chunk table describes the buffer.
 *	pktbufsz_type	Buffer size type (0/1/2 or RCR_SINGLE_BLOCK).
 *	pkt_buf_addr_pp	The DVMA (physical-side) buffer address.
 *	pkt_buf_addr_p	Out: kernel virtual address of the buffer.
 *	bufoffset	Out: offset of the buffer within its block.
 *	msg_index	Out: index of the rx_msg_t for this buffer.
 *
 * Returns:
 *	NXGE_OK, or NXGE_ERROR when the type is unknown or the address
 *	is not found in the chunk table.
 */
/*ARGSUSED*/
nxge_status_t
nxge_rxbuf_pp_to_vp(p_nxge_t nxgep, p_rx_rbr_ring_t rbr_p,
	uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp,
	uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index)
{
	int bufsize;
	uint64_t pktbuf_pp;
	uint64_t dvma_addr;
	rxring_info_t *ring_info;
	int base_side, end_side;
	int r_index, l_index, anchor_index;
	int found, search_done;
	uint32_t offset, chunk_size, block_size, page_size_mask;
	uint32_t chunk_index, block_index, total_index;
	int max_iterations, iteration;
	rxbuf_index_info_t *bufinfo;

	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_rxbuf_pp_to_vp"));

	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
	    "==> nxge_rxbuf_pp_to_vp: buf_pp $%p btype %d",
	    pkt_buf_addr_pp,
	    pktbufsz_type));
	/* On 32-bit x86 a pointer is 32 bits; widen via uint32_t. */
#if defined(__i386)
	pktbuf_pp = (uint64_t)(uint32_t)pkt_buf_addr_pp;
#else
	pktbuf_pp = (uint64_t)pkt_buf_addr_pp;
#endif

	/* Select the configured buffer size for this type. */
	switch (pktbufsz_type) {
	case 0:
		bufsize = rbr_p->pkt_buf_size0;
		break;
	case 1:
		bufsize = rbr_p->pkt_buf_size1;
		break;
	case 2:
		bufsize = rbr_p->pkt_buf_size2;
		break;
	case RCR_SINGLE_BLOCK:
		bufsize = 0;
		anchor_index = 0;
		break;
	default:
		return (NXGE_ERROR);
	}

	/* A single-chunk ring needs no search at all. */
	if (rbr_p->num_blocks == 1) {
		anchor_index = 0;
		ring_info = rbr_p->ring_info;
		bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
		    "==> nxge_rxbuf_pp_to_vp: (found, 1 block) "
		    "buf_pp $%p btype %d anchor_index %d "
		    "bufinfo $%p",
		    pkt_buf_addr_pp,
		    pktbufsz_type,
		    anchor_index,
		    bufinfo));

		goto found_index;
	}

	/*
	 * NOTE(review): for types 0-2 anchor_index is still unset here;
	 * it is only read by this debug message before being assigned
	 * below — confirm this is benign in DEBUG builds.
	 */
	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
	    "==> nxge_rxbuf_pp_to_vp: "
	    "buf_pp $%p btype %d anchor_index %d",
	    pkt_buf_addr_pp,
	    pktbufsz_type,
	    anchor_index));

	ring_info = rbr_p->ring_info;
	found = B_FALSE;
	bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
	iteration = 0;
	max_iterations = ring_info->max_iterations;
	/*
	 * First check if this block has been seen
	 * recently. This is indicated by a hint which
	 * is initialized when the first buffer of the block
	 * is seen. The hint is reset when the last buffer of
	 * the block has been processed.
	 * As three block sizes are supported, three hints
	 * are kept. The idea behind the hints is that once
	 * the hardware uses a block for a buffer of that
	 * size, it will use it exclusively for that size
	 * and will use it until it is exhausted. It is assumed
	 * that there would be a single block being used for the same
	 * buffer sizes at any given time.
	 */
	if (ring_info->hint[pktbufsz_type] != NO_HINT) {
		anchor_index = ring_info->hint[pktbufsz_type];
		dvma_addr = bufinfo[anchor_index].dvma_addr;
		chunk_size = bufinfo[anchor_index].buf_size;
		if ((pktbuf_pp >= dvma_addr) &&
		    (pktbuf_pp < (dvma_addr + chunk_size))) {
			found = B_TRUE;
			/*
			 * check if this is the last buffer in the block
			 * If so, then reset the hint for the size;
			 */

			if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size))
				ring_info->hint[pktbufsz_type] = NO_HINT;
		}
	}

	if (found == B_FALSE) {
		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
		    "==> nxge_rxbuf_pp_to_vp: (!found)"
		    "buf_pp $%p btype %d anchor_index %d",
		    pkt_buf_addr_pp,
		    pktbufsz_type,
		    anchor_index));

		/*
		 * This is the first buffer of the block of this
		 * size. Need to search the whole information
		 * array.
		 * the search algorithm uses a binary tree search
		 * algorithm. It assumes that the information is
		 * already sorted with increasing order
		 * info[0] < info[1] < info[2]  .... < info[n-1]
		 * where n is the size of the information array
		 */
		r_index = rbr_p->num_blocks - 1;
		l_index = 0;
		search_done = B_FALSE;
		anchor_index = MID_INDEX(r_index, l_index);
		while (search_done == B_FALSE) {
			/* Last probe: pointers met or iteration cap hit. */
			if ((r_index == l_index) ||
			    (iteration >= max_iterations))
				search_done = B_TRUE;
			end_side = TO_RIGHT; /* to the right */
			base_side = TO_LEFT; /* to the left */
			/* read the DVMA address information and sort it */
			dvma_addr = bufinfo[anchor_index].dvma_addr;
			chunk_size = bufinfo[anchor_index].buf_size;
			NXGE_DEBUG_MSG((nxgep, RX2_CTL,
			    "==> nxge_rxbuf_pp_to_vp: (searching)"
			    "buf_pp $%p btype %d "
			    "anchor_index %d chunk_size %d dvmaaddr $%p",
			    pkt_buf_addr_pp,
			    pktbufsz_type,
			    anchor_index,
			    chunk_size,
			    dvma_addr));

			/* Classify the address against this chunk's span. */
			if (pktbuf_pp >= dvma_addr)
				base_side = TO_RIGHT; /* to the right */
			if (pktbuf_pp < (dvma_addr + chunk_size))
				end_side = TO_LEFT; /* to the left */

			switch (base_side + end_side) {
			case IN_MIDDLE:
				/* found */
				found = B_TRUE;
				search_done = B_TRUE;
				/* Cache the hint unless this is the last buffer. */
				if ((pktbuf_pp + bufsize) <
				    (dvma_addr + chunk_size))
					ring_info->hint[pktbufsz_type] =
					    bufinfo[anchor_index].buf_index;
				break;
			case BOTH_RIGHT:
				/* not found: go to the right */
				l_index = anchor_index + 1;
				anchor_index = MID_INDEX(r_index, l_index);
				break;

			case BOTH_LEFT:
				/* not found: go to the left */
				r_index = anchor_index - 1;
				anchor_index = MID_INDEX(r_index, l_index);
				break;
			default: /* should not come here */
				return (NXGE_ERROR);
			}
			iteration++;
		}

		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
		    "==> nxge_rxbuf_pp_to_vp: (search done)"
		    "buf_pp $%p btype %d anchor_index %d",
		    pkt_buf_addr_pp,
		    pktbufsz_type,
		    anchor_index));
	}

	if (found == B_FALSE) {
		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
		    "==> nxge_rxbuf_pp_to_vp: (search failed)"
		    "buf_pp $%p btype %d anchor_index %d",
		    pkt_buf_addr_pp,
		    pktbufsz_type,
		    anchor_index));
		return (NXGE_ERROR);
	}

found_index:
	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
	    "==> nxge_rxbuf_pp_to_vp: (FOUND1)"
	    "buf_pp $%p btype %d bufsize %d anchor_index %d",
	    pkt_buf_addr_pp,
	    pktbufsz_type,
	    bufsize,
	    anchor_index));

	/* index of the first block in this chunk */
	chunk_index = bufinfo[anchor_index].start_index;
	dvma_addr = bufinfo[anchor_index].dvma_addr;
	page_size_mask = ring_info->block_size_mask;

	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
	    "==> nxge_rxbuf_pp_to_vp: (FOUND3), get chunk)"
	    "buf_pp $%p btype %d bufsize %d "
	    "anchor_index %d chunk_index %d dvma $%p",
	    pkt_buf_addr_pp,
	    pktbufsz_type,
	    bufsize,
	    anchor_index,
	    chunk_index,
	    dvma_addr));

	offset = pktbuf_pp - dvma_addr; /* offset within the chunk */
	block_size = rbr_p->block_size; /* System  block(page) size */

	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
	    "==> nxge_rxbuf_pp_to_vp: (FOUND4), get chunk)"
	    "buf_pp $%p btype %d bufsize %d "
	    "anchor_index %d chunk_index %d dvma $%p "
	    "offset %d block_size %d",
	    pkt_buf_addr_pp,
	    pktbufsz_type,
	    bufsize,
	    anchor_index,
	    chunk_index,
	    dvma_addr,
	    offset,
	    block_size));

	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> getting total index"));

	block_index = (offset / block_size); /* index within chunk */
	total_index = chunk_index + block_index;


	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
	    "==> nxge_rxbuf_pp_to_vp: "
	    "total_index %d dvma_addr $%p "
	    "offset %d block_size %d "
	    "block_index %d ",
	    total_index, dvma_addr,
	    offset, block_size,
	    block_index));
	/* Rebase the offset onto the chunk's kernel virtual address. */
#if defined(__i386)
	*pkt_buf_addr_p = (uint64_t *)((uint32_t)bufinfo[anchor_index].kaddr +
	    (uint32_t)offset);
#else
	*pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr +
	    (uint64_t)offset);
#endif

	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
	    "==> nxge_rxbuf_pp_to_vp: "
	    "total_index %d dvma_addr $%p "
	    "offset %d block_size %d "
	    "block_index %d "
	    "*pkt_buf_addr_p $%p",
	    total_index, dvma_addr,
	    offset, block_size,
	    block_index,
	    *pkt_buf_addr_p));


	*msg_index = total_index;
	/* Offset within the system page, per the ring's block-size mask. */
	*bufoffset =  (offset & page_size_mask);

	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
	    "==> nxge_rxbuf_pp_to_vp: get msg index: "
	    "msg_index %d bufoffset_index %d",
	    *msg_index,
	    *bufoffset));

	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rxbuf_pp_to_vp"));

	return (NXGE_OK);
}
984 984
985 985 /*
986 986 * used by quick sort (qsort) function
987 987 * to perform comparison
988 988 */
989 989 static int
990 990 nxge_sort_compare(const void *p1, const void *p2)
991 991 {
992 992
993 993 rxbuf_index_info_t *a, *b;
994 994
995 995 a = (rxbuf_index_info_t *)p1;
996 996 b = (rxbuf_index_info_t *)p2;
997 997
998 998 if (a->dvma_addr > b->dvma_addr)
999 999 return (1);
1000 1000 if (a->dvma_addr < b->dvma_addr)
1001 1001 return (-1);
1002 1002 return (0);
1003 1003 }
1004 1004
1005 1005
1006 1006
1007 1007 /*
1008 1008 * grabbed this sort implementation from common/syscall/avl.c
1009 1009 *
1010 1010 */
1011 1011 /*
1012 1012 * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified.
1013 1013 * v = Ptr to array/vector of objs
1014 1014 * n = # objs in the array
1015 1015 * s = size of each obj (must be multiples of a word size)
1016 1016 * f = ptr to function to compare two objs
1017 1017 * returns (-1 = less than, 0 = equal, 1 = greater than
1018 1018 */
1019 1019 void
1020 1020 nxge_ksort(caddr_t v, int n, int s, int (*f)())
1021 1021 {
1022 1022 int g, i, j, ii;
1023 1023 unsigned int *p1, *p2;
1024 1024 unsigned int tmp;
1025 1025
1026 1026 /* No work to do */
1027 1027 if (v == NULL || n <= 1)
1028 1028 return;
1029 1029 /* Sanity check on arguments */
1030 1030 ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0);
1031 1031 ASSERT(s > 0);
1032 1032
1033 1033 for (g = n / 2; g > 0; g /= 2) {
1034 1034 for (i = g; i < n; i++) {
1035 1035 for (j = i - g; j >= 0 &&
1036 1036 (*f)(v + j * s, v + (j + g) * s) == 1;
1037 1037 j -= g) {
1038 1038 p1 = (unsigned *)(v + j * s);
1039 1039 p2 = (unsigned *)(v + (j + g) * s);
1040 1040 for (ii = 0; ii < s / 4; ii++) {
1041 1041 tmp = *p1;
1042 1042 *p1++ = *p2;
1043 1043 *p2++ = tmp;
1044 1044 }
1045 1045 }
1046 1046 }
1047 1047 }
1048 1048 }
1049 1049
1050 1050 /*
1051 1051 * Initialize data structures required for rxdma
1052 1052 * buffer dvma->vmem address lookup
1053 1053 */
/*ARGSUSED*/
static nxge_status_t
nxge_rxbuf_index_info_init(p_nxge_t nxgep, p_rx_rbr_ring_t rbrp)
{

	int index;
	rxring_info_t *ring_info;
	int max_iteration = 0, max_index = 0;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_rxbuf_index_info_init"));

	ring_info = rbrp->ring_info;
	/* Invalidate all lookup hints; they are repopulated on first use. */
	ring_info->hint[0] = NO_HINT;
	ring_info->hint[1] = NO_HINT;
	ring_info->hint[2] = NO_HINT;
	max_index = rbrp->num_blocks;

	/* read the DVMA address information and sort it */
	/* do init of the information array */

	NXGE_DEBUG_MSG((nxgep, DMA2_CTL,
	    " nxge_rxbuf_index_info_init Sort ptrs"));

	/* sort the array by ascending dvma_addr (see nxge_sort_compare) */
	nxge_ksort((void *)ring_info->buffer, max_index,
	    sizeof (rxbuf_index_info_t), nxge_sort_compare);

	for (index = 0; index < max_index; index++) {
		NXGE_DEBUG_MSG((nxgep, DMA2_CTL,
		    " nxge_rxbuf_index_info_init: sorted chunk %d "
		    " ioaddr $%p kaddr $%p size %x",
		    index, ring_info->buffer[index].dvma_addr,
		    ring_info->buffer[index].kaddr,
		    ring_info->buffer[index].buf_size));
	}

	/*
	 * max_iterations bounds the number of probes a binary search
	 * over max_index sorted entries can need: smallest power of two
	 * exceeding max_index, plus one.
	 */
	max_iteration = 0;
	while (max_index >= (1ULL << max_iteration))
		max_iteration++;
	ring_info->max_iterations = max_iteration + 1;
	NXGE_DEBUG_MSG((nxgep, DMA2_CTL,
	    " nxge_rxbuf_index_info_init Find max iter %d",
	    ring_info->max_iterations));

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxbuf_index_info_init"));
	return (NXGE_OK);
}
1104 1104
/*
 * Pretty-print a single RCR (receive completion ring) entry.
 * Compiles to an empty function unless NXGE_DEBUG is defined.
 */
/* ARGSUSED */
void
nxge_dump_rcr_entry(p_nxge_t nxgep, p_rcr_entry_t entry_p)
{
#ifdef NXGE_DEBUG

	uint32_t bptr;
	uint64_t pp;

	bptr = entry_p->bits.hdw.pkt_buf_addr;

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "\trcr entry $%p "
	    "\trcr entry 0x%0llx "
	    "\trcr entry 0x%08x "
	    "\trcr entry 0x%08x "
	    "\tvalue 0x%0llx\n"
	    "\tmulti = %d\n"
	    "\tpkt_type = 0x%x\n"
	    "\tzero_copy = %d\n"
	    "\tnoport = %d\n"
	    "\tpromis = %d\n"
	    "\terror = 0x%04x\n"
	    "\tdcf_err = 0x%01x\n"
	    "\tl2_len = %d\n"
	    "\tpktbufsize = %d\n"
	    "\tpkt_buf_addr = $%p\n"
	    "\tpkt_buf_addr (<< 6) = $%p\n",
	    entry_p,
	    *(int64_t *)entry_p,
	    *(int32_t *)entry_p,
	    *(int32_t *)((char *)entry_p + 32),
	    entry_p->value,
	    entry_p->bits.hdw.multi,
	    entry_p->bits.hdw.pkt_type,
	    entry_p->bits.hdw.zero_copy,
	    entry_p->bits.hdw.noport,
	    entry_p->bits.hdw.promis,
	    entry_p->bits.hdw.error,
	    entry_p->bits.hdw.dcf_err,
	    entry_p->bits.hdw.l2_len,
	    entry_p->bits.hdw.pktbufsz,
	    bptr,
	    entry_p->bits.ldw.pkt_buf_addr));

	/* Reassemble the packet buffer address from its masked bits. */
	pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) <<
	    RCR_PKT_BUF_ADDR_SHIFT;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "rcr pp 0x%llx l2 len %d",
	    pp, (*(int64_t *)entry_p >> 40) & 0x3fff));
#endif
}
1157 1157
/*
 * Dump one channel's RBR/RCR hardware state (head/tail pointers and
 * queue lengths) to the console.  Debugging aid only.
 */
void
nxge_rxdma_regs_dump(p_nxge_t nxgep, int rdc)
{
	npi_handle_t handle;
	rbr_stat_t rbr_stat;
	addr44_t hd_addr;
	addr44_t tail_addr;
	uint16_t qlen;

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rxdma_regs_dump: rdc channel %d", rdc));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);

	/* RBR head */
	hd_addr.addr = 0;
	(void) npi_rxdma_rdc_rbr_head_get(handle, rdc, &hd_addr);
#if defined(__i386)
	/* 32-bit kernel: narrow the 44-bit address before printing. */
	printf("nxge_rxdma_regs_dump: got hdptr $%p \n",
	    (void *)(uint32_t)hd_addr.addr);
#else
	printf("nxge_rxdma_regs_dump: got hdptr $%p \n",
	    (void *)hd_addr.addr);
#endif

	/* RBR stats */
	(void) npi_rxdma_rdc_rbr_stat_get(handle, rdc, &rbr_stat);
	printf("nxge_rxdma_regs_dump: rbr len %d \n", rbr_stat.bits.ldw.qlen);

	/* RCR tail */
	tail_addr.addr = 0;
	(void) npi_rxdma_rdc_rcr_tail_get(handle, rdc, &tail_addr);
#if defined(__i386)
	printf("nxge_rxdma_regs_dump: got tail ptr $%p \n",
	    (void *)(uint32_t)tail_addr.addr);
#else
	printf("nxge_rxdma_regs_dump: got tail ptr $%p \n",
	    (void *)tail_addr.addr);
#endif

	/* RCR qlen */
	(void) npi_rxdma_rdc_rcr_qlen_get(handle, rdc, &qlen);
	printf("nxge_rxdma_regs_dump: rcr len %x \n", qlen);

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "<== nxge_rxdma_regs_dump: rdc rdc %d", rdc));
}
1205 1205
1206 1206 nxge_status_t
1207 1207 nxge_rxdma_hw_mode(p_nxge_t nxgep, boolean_t enable)
1208 1208 {
1209 1209 nxge_grp_set_t *set = &nxgep->rx_set;
1210 1210 nxge_status_t status;
1211 1211 npi_status_t rs;
1212 1212 int rdc;
1213 1213
1214 1214 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
1215 1215 "==> nxge_rxdma_hw_mode: mode %d", enable));
1216 1216
1217 1217 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
1218 1218 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1219 1219 "<== nxge_rxdma_mode: not initialized"));
1220 1220 return (NXGE_ERROR);
1221 1221 }
1222 1222
1223 1223 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) {
1224 1224 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1225 1225 "<== nxge_tx_port_fatal_err_recover: "
1226 1226 "NULL ring pointer(s)"));
1227 1227 return (NXGE_ERROR);
1228 1228 }
1229 1229
1230 1230 if (set->owned.map == 0) {
1231 1231 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1232 1232 "nxge_rxdma_regs_dump_channels: no channels"));
1233 1233 return (NULL);
1234 1234 }
1235 1235
1236 1236 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
1237 1237 if ((1 << rdc) & set->owned.map) {
1238 1238 rx_rbr_ring_t *ring =
1239 1239 nxgep->rx_rbr_rings->rbr_rings[rdc];
1240 1240 npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep);
1241 1241 if (ring) {
1242 1242 if (enable) {
1243 1243 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
1244 1244 "==> nxge_rxdma_hw_mode: "
1245 1245 "channel %d (enable)", rdc));
1246 1246 rs = npi_rxdma_cfg_rdc_enable
1247 1247 (handle, rdc);
1248 1248 } else {
1249 1249 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
1250 1250 "==> nxge_rxdma_hw_mode: "
1251 1251 "channel %d disable)", rdc));
1252 1252 rs = npi_rxdma_cfg_rdc_disable
1253 1253 (handle, rdc);
1254 1254 }
1255 1255 }
1256 1256 }
1257 1257 }
1258 1258
1259 1259 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
1260 1260
1261 1261 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
1262 1262 "<== nxge_rxdma_hw_mode: status 0x%x", status));
1263 1263
1264 1264 return (status);
1265 1265 }
1266 1266
1267 1267 void
1268 1268 nxge_rxdma_enable_channel(p_nxge_t nxgep, uint16_t channel)
1269 1269 {
1270 1270 npi_handle_t handle;
1271 1271
1272 1272 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1273 1273 "==> nxge_rxdma_enable_channel: channel %d", channel));
1274 1274
1275 1275 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1276 1276 (void) npi_rxdma_cfg_rdc_enable(handle, channel);
1277 1277
1278 1278 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_enable_channel"));
1279 1279 }
1280 1280
1281 1281 void
1282 1282 nxge_rxdma_disable_channel(p_nxge_t nxgep, uint16_t channel)
1283 1283 {
1284 1284 npi_handle_t handle;
1285 1285
1286 1286 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1287 1287 "==> nxge_rxdma_disable_channel: channel %d", channel));
1288 1288
1289 1289 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1290 1290 (void) npi_rxdma_cfg_rdc_disable(handle, channel);
1291 1291
1292 1292 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_disable_channel"));
1293 1293 }
1294 1294
/*
 * Bring the receive side up: enable all owned RXDMA channels, then
 * enable the RX MAC.  Both return values are intentionally ignored.
 */
void
nxge_hw_start_rx(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_start_rx"));

	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START);
	(void) nxge_rx_mac_enable(nxgep);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_start_rx"));
}
1305 1305
1306 1306 /*ARGSUSED*/
1307 1307 void
1308 1308 nxge_fixup_rxdma_rings(p_nxge_t nxgep)
1309 1309 {
1310 1310 nxge_grp_set_t *set = &nxgep->rx_set;
1311 1311 int rdc;
1312 1312
1313 1313 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_fixup_rxdma_rings"));
1314 1314
1315 1315 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) {
1316 1316 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1317 1317 "<== nxge_tx_port_fatal_err_recover: "
1318 1318 "NULL ring pointer(s)"));
1319 1319 return;
1320 1320 }
1321 1321
1322 1322 if (set->owned.map == 0) {
1323 1323 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1324 1324 "nxge_rxdma_regs_dump_channels: no channels"));
1325 1325 return;
1326 1326 }
1327 1327
1328 1328 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
1329 1329 if ((1 << rdc) & set->owned.map) {
1330 1330 rx_rbr_ring_t *ring =
1331 1331 nxgep->rx_rbr_rings->rbr_rings[rdc];
1332 1332 if (ring) {
1333 1333 nxge_rxdma_hw_stop(nxgep, rdc);
1334 1334 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1335 1335 "==> nxge_fixup_rxdma_rings: "
1336 1336 "channel %d ring $%px",
1337 1337 rdc, ring));
1338 1338 (void) nxge_rxdma_fix_channel(nxgep, rdc);
1339 1339 }
1340 1340 }
1341 1341 }
1342 1342
1343 1343 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_fixup_rxdma_rings"));
1344 1344 }
1345 1345
1346 1346 void
1347 1347 nxge_rxdma_fix_channel(p_nxge_t nxgep, uint16_t channel)
1348 1348 {
1349 1349 int ndmas;
1350 1350 p_rx_rbr_rings_t rx_rbr_rings;
1351 1351 p_rx_rbr_ring_t *rbr_rings;
1352 1352 p_rx_rcr_rings_t rx_rcr_rings;
1353 1353 p_rx_rcr_ring_t *rcr_rings;
1354 1354 p_rx_mbox_areas_t rx_mbox_areas_p;
1355 1355 p_rx_mbox_t *rx_mbox_p;
1356 1356 p_nxge_dma_pool_t dma_buf_poolp;
1357 1357 p_nxge_dma_pool_t dma_cntl_poolp;
1358 1358 p_rx_rbr_ring_t rbrp;
1359 1359 p_rx_rcr_ring_t rcrp;
1360 1360 p_rx_mbox_t mboxp;
1361 1361 p_nxge_dma_common_t dmap;
1362 1362 nxge_status_t status = NXGE_OK;
1363 1363
1364 1364 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fix_channel"));
1365 1365
1366 1366 (void) nxge_rxdma_stop_channel(nxgep, channel);
1367 1367
1368 1368 dma_buf_poolp = nxgep->rx_buf_pool_p;
1369 1369 dma_cntl_poolp = nxgep->rx_cntl_pool_p;
1370 1370
1371 1371 if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) {
1372 1372 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
1373 1373 "<== nxge_rxdma_fix_channel: buf not allocated"));
1374 1374 return;
1375 1375 }
1376 1376
1377 1377 ndmas = dma_buf_poolp->ndmas;
1378 1378 if (!ndmas) {
1379 1379 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
1380 1380 "<== nxge_rxdma_fix_channel: no dma allocated"));
1381 1381 return;
1382 1382 }
1383 1383
1384 1384 rx_rbr_rings = nxgep->rx_rbr_rings;
1385 1385 rx_rcr_rings = nxgep->rx_rcr_rings;
1386 1386 rbr_rings = rx_rbr_rings->rbr_rings;
1387 1387 rcr_rings = rx_rcr_rings->rcr_rings;
1388 1388 rx_mbox_areas_p = nxgep->rx_mbox_areas_p;
1389 1389 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;
1390 1390
1391 1391 /* Reinitialize the receive block and completion rings */
1392 1392 rbrp = (p_rx_rbr_ring_t)rbr_rings[channel],
1393 1393 rcrp = (p_rx_rcr_ring_t)rcr_rings[channel],
1394 1394 mboxp = (p_rx_mbox_t)rx_mbox_p[channel];
1395 1395
1396 1396 rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
1397 1397 rbrp->rbr_rd_index = 0;
1398 1398 rcrp->comp_rd_index = 0;
1399 1399 rcrp->comp_wt_index = 0;
1400 1400
1401 1401 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
1402 1402 bzero((caddr_t)dmap->kaddrp, dmap->alength);
1403 1403
1404 1404 status = nxge_rxdma_start_channel(nxgep, channel,
1405 1405 rbrp, rcrp, mboxp);
1406 1406 if (status != NXGE_OK) {
1407 1407 goto nxge_rxdma_fix_channel_fail;
1408 1408 }
1409 1409
1410 1410 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1411 1411 "<== nxge_rxdma_fix_channel: success (0x%08x)", status));
1412 1412 return;
1413 1413
1414 1414 nxge_rxdma_fix_channel_fail:
1415 1415 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1416 1416 "<== nxge_rxdma_fix_channel: failed (0x%08x)", status));
1417 1417 }
1418 1418
1419 1419 p_rx_rbr_ring_t
1420 1420 nxge_rxdma_get_rbr_ring(p_nxge_t nxgep, uint16_t channel)
1421 1421 {
1422 1422 nxge_grp_set_t *set = &nxgep->rx_set;
1423 1423 nxge_channel_t rdc;
1424 1424
1425 1425 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1426 1426 "==> nxge_rxdma_get_rbr_ring: channel %d", channel));
1427 1427
1428 1428 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) {
1429 1429 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1430 1430 "<== nxge_rxdma_get_rbr_ring: "
1431 1431 "NULL ring pointer(s)"));
1432 1432 return (NULL);
1433 1433 }
1434 1434
1435 1435 if (set->owned.map == 0) {
1436 1436 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1437 1437 "<== nxge_rxdma_get_rbr_ring: no channels"));
1438 1438 return (NULL);
1439 1439 }
1440 1440
1441 1441 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
1442 1442 if ((1 << rdc) & set->owned.map) {
1443 1443 rx_rbr_ring_t *ring =
1444 1444 nxgep->rx_rbr_rings->rbr_rings[rdc];
1445 1445 if (ring) {
1446 1446 if (channel == ring->rdc) {
1447 1447 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1448 1448 "==> nxge_rxdma_get_rbr_ring: "
1449 1449 "channel %d ring $%p", rdc, ring));
1450 1450 return (ring);
1451 1451 }
1452 1452 }
1453 1453 }
1454 1454 }
1455 1455
1456 1456 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1457 1457 "<== nxge_rxdma_get_rbr_ring: not found"));
1458 1458
1459 1459 return (NULL);
1460 1460 }
1461 1461
1462 1462 p_rx_rcr_ring_t
1463 1463 nxge_rxdma_get_rcr_ring(p_nxge_t nxgep, uint16_t channel)
1464 1464 {
1465 1465 nxge_grp_set_t *set = &nxgep->rx_set;
1466 1466 nxge_channel_t rdc;
1467 1467
1468 1468 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1469 1469 "==> nxge_rxdma_get_rcr_ring: channel %d", channel));
1470 1470
1471 1471 if (nxgep->rx_rcr_rings == 0 || nxgep->rx_rcr_rings->rcr_rings == 0) {
1472 1472 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1473 1473 "<== nxge_rxdma_get_rcr_ring: "
1474 1474 "NULL ring pointer(s)"));
1475 1475 return (NULL);
1476 1476 }
1477 1477
1478 1478 if (set->owned.map == 0) {
1479 1479 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1480 1480 "<== nxge_rxdma_get_rbr_ring: no channels"));
1481 1481 return (NULL);
1482 1482 }
1483 1483
1484 1484 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
1485 1485 if ((1 << rdc) & set->owned.map) {
1486 1486 rx_rcr_ring_t *ring =
1487 1487 nxgep->rx_rcr_rings->rcr_rings[rdc];
1488 1488 if (ring) {
1489 1489 if (channel == ring->rdc) {
1490 1490 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1491 1491 "==> nxge_rxdma_get_rcr_ring: "
1492 1492 "channel %d ring $%p", rdc, ring));
1493 1493 return (ring);
1494 1494 }
1495 1495 }
1496 1496 }
1497 1497 }
1498 1498
1499 1499 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1500 1500 "<== nxge_rxdma_get_rcr_ring: not found"));
1501 1501
1502 1502 return (NULL);
1503 1503 }
1504 1504
1505 1505 /*
1506 1506 * Static functions start here.
1507 1507 */
/*
 * Allocate one receive message (rx_msg_t) plus its data buffer.
 *
 * If dmabuf_p is non-NULL, the buffer is carved from the front of
 * that pre-allocated DMA chunk and dmabuf_p is advanced past the
 * carved region; otherwise a plain kmem buffer of `size' bytes is
 * allocated.  The buffer is wrapped with desballoc() so that freeing
 * the resulting mblk invokes nxge_freeb(); the message starts with
 * ref_cnt == 1.
 *
 * Returns the new message, or NULL on any allocation failure.
 */
static p_rx_msg_t
nxge_allocb(size_t size, uint32_t pri, p_nxge_dma_common_t dmabuf_p)
{
	p_rx_msg_t nxge_mp = NULL;
	p_nxge_dma_common_t dmamsg_p;
	uchar_t *buffer;

	nxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP);
	if (nxge_mp == NULL) {
		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
		    "Allocation of a rx msg failed."));
		goto nxge_allocb_exit;
	}

	nxge_mp->use_buf_pool = B_FALSE;
	if (dmabuf_p) {
		nxge_mp->use_buf_pool = B_TRUE;
		/* Describe just this one block within the larger chunk. */
		dmamsg_p = (p_nxge_dma_common_t)&nxge_mp->buf_dma;
		*dmamsg_p = *dmabuf_p;
		dmamsg_p->nblocks = 1;
		dmamsg_p->block_size = size;
		dmamsg_p->alength = size;
		buffer = (uchar_t *)dmabuf_p->kaddrp;

		/* Consume `size' bytes from the front of the chunk. */
		dmabuf_p->kaddrp = (void *)
		    ((char *)dmabuf_p->kaddrp + size);
		dmabuf_p->ioaddr_pp = (void *)
		    ((char *)dmabuf_p->ioaddr_pp + size);
		dmabuf_p->alength -= size;
		dmabuf_p->offset += size;
		dmabuf_p->dma_cookie.dmac_laddress += size;
		dmabuf_p->dma_cookie.dmac_size -= size;

	} else {
		buffer = KMEM_ALLOC(size, KM_NOSLEEP);
		if (buffer == NULL) {
			NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
			    "Allocation of a receive page failed."));
			goto nxge_allocb_fail1;
		}
	}

	/* Wrap the buffer; nxge_freeb() runs when the mblk is freed. */
	nxge_mp->rx_mblk_p = desballoc(buffer, size, pri, &nxge_mp->freeb);
	if (nxge_mp->rx_mblk_p == NULL) {
		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "desballoc failed."));
		goto nxge_allocb_fail2;
	}

	nxge_mp->buffer = buffer;
	nxge_mp->block_size = size;
	nxge_mp->freeb.free_func = (void (*)())nxge_freeb;
	nxge_mp->freeb.free_arg = (caddr_t)nxge_mp;
	nxge_mp->ref_cnt = 1;
	nxge_mp->free = B_TRUE;
	nxge_mp->rx_use_bcopy = B_FALSE;

	atomic_inc_32(&nxge_mblks_pending);

	goto nxge_allocb_exit;

nxge_allocb_fail2:
	/* Pool-backed buffers stay with their chunk; only kmem is freed. */
	if (!nxge_mp->use_buf_pool) {
		KMEM_FREE(buffer, size);
	}

nxge_allocb_fail1:
	KMEM_FREE(nxge_mp, sizeof (rx_msg_t));
	nxge_mp = NULL;

nxge_allocb_exit:
	return (nxge_mp);
}
1580 1580
/*
 * Create a zero-copy mblk referencing `size' bytes at `offset' inside
 * the receive buffer, bumping the message's reference count so
 * nxge_freeb() knows the buffer is loaned out.  Returns NULL when
 * desballoc() fails.
 */
p_mblk_t
nxge_dupb(p_rx_msg_t nxge_mp, uint_t offset, size_t size)
{
	p_mblk_t mp;

	NXGE_DEBUG_MSG((NULL, MEM_CTL, "==> nxge_dupb"));
	NXGE_DEBUG_MSG((NULL, MEM_CTL, "nxge_mp = $%p "
	    "offset = 0x%08X "
	    "size = 0x%08X",
	    nxge_mp, offset, size));

	mp = desballoc(&nxge_mp->buffer[offset], size,
	    0, &nxge_mp->freeb);
	if (mp == NULL) {
		NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed"));
		goto nxge_dupb_exit;
	}
	/* Loan out one more reference; released via nxge_freeb(). */
	atomic_inc_32(&nxge_mp->ref_cnt);

nxge_dupb_exit:
	NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p",
	    nxge_mp));
	return (mp);
}
1606 1606
1607 1607 p_mblk_t
1608 1608 nxge_dupb_bcopy(p_rx_msg_t nxge_mp, uint_t offset, size_t size)
1609 1609 {
1610 1610 p_mblk_t mp;
1611 1611 uchar_t *dp;
1612 1612
1613 1613 mp = allocb(size + NXGE_RXBUF_EXTRA, 0);
1614 1614 if (mp == NULL) {
1615 1615 NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed"));
1616 1616 goto nxge_dupb_bcopy_exit;
1617 1617 }
1618 1618 dp = mp->b_rptr = mp->b_rptr + NXGE_RXBUF_EXTRA;
1619 1619 bcopy((void *)&nxge_mp->buffer[offset], dp, size);
1620 1620 mp->b_wptr = dp + size;
1621 1621
1622 1622 nxge_dupb_bcopy_exit:
1623 1623 NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p",
1624 1624 nxge_mp));
1625 1625 return (mp);
1626 1626 }
1627 1627
void nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p,
	p_rx_msg_t rx_msg_p);

/*
 * Return a receive buffer to the hardware: reset its per-use
 * bookkeeping, write its (pre-shifted) DMA address into the next RBR
 * descriptor slot, and kick the RBR so the hardware sees one new
 * buffer.
 */
void
nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p)
{
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_post_page"));

	/* Reuse this buffer */
	rx_msg_p->free = B_FALSE;
	rx_msg_p->cur_usage_cnt = 0;
	rx_msg_p->max_usage_cnt = 0;
	rx_msg_p->pkt_buf_size = 0;

	if (rx_rbr_p->rbr_use_bcopy) {
		rx_msg_p->rx_use_bcopy = B_FALSE;
		atomic_dec_32(&rx_rbr_p->rbr_consumed);
	}

	/*
	 * Get the rbr header pointer and its offset index.
	 */
	/* post_lock serializes writers advancing rbr_wr_index. */
	MUTEX_ENTER(&rx_rbr_p->post_lock);
	rx_rbr_p->rbr_wr_index = ((rx_rbr_p->rbr_wr_index + 1) &
	    rx_rbr_p->rbr_wrap_mask);
	rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr;
	MUTEX_EXIT(&rx_rbr_p->post_lock);
	/* Tell the hardware one more buffer is available. */
	npi_rxdma_rdc_rbr_kick(NXGE_DEV_NPI_HANDLE(nxgep),
	    rx_rbr_p->rdc, 1);

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "<== nxge_post_page (channel %d post_next_index %d)",
	    rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index));

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_post_page"));
}
1664 1664
/*
 * Free routine for receive-buffer mblks (installed by nxge_allocb()).
 *
 * Drops one reference on the rx_msg_t.  When the last reference is
 * released, the data buffer and the message are destroyed and, if the
 * owning RBR ring is in RBR_UNMAPPED state with no buffers left
 * outstanding, the ring itself is torn down.  Otherwise, a buffer
 * marked free that is down to its base reference is reposted to the
 * hardware.
 */
void
nxge_freeb(p_rx_msg_t rx_msg_p)
{
	size_t size;
	uchar_t *buffer = NULL;
	int ref_cnt;
	boolean_t free_state = B_FALSE;

	rx_rbr_ring_t *ring = rx_msg_p->rx_rbr_p;

	NXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> nxge_freeb"));
	NXGE_DEBUG_MSG((NULL, MEM2_CTL,
	    "nxge_freeb:rx_msg_p = $%p (block pending %d)",
	    rx_msg_p, nxge_mblks_pending));

	/*
	 * First we need to get the free state, then
	 * atomic decrement the reference count to prevent
	 * the race condition with the interrupt thread that
	 * is processing a loaned up buffer block.
	 */
	free_state = rx_msg_p->free;
	ref_cnt = atomic_dec_32_nv(&rx_msg_p->ref_cnt);
	if (!ref_cnt) {
		atomic_dec_32(&nxge_mblks_pending);
		buffer = rx_msg_p->buffer;
		size = rx_msg_p->block_size;
		NXGE_DEBUG_MSG((NULL, MEM2_CTL, "nxge_freeb: "
		    "will free: rx_msg_p = $%p (block pending %d)",
		    rx_msg_p, nxge_mblks_pending));

		/* Pool-backed buffers belong to the DMA chunk, not kmem. */
		if (!rx_msg_p->use_buf_pool) {
			KMEM_FREE(buffer, size);
		}

		KMEM_FREE(rx_msg_p, sizeof (rx_msg_t));

		if (ring) {
			/*
			 * Decrement the receive buffer ring's reference
			 * count, too.
			 */
			atomic_dec_32(&ring->rbr_ref_cnt);

			/*
			 * Free the receive buffer ring, if
			 * 1. all the receive buffers have been freed
			 * 2. and we are in the proper state (that is,
			 *    we are not UNMAPPING).
			 */
			if (ring->rbr_ref_cnt == 0 &&
			    ring->rbr_state == RBR_UNMAPPED) {
				/*
				 * Free receive data buffers,
				 * buffer index information
				 * (rxring_info) and
				 * the message block ring.
				 */
				NXGE_DEBUG_MSG((NULL, RX_CTL,
				    "nxge_freeb:rx_msg_p = $%p "
				    "(block pending %d) free buffers",
				    rx_msg_p, nxge_mblks_pending));
				nxge_rxdma_databuf_free(ring);
				if (ring->ring_info) {
					KMEM_FREE(ring->ring_info,
					    sizeof (rxring_info_t));
				}

				if (ring->rx_msg_ring) {
					KMEM_FREE(ring->rx_msg_ring,
					    ring->tnblocks *
					    sizeof (p_rx_msg_t));
				}
				KMEM_FREE(ring, sizeof (*ring));
			}
		}
		return;
	}

	/*
	 * Repost buffer.
	 */
	if (free_state && (ref_cnt == 1) && ring) {
		NXGE_DEBUG_MSG((NULL, RX_CTL,
		    "nxge_freeb: post page $%p:", rx_msg_p));
		if (ring->rbr_state == RBR_POSTING)
			nxge_post_page(rx_msg_p->nxgep, ring, rx_msg_p);
	}

	NXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== nxge_freeb"));
}
1756 1756
/*
 * RXDMA interrupt service routine.
 *
 * arg1 is the logical device (ldv) that raised the interrupt, arg2
 * the driver instance.  Processes completed packets (unless the ring
 * is in polling mode), handles error events, acknowledges the
 * channel's control/status bits, and re-arms or disarms the logical
 * group depending on the ring's poll_flag.  Always returns
 * DDI_INTR_CLAIMED.
 */
uint_t
nxge_rx_intr(void *arg1, void *arg2)
{
	p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1;
	p_nxge_t nxgep = (p_nxge_t)arg2;
	p_nxge_ldg_t ldgp;
	uint8_t channel;
	npi_handle_t handle;
	rx_dma_ctl_stat_t cs;
	p_rx_rcr_ring_t rcrp;
	mblk_t *mp = NULL;

	if (ldvp == NULL) {
		NXGE_DEBUG_MSG((NULL, INT_CTL,
		    "<== nxge_rx_intr: arg2 $%p arg1 $%p",
		    nxgep, ldvp));
		return (DDI_INTR_CLAIMED);
	}

	/* Trust the ldv's back-pointer if arg2 disagrees with it. */
	if (arg2 == NULL || (void *)ldvp->nxgep != arg2) {
		nxgep = ldvp->nxgep;
	}

	if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) ||
	    (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "<== nxge_rx_intr: interface not started or intialized"));
		return (DDI_INTR_CLAIMED);
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rx_intr: arg2 $%p arg1 $%p",
	    nxgep, ldvp));

	/*
	 * Get the PIO handle.
	 */
	handle = NXGE_DEV_NPI_HANDLE(nxgep);

	/*
	 * Get the ring to enable us to process packets.
	 */
	rcrp = nxgep->rx_rcr_rings->rcr_rings[ldvp->vdma_index];

	/*
	 * The RCR ring lock must be held when packets
	 * are being processed and the hardware registers are
	 * being read or written to prevent race condition
	 * among the interrupt thread, the polling thread
	 * (will cause fatal errors such as rcrincon bit set)
	 * and the setting of the poll_flag.
	 */
	MUTEX_ENTER(&rcrp->lock);

	/*
	 * Get the control and status for this channel.
	 */
	channel = ldvp->channel;
	ldgp = ldvp->ldgp;

	if (!isLDOMguest(nxgep) && (!rcrp->started)) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "<== nxge_rx_intr: channel is not started"));

		/*
		 * We received an interrupt before the ring is started.
		 */
		/* Acknowledge the event bits but do no packet work. */
		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
		    &cs.value);
		cs.value &= RX_DMA_CTL_STAT_WR1C;
		cs.bits.hdw.mex = 1;
		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
		    cs.value);

		/*
		 * Rearm this logical group if this is a single device
		 * group.
		 */
		if (ldgp->nldvs == 1) {
			if (isLDOMguest(nxgep)) {
				nxge_hio_ldgimgn(nxgep, ldgp);
			} else {
				ldgimgm_t mgm;

				mgm.value = 0;
				mgm.bits.ldw.arm = 1;
				mgm.bits.ldw.timer = ldgp->ldg_timer;

				NXGE_REG_WR64(handle,
				    LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg),
				    mgm.value);
			}
		}
		MUTEX_EXIT(&rcrp->lock);
		return (DDI_INTR_CLAIMED);
	}

	ASSERT(rcrp->ldgp == ldgp);
	ASSERT(rcrp->ldvp == ldvp);

	RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel, &cs.value);

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_intr:channel %d "
	    "cs 0x%016llx rcrto 0x%x rcrthres %x",
	    channel,
	    cs.value,
	    cs.bits.hdw.rcrto,
	    cs.bits.hdw.rcrthres));

	/* In polling mode the poll thread, not the ISR, drains packets. */
	if (!rcrp->poll_flag) {
		mp = nxge_rx_pkts(nxgep, rcrp, cs, -1);
	}

	/* error events. */
	if (cs.value & RX_DMA_CTL_STAT_ERROR) {
		(void) nxge_rx_err_evnts(nxgep, channel, cs);
	}

	/*
	 * Enable the mailbox update interrupt if we want
	 * to use mailbox. We probably don't need to use
	 * mailbox as it only saves us one pio read.
	 * Also write 1 to rcrthres and rcrto to clear
	 * these two edge triggered bits.
	 */
	cs.value &= RX_DMA_CTL_STAT_WR1C;
	cs.bits.hdw.mex = rcrp->poll_flag ? 0 : 1;
	RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
	    cs.value);

	/*
	 * If the polling mode is enabled, disable the interrupt.
	 */
	if (rcrp->poll_flag) {
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_intr: rdc %d ldgp $%p ldvp $%p "
		    "(disabling interrupts)", channel, ldgp, ldvp));

		/*
		 * Disarm this logical group if this is a single device
		 * group.
		 */
		if (ldgp->nldvs == 1) {
			if (isLDOMguest(nxgep)) {
				ldgp->arm = B_FALSE;
				nxge_hio_ldgimgn(nxgep, ldgp);
			} else {
				ldgimgm_t mgm;
				mgm.value = 0;
				mgm.bits.ldw.arm = 0;
				NXGE_REG_WR64(handle,
				    LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg),
				    mgm.value);
			}
		}
	} else {
		/*
		 * Rearm this logical group if this is a single device
		 * group.
		 */
		if (ldgp->nldvs == 1) {
			if (isLDOMguest(nxgep)) {
				nxge_hio_ldgimgn(nxgep, ldgp);
			} else {
				ldgimgm_t mgm;

				mgm.value = 0;
				mgm.bits.ldw.arm = 1;
				mgm.bits.ldw.timer = ldgp->ldg_timer;

				NXGE_REG_WR64(handle,
				    LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg),
				    mgm.value);
			}
		}

		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_intr: rdc %d ldgp $%p "
		    "exiting ISR (and call mac_rx_ring)", channel, ldgp));
	}
	MUTEX_EXIT(&rcrp->lock);

	/* Deliver the chain to GLDv3 outside the ring lock. */
	if (mp != NULL) {
		mac_rx_ring(nxgep->mach, rcrp->rcr_mac_handle, mp,
		    rcrp->rcr_gen_num);
	}
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_intr: DDI_INTR_CLAIMED"));
	return (DDI_INTR_CLAIMED);
}
1946 1946
1947 1947 /*
1948 1948 * This routine is the main packet receive processing function.
1949 1949 * It gets the packet type, error code, and buffer related
1950 1950 * information from the receive completion entry.
1951 1951 * How many completion entries to process is based on the number of packets
1952 1952 * queued by the hardware, a hardware maintained tail pointer
1953 1953 * and a configurable receive packet count.
1954 1954 *
1955 1955 * A chain of message blocks will be created as result of processing
1956 1956 * the completion entries. This chain of message blocks will be returned and
1957 1957 * a hardware control status register will be updated with the number of
1958 1958 * packets were removed from the hardware queue.
1959 1959 *
1960 1960 * The RCR ring lock is held when entering this function.
1961 1961 */
static mblk_t *
nxge_rx_pkts(p_nxge_t nxgep, p_rx_rcr_ring_t rcr_p, rx_dma_ctl_stat_t cs,
    int bytes_to_pickup)
{
	npi_handle_t		handle;
	uint8_t			channel;
	uint32_t		comp_rd_index;
	p_rcr_entry_t		rcr_desc_rd_head_p;
	p_rcr_entry_t		rcr_desc_rd_head_pp;
	p_mblk_t		nmp, mp_cont, head_mp, *tail_mp;
	uint16_t		qlen, nrcr_read, npkt_read;
	uint32_t		qlen_hw;
	boolean_t		multi;
	rcrcfig_b_t		rcr_cfg_b;
	int			totallen = 0;
#if defined(_BIG_ENDIAN)
	npi_status_t		rs = NPI_SUCCESS;
#endif

	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_rx_pkts: "
	    "channel %d", rcr_p->rdc));

	/* Do not touch the hardware until initialization has completed. */
	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		return (NULL);
	}
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	channel = rcr_p->rdc;

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rx_pkts: START: rcr channel %d "
	    "head_p $%p head_pp $%p index %d ",
	    channel, rcr_p->rcr_desc_rd_head_p,
	    rcr_p->rcr_desc_rd_head_pp,
	    rcr_p->comp_rd_index));

	/*
	 * Read the hardware-maintained RCR queue length (the number of
	 * completion entries posted and not yet consumed).
	 */
#if !defined(_BIG_ENDIAN)
	qlen = RXDMA_REG_READ32(handle, RCRSTAT_A_REG, channel) & 0xffff;
#else
	rs = npi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen);
	if (rs != NPI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts: "
		    "channel %d, get qlen failed 0x%08x",
		    channel, rs));
		return (NULL);
	}
#endif
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:rcr channel %d "
	    "qlen %d", channel, qlen));



	if (!qlen) {
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_pkts:rcr channel %d "
		    "qlen %d (no pkts)", channel, qlen));

		return (NULL);
	}

	comp_rd_index = rcr_p->comp_rd_index;

	rcr_desc_rd_head_p = rcr_p->rcr_desc_rd_head_p;
	rcr_desc_rd_head_pp = rcr_p->rcr_desc_rd_head_pp;
	nrcr_read = npkt_read = 0;

	/*
	 * Number of packets queued, clamped to the per-call budget.
	 * (A jumbo or multi-buffer packet is counted as a single packet
	 * even though it may take up more than one completion entry.)
	 */
	qlen_hw = (qlen < nxge_max_rx_pkts) ?
	    qlen : nxge_max_rx_pkts;
	head_mp = NULL;
	tail_mp = &head_mp;
	nmp = mp_cont = NULL;
	multi = B_FALSE;

	while (qlen_hw) {

#ifdef NXGE_DEBUG
		nxge_dump_rcr_entry(nxgep, rcr_desc_rd_head_p);
#endif
		/*
		 * Process one completion ring entry.  nxge_receive_packet()
		 * sets nmp on the first buffer of a frame, mp_cont on
		 * continuation buffers, and multi while more buffers of the
		 * same frame remain.
		 */
		nxge_receive_packet(nxgep,
		    rcr_p, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont);

		/*
		 * message chaining modes: complete frames are linked through
		 * b_next; buffers of a multi-buffer frame are linked through
		 * b_cont.
		 */
		if (nmp) {
			nmp->b_next = NULL;
			if (!multi && !mp_cont) { /* frame fits a partition */
				*tail_mp = nmp;
				tail_mp = &nmp->b_next;
				totallen += MBLKL(nmp);
				nmp = NULL;
			} else if (multi && !mp_cont) { /* first segment */
				*tail_mp = nmp;
				tail_mp = &nmp->b_cont;
				totallen += MBLKL(nmp);
			} else if (multi && mp_cont) { /* mid of multi segs */
				*tail_mp = mp_cont;
				tail_mp = &mp_cont->b_cont;
				totallen += MBLKL(mp_cont);
			} else if (!multi && mp_cont) { /* last segment */
				*tail_mp = mp_cont;
				tail_mp = &nmp->b_next;
				totallen += MBLKL(mp_cont);
				nmp = NULL;
			}
		}
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "==> nxge_rx_pkts: loop: rcr channel %d "
		    "before updating: multi %d "
		    "nrcr_read %d "
		    "npk read %d "
		    "head_pp $%p index %d ",
		    channel,
		    multi,
		    nrcr_read, npkt_read, rcr_desc_rd_head_pp,
		    comp_rd_index));

		/* A packet is only "done" once its last buffer is seen. */
		if (!multi) {
			qlen_hw--;
			npkt_read++;
		}

		/*
		 * Update the next read entry.
		 */
		comp_rd_index = NEXT_ENTRY(comp_rd_index,
		    rcr_p->comp_wrap_mask);

		rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p,
		    rcr_p->rcr_desc_first_p,
		    rcr_p->rcr_desc_last_p);

		nrcr_read++;

		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "<== nxge_rx_pkts: (SAM, process one packet) "
		    "nrcr_read %d",
		    nrcr_read));
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "==> nxge_rx_pkts: loop: rcr channel %d "
		    "multi %d "
		    "nrcr_read %d "
		    "npk read %d "
		    "head_pp $%p index %d ",
		    channel,
		    multi,
		    nrcr_read, npkt_read, rcr_desc_rd_head_pp,
		    comp_rd_index));

		/* Stop early once the poll byte budget has been met. */
		if ((bytes_to_pickup != -1) &&
		    (totallen >= bytes_to_pickup)) {
			break;
		}
	}

	/*
	 * Publish the updated software read state back to the ring.
	 * NOTE(review): rcr_desc_rd_head_pp is never advanced in the loop
	 * above, so this write-back restores the value read on entry —
	 * confirm whether that is intentional.
	 */
	rcr_p->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp;
	rcr_p->comp_rd_index = comp_rd_index;
	rcr_p->rcr_desc_rd_head_p = rcr_desc_rd_head_p;
	/*
	 * If interrupt moderation tunables changed, clamp them to the
	 * supported minimums and push them to the RCR config register.
	 */
	if ((nxgep->intr_timeout != rcr_p->intr_timeout) ||
	    (nxgep->intr_threshold != rcr_p->intr_threshold)) {

		rcr_p->intr_timeout = (nxgep->intr_timeout <
		    NXGE_RDC_RCR_TIMEOUT_MIN) ? NXGE_RDC_RCR_TIMEOUT_MIN :
		    nxgep->intr_timeout;

		rcr_p->intr_threshold = (nxgep->intr_threshold <
		    NXGE_RDC_RCR_THRESHOLD_MIN) ? NXGE_RDC_RCR_THRESHOLD_MIN :
		    nxgep->intr_threshold;

		rcr_cfg_b.value = 0x0ULL;
		rcr_cfg_b.bits.ldw.entout = 1;
		rcr_cfg_b.bits.ldw.timeout = rcr_p->intr_timeout;
		rcr_cfg_b.bits.ldw.pthres = rcr_p->intr_threshold;

		RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG,
		    channel, rcr_cfg_b.value);
	}

	/*
	 * Tell the hardware how many packets and completion-ring pointers
	 * were consumed so it can update its queue length.
	 */
	cs.bits.ldw.pktread = npkt_read;
	cs.bits.ldw.ptrread = nrcr_read;
	RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG,
	    channel, cs.value);
	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rx_pkts: EXIT: rcr channel %d "
	    "head_pp $%p index %016llx ",
	    channel,
	    rcr_p->rcr_desc_rd_head_pp,
	    rcr_p->comp_rd_index));
	/*
	 * Update RCR buffer pointer read and number of packets
	 * read.
	 */

	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_rx_pkts: return"
	    "channel %d", rcr_p->rdc));

	return (head_mp);
}
2168 2168
2169 2169 void
2170 2170 nxge_receive_packet(p_nxge_t nxgep,
2171 2171 p_rx_rcr_ring_t rcr_p, p_rcr_entry_t rcr_desc_rd_head_p,
2172 2172 boolean_t *multi_p, mblk_t **mp, mblk_t **mp_cont)
2173 2173 {
2174 2174 p_mblk_t nmp = NULL;
2175 2175 uint64_t multi;
2176 2176 uint64_t dcf_err;
2177 2177 uint8_t channel;
2178 2178
2179 2179 boolean_t first_entry = B_TRUE;
2180 2180 boolean_t is_tcp_udp = B_FALSE;
2181 2181 boolean_t buffer_free = B_FALSE;
2182 2182 boolean_t error_send_up = B_FALSE;
2183 2183 uint8_t error_type;
2184 2184 uint16_t l2_len;
2185 2185 uint16_t skip_len;
2186 2186 uint8_t pktbufsz_type;
2187 2187 uint64_t rcr_entry;
2188 2188 uint64_t *pkt_buf_addr_pp;
2189 2189 uint64_t *pkt_buf_addr_p;
2190 2190 uint32_t buf_offset;
2191 2191 uint32_t bsize;
2192 2192 uint32_t error_disp_cnt;
2193 2193 uint32_t msg_index;
2194 2194 p_rx_rbr_ring_t rx_rbr_p;
2195 2195 p_rx_msg_t *rx_msg_ring_p;
2196 2196 p_rx_msg_t rx_msg_p;
2197 2197 uint16_t sw_offset_bytes = 0, hdr_size = 0;
2198 2198 nxge_status_t status = NXGE_OK;
2199 2199 boolean_t is_valid = B_FALSE;
2200 2200 p_nxge_rx_ring_stats_t rdc_stats;
2201 2201 uint32_t bytes_read;
2202 2202 uint64_t pkt_type;
2203 2203 uint64_t frag;
2204 2204 boolean_t pkt_too_long_err = B_FALSE;
2205 2205 #ifdef NXGE_DEBUG
2206 2206 int dump_len;
2207 2207 #endif
2208 2208 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_receive_packet"));
2209 2209 first_entry = (*mp == NULL) ? B_TRUE : B_FALSE;
2210 2210
2211 2211 rcr_entry = *((uint64_t *)rcr_desc_rd_head_p);
2212 2212
2213 2213 multi = (rcr_entry & RCR_MULTI_MASK);
2214 2214 dcf_err = (rcr_entry & RCR_DCF_ERROR_MASK);
2215 2215 pkt_type = (rcr_entry & RCR_PKT_TYPE_MASK);
2216 2216
2217 2217 error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT);
2218 2218 frag = (rcr_entry & RCR_FRAG_MASK);
2219 2219
2220 2220 l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT);
2221 2221
2222 2222 pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >>
2223 2223 RCR_PKTBUFSZ_SHIFT);
2224 2224 #if defined(__i386)
2225 2225 pkt_buf_addr_pp = (uint64_t *)(uint32_t)((rcr_entry &
2226 2226 RCR_PKT_BUF_ADDR_MASK) << RCR_PKT_BUF_ADDR_SHIFT);
2227 2227 #else
2228 2228 pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) <<
2229 2229 RCR_PKT_BUF_ADDR_SHIFT);
2230 2230 #endif
2231 2231
2232 2232 channel = rcr_p->rdc;
2233 2233
2234 2234 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2235 2235 "==> nxge_receive_packet: entryp $%p entry 0x%0llx "
2236 2236 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx "
2237 2237 "error_type 0x%x pkt_type 0x%x "
2238 2238 "pktbufsz_type %d ",
2239 2239 rcr_desc_rd_head_p,
2240 2240 rcr_entry, pkt_buf_addr_pp, l2_len,
2241 2241 multi,
2242 2242 error_type,
2243 2243 pkt_type,
2244 2244 pktbufsz_type));
2245 2245
2246 2246 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2247 2247 "==> nxge_receive_packet: entryp $%p entry 0x%0llx "
2248 2248 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx "
2249 2249 "error_type 0x%x pkt_type 0x%x ", rcr_desc_rd_head_p,
2250 2250 rcr_entry, pkt_buf_addr_pp, l2_len,
2251 2251 multi,
2252 2252 error_type,
2253 2253 pkt_type));
2254 2254
2255 2255 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2256 2256 "==> (rbr) nxge_receive_packet: entry 0x%0llx "
2257 2257 "full pkt_buf_addr_pp $%p l2_len %d",
2258 2258 rcr_entry, pkt_buf_addr_pp, l2_len));
2259 2259
2260 2260 /* get the stats ptr */
2261 2261 rdc_stats = rcr_p->rdc_stats;
2262 2262
2263 2263 if (!l2_len) {
2264 2264
2265 2265 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2266 2266 "<== nxge_receive_packet: failed: l2 length is 0."));
2267 2267 return;
2268 2268 }
2269 2269
2270 2270 /*
2271 2271 * Software workaround for BMAC hardware limitation that allows
2272 2272 * maxframe size of 1526, instead of 1522 for non-jumbo and 0x2406
2273 2273 * instead of 0x2400 for jumbo.
2274 2274 */
2275 2275 if (l2_len > nxgep->mac.maxframesize) {
2276 2276 pkt_too_long_err = B_TRUE;
2277 2277 }
2278 2278
2279 2279 /* Hardware sends us 4 bytes of CRC as no stripping is done. */
2280 2280 l2_len -= ETHERFCSL;
2281 2281
2282 2282 /* shift 6 bits to get the full io address */
2283 2283 #if defined(__i386)
2284 2284 pkt_buf_addr_pp = (uint64_t *)((uint32_t)pkt_buf_addr_pp <<
2285 2285 RCR_PKT_BUF_ADDR_SHIFT_FULL);
2286 2286 #else
2287 2287 pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp <<
2288 2288 RCR_PKT_BUF_ADDR_SHIFT_FULL);
2289 2289 #endif
2290 2290 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2291 2291 "==> (rbr) nxge_receive_packet: entry 0x%0llx "
2292 2292 "full pkt_buf_addr_pp $%p l2_len %d",
2293 2293 rcr_entry, pkt_buf_addr_pp, l2_len));
2294 2294
2295 2295 rx_rbr_p = rcr_p->rx_rbr_p;
2296 2296 rx_msg_ring_p = rx_rbr_p->rx_msg_ring;
2297 2297
2298 2298 if (first_entry) {
2299 2299 hdr_size = (rcr_p->full_hdr_flag ? RXDMA_HDR_SIZE_FULL :
2300 2300 RXDMA_HDR_SIZE_DEFAULT);
2301 2301
2302 2302 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2303 2303 "==> nxge_receive_packet: first entry 0x%016llx "
2304 2304 "pkt_buf_addr_pp $%p l2_len %d hdr %d",
2305 2305 rcr_entry, pkt_buf_addr_pp, l2_len,
2306 2306 hdr_size));
2307 2307 }
2308 2308
2309 2309 MUTEX_ENTER(&rx_rbr_p->lock);
2310 2310
2311 2311 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2312 2312 "==> (rbr 1) nxge_receive_packet: entry 0x%0llx "
2313 2313 "full pkt_buf_addr_pp $%p l2_len %d",
2314 2314 rcr_entry, pkt_buf_addr_pp, l2_len));
2315 2315
2316 2316 /*
2317 2317 * Packet buffer address in the completion entry points
2318 2318 * to the starting buffer address (offset 0).
2319 2319 * Use the starting buffer address to locate the corresponding
2320 2320 * kernel address.
2321 2321 */
2322 2322 status = nxge_rxbuf_pp_to_vp(nxgep, rx_rbr_p,
2323 2323 pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p,
2324 2324 &buf_offset,
2325 2325 &msg_index);
2326 2326
2327 2327 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2328 2328 "==> (rbr 2) nxge_receive_packet: entry 0x%0llx "
2329 2329 "full pkt_buf_addr_pp $%p l2_len %d",
2330 2330 rcr_entry, pkt_buf_addr_pp, l2_len));
2331 2331
2332 2332 if (status != NXGE_OK) {
2333 2333 MUTEX_EXIT(&rx_rbr_p->lock);
2334 2334 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2335 2335 "<== nxge_receive_packet: found vaddr failed %d",
2336 2336 status));
2337 2337 return;
2338 2338 }
2339 2339
2340 2340 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2341 2341 "==> (rbr 3) nxge_receive_packet: entry 0x%0llx "
2342 2342 "full pkt_buf_addr_pp $%p l2_len %d",
2343 2343 rcr_entry, pkt_buf_addr_pp, l2_len));
2344 2344
2345 2345 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2346 2346 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx "
2347 2347 "full pkt_buf_addr_pp $%p l2_len %d",
2348 2348 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len));
2349 2349
2350 2350 rx_msg_p = rx_msg_ring_p[msg_index];
2351 2351
2352 2352 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2353 2353 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx "
2354 2354 "full pkt_buf_addr_pp $%p l2_len %d",
2355 2355 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len));
2356 2356
2357 2357 switch (pktbufsz_type) {
2358 2358 case RCR_PKTBUFSZ_0:
2359 2359 bsize = rx_rbr_p->pkt_buf_size0_bytes;
2360 2360 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2361 2361 "==> nxge_receive_packet: 0 buf %d", bsize));
2362 2362 break;
2363 2363 case RCR_PKTBUFSZ_1:
2364 2364 bsize = rx_rbr_p->pkt_buf_size1_bytes;
2365 2365 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2366 2366 "==> nxge_receive_packet: 1 buf %d", bsize));
2367 2367 break;
2368 2368 case RCR_PKTBUFSZ_2:
2369 2369 bsize = rx_rbr_p->pkt_buf_size2_bytes;
2370 2370 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2371 2371 "==> nxge_receive_packet: 2 buf %d", bsize));
2372 2372 break;
2373 2373 case RCR_SINGLE_BLOCK:
2374 2374 bsize = rx_msg_p->block_size;
2375 2375 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2376 2376 "==> nxge_receive_packet: single %d", bsize));
2377 2377
2378 2378 break;
2379 2379 default:
2380 2380 MUTEX_EXIT(&rx_rbr_p->lock);
2381 2381 return;
2382 2382 }
2383 2383
2384 2384 switch (nxge_rdc_buf_offset) {
2385 2385 case SW_OFFSET_NO_OFFSET:
2386 2386 sw_offset_bytes = 0;
2387 2387 break;
2388 2388 case SW_OFFSET_64:
2389 2389 sw_offset_bytes = 64;
2390 2390 break;
2391 2391 case SW_OFFSET_128:
2392 2392 sw_offset_bytes = 128;
2393 2393 break;
2394 2394 case SW_OFFSET_192:
2395 2395 sw_offset_bytes = 192;
2396 2396 break;
2397 2397 case SW_OFFSET_256:
2398 2398 sw_offset_bytes = 256;
2399 2399 break;
2400 2400 case SW_OFFSET_320:
2401 2401 sw_offset_bytes = 320;
2402 2402 break;
2403 2403 case SW_OFFSET_384:
2404 2404 sw_offset_bytes = 384;
2405 2405 break;
2406 2406 case SW_OFFSET_448:
2407 2407 sw_offset_bytes = 448;
2408 2408 break;
2409 2409 default:
2410 2410 sw_offset_bytes = 0;
2411 2411 break;
2412 2412 }
2413 2413
2414 2414 DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma,
2415 2415 (buf_offset + sw_offset_bytes),
2416 2416 (hdr_size + l2_len),
2417 2417 DDI_DMA_SYNC_FORCPU);
2418 2418
2419 2419 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2420 2420 "==> nxge_receive_packet: after first dump:usage count"));
2421 2421
2422 2422 if (rx_msg_p->cur_usage_cnt == 0) {
2423 2423 if (rx_rbr_p->rbr_use_bcopy) {
2424 2424 atomic_inc_32(&rx_rbr_p->rbr_consumed);
2425 2425 if (rx_rbr_p->rbr_consumed <
2426 2426 rx_rbr_p->rbr_threshold_hi) {
2427 2427 if (rx_rbr_p->rbr_threshold_lo == 0 ||
2428 2428 ((rx_rbr_p->rbr_consumed >=
2429 2429 rx_rbr_p->rbr_threshold_lo) &&
2430 2430 (rx_rbr_p->rbr_bufsize_type >=
2431 2431 pktbufsz_type))) {
2432 2432 rx_msg_p->rx_use_bcopy = B_TRUE;
2433 2433 }
2434 2434 } else {
2435 2435 rx_msg_p->rx_use_bcopy = B_TRUE;
2436 2436 }
2437 2437 }
2438 2438 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2439 2439 "==> nxge_receive_packet: buf %d (new block) ",
2440 2440 bsize));
2441 2441
2442 2442 rx_msg_p->pkt_buf_size_code = pktbufsz_type;
2443 2443 rx_msg_p->pkt_buf_size = bsize;
2444 2444 rx_msg_p->cur_usage_cnt = 1;
2445 2445 if (pktbufsz_type == RCR_SINGLE_BLOCK) {
2446 2446 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2447 2447 "==> nxge_receive_packet: buf %d "
2448 2448 "(single block) ",
2449 2449 bsize));
2450 2450 /*
2451 2451 * Buffer can be reused once the free function
2452 2452 * is called.
2453 2453 */
2454 2454 rx_msg_p->max_usage_cnt = 1;
2455 2455 buffer_free = B_TRUE;
2456 2456 } else {
2457 2457 rx_msg_p->max_usage_cnt = rx_msg_p->block_size/bsize;
2458 2458 if (rx_msg_p->max_usage_cnt == 1) {
2459 2459 buffer_free = B_TRUE;
2460 2460 }
2461 2461 }
2462 2462 } else {
2463 2463 rx_msg_p->cur_usage_cnt++;
2464 2464 if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) {
2465 2465 buffer_free = B_TRUE;
2466 2466 }
2467 2467 }
2468 2468
2469 2469 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2470 2470 "msgbuf index = %d l2len %d bytes usage %d max_usage %d ",
2471 2471 msg_index, l2_len,
2472 2472 rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt));
2473 2473
2474 2474 if ((error_type) || (dcf_err) || (pkt_too_long_err)) {
2475 2475 rdc_stats->ierrors++;
2476 2476 if (dcf_err) {
2477 2477 rdc_stats->dcf_err++;
2478 2478 #ifdef NXGE_DEBUG
2479 2479 if (!rdc_stats->dcf_err) {
2480 2480 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2481 2481 "nxge_receive_packet: channel %d dcf_err rcr"
2482 2482 " 0x%llx", channel, rcr_entry));
2483 2483 }
2484 2484 #endif
2485 2485 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL,
2486 2486 NXGE_FM_EREPORT_RDMC_DCF_ERR);
2487 2487 } else if (pkt_too_long_err) {
2488 2488 rdc_stats->pkt_too_long_err++;
2489 2489 NXGE_DEBUG_MSG((nxgep, RX_CTL, " nxge_receive_packet:"
2490 2490 " channel %d packet length [%d] > "
2491 2491 "maxframesize [%d]", channel, l2_len + ETHERFCSL,
2492 2492 nxgep->mac.maxframesize));
2493 2493 } else {
2494 2494 /* Update error stats */
2495 2495 error_disp_cnt = NXGE_ERROR_SHOW_MAX;
2496 2496 rdc_stats->errlog.compl_err_type = error_type;
2497 2497
2498 2498 switch (error_type) {
2499 2499 /*
2500 2500 * Do not send FMA ereport for RCR_L2_ERROR and
2501 2501 * RCR_L4_CSUM_ERROR because most likely they indicate
2502 2502 * back pressure rather than HW failures.
2503 2503 */
2504 2504 case RCR_L2_ERROR:
2505 2505 rdc_stats->l2_err++;
2506 2506 if (rdc_stats->l2_err <
2507 2507 error_disp_cnt) {
2508 2508 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2509 2509 " nxge_receive_packet:"
2510 2510 " channel %d RCR L2_ERROR",
2511 2511 channel));
2512 2512 }
2513 2513 break;
2514 2514 case RCR_L4_CSUM_ERROR:
2515 2515 error_send_up = B_TRUE;
2516 2516 rdc_stats->l4_cksum_err++;
2517 2517 if (rdc_stats->l4_cksum_err <
2518 2518 error_disp_cnt) {
2519 2519 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2520 2520 " nxge_receive_packet:"
2521 2521 " channel %d"
2522 2522 " RCR L4_CSUM_ERROR", channel));
2523 2523 }
2524 2524 break;
2525 2525 /*
2526 2526 * Do not send FMA ereport for RCR_FFLP_SOFT_ERROR and
2527 2527 * RCR_ZCP_SOFT_ERROR because they reflect the same
2528 2528 * FFLP and ZCP errors that have been reported by
2529 2529 * nxge_fflp.c and nxge_zcp.c.
2530 2530 */
2531 2531 case RCR_FFLP_SOFT_ERROR:
2532 2532 error_send_up = B_TRUE;
2533 2533 rdc_stats->fflp_soft_err++;
2534 2534 if (rdc_stats->fflp_soft_err <
2535 2535 error_disp_cnt) {
2536 2536 NXGE_ERROR_MSG((nxgep,
2537 2537 NXGE_ERR_CTL,
2538 2538 " nxge_receive_packet:"
2539 2539 " channel %d"
2540 2540 " RCR FFLP_SOFT_ERROR", channel));
2541 2541 }
2542 2542 break;
2543 2543 case RCR_ZCP_SOFT_ERROR:
2544 2544 error_send_up = B_TRUE;
2545 2545 rdc_stats->fflp_soft_err++;
2546 2546 if (rdc_stats->zcp_soft_err <
2547 2547 error_disp_cnt)
2548 2548 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2549 2549 " nxge_receive_packet: Channel %d"
2550 2550 " RCR ZCP_SOFT_ERROR", channel));
2551 2551 break;
2552 2552 default:
2553 2553 rdc_stats->rcr_unknown_err++;
2554 2554 if (rdc_stats->rcr_unknown_err
2555 2555 < error_disp_cnt) {
2556 2556 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2557 2557 " nxge_receive_packet: Channel %d"
2558 2558 " RCR entry 0x%llx error 0x%x",
2559 2559 rcr_entry, channel, error_type));
2560 2560 }
2561 2561 break;
2562 2562 }
2563 2563 }
2564 2564
2565 2565 /*
2566 2566 * Update and repost buffer block if max usage
2567 2567 * count is reached.
2568 2568 */
2569 2569 if (error_send_up == B_FALSE) {
2570 2570 atomic_inc_32(&rx_msg_p->ref_cnt);
2571 2571 if (buffer_free == B_TRUE) {
2572 2572 rx_msg_p->free = B_TRUE;
2573 2573 }
2574 2574
2575 2575 MUTEX_EXIT(&rx_rbr_p->lock);
2576 2576 nxge_freeb(rx_msg_p);
2577 2577 return;
2578 2578 }
2579 2579 }
2580 2580
2581 2581 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2582 2582 "==> nxge_receive_packet: DMA sync second "));
2583 2583
2584 2584 bytes_read = rcr_p->rcvd_pkt_bytes;
2585 2585 skip_len = sw_offset_bytes + hdr_size;
2586 2586 if (!rx_msg_p->rx_use_bcopy) {
2587 2587 /*
2588 2588 * For loaned up buffers, the driver reference count
2589 2589 * will be incremented first and then the free state.
2590 2590 */
2591 2591 if ((nmp = nxge_dupb(rx_msg_p, buf_offset, bsize)) != NULL) {
2592 2592 if (first_entry) {
2593 2593 nmp->b_rptr = &nmp->b_rptr[skip_len];
2594 2594 if (l2_len < bsize - skip_len) {
2595 2595 nmp->b_wptr = &nmp->b_rptr[l2_len];
2596 2596 } else {
2597 2597 nmp->b_wptr = &nmp->b_rptr[bsize
2598 2598 - skip_len];
2599 2599 }
2600 2600 } else {
2601 2601 if (l2_len - bytes_read < bsize) {
2602 2602 nmp->b_wptr =
2603 2603 &nmp->b_rptr[l2_len - bytes_read];
2604 2604 } else {
2605 2605 nmp->b_wptr = &nmp->b_rptr[bsize];
2606 2606 }
2607 2607 }
2608 2608 }
2609 2609 } else {
2610 2610 if (first_entry) {
2611 2611 nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len,
2612 2612 l2_len < bsize - skip_len ?
2613 2613 l2_len : bsize - skip_len);
2614 2614 } else {
2615 2615 nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset,
2616 2616 l2_len - bytes_read < bsize ?
2617 2617 l2_len - bytes_read : bsize);
2618 2618 }
2619 2619 }
2620 2620 if (nmp != NULL) {
2621 2621 if (first_entry) {
2622 2622 /*
2623 2623 * Jumbo packets may be received with more than one
2624 2624 * buffer, increment ipackets for the first entry only.
2625 2625 */
2626 2626 rdc_stats->ipackets++;
2627 2627
2628 2628 /* Update ibytes for kstat. */
2629 2629 rdc_stats->ibytes += skip_len
2630 2630 + l2_len < bsize ? l2_len : bsize;
2631 2631 /*
2632 2632 * Update the number of bytes read so far for the
2633 2633 * current frame.
2634 2634 */
2635 2635 bytes_read = nmp->b_wptr - nmp->b_rptr;
2636 2636 } else {
2637 2637 rdc_stats->ibytes += l2_len - bytes_read < bsize ?
2638 2638 l2_len - bytes_read : bsize;
2639 2639 bytes_read += nmp->b_wptr - nmp->b_rptr;
2640 2640 }
2641 2641
2642 2642 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2643 2643 "==> nxge_receive_packet after dupb: "
2644 2644 "rbr consumed %d "
2645 2645 "pktbufsz_type %d "
2646 2646 "nmp $%p rptr $%p wptr $%p "
2647 2647 "buf_offset %d bzise %d l2_len %d skip_len %d",
2648 2648 rx_rbr_p->rbr_consumed,
2649 2649 pktbufsz_type,
2650 2650 nmp, nmp->b_rptr, nmp->b_wptr,
2651 2651 buf_offset, bsize, l2_len, skip_len));
2652 2652 } else {
2653 2653 cmn_err(CE_WARN, "!nxge_receive_packet: "
2654 2654 "update stats (error)");
2655 2655 atomic_inc_32(&rx_msg_p->ref_cnt);
2656 2656 if (buffer_free == B_TRUE) {
2657 2657 rx_msg_p->free = B_TRUE;
2658 2658 }
2659 2659 MUTEX_EXIT(&rx_rbr_p->lock);
2660 2660 nxge_freeb(rx_msg_p);
2661 2661 return;
2662 2662 }
2663 2663
2664 2664 if (buffer_free == B_TRUE) {
2665 2665 rx_msg_p->free = B_TRUE;
2666 2666 }
2667 2667
2668 2668 is_valid = (nmp != NULL);
2669 2669
2670 2670 rcr_p->rcvd_pkt_bytes = bytes_read;
2671 2671
2672 2672 MUTEX_EXIT(&rx_rbr_p->lock);
2673 2673
2674 2674 if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) {
2675 2675 atomic_inc_32(&rx_msg_p->ref_cnt);
2676 2676 nxge_freeb(rx_msg_p);
2677 2677 }
2678 2678
2679 2679 if (is_valid) {
2680 2680 nmp->b_cont = NULL;
2681 2681 if (first_entry) {
2682 2682 *mp = nmp;
2683 2683 *mp_cont = NULL;
2684 2684 } else {
2685 2685 *mp_cont = nmp;
2686 2686 }
2687 2687 }
2688 2688
2689 2689 /*
2690 2690 * ERROR, FRAG and PKT_TYPE are only reported in the first entry.
2691 2691 * If a packet is not fragmented and no error bit is set, then
2692 2692 * L4 checksum is OK.
2693 2693 */
2694 2694
2695 2695 if (is_valid && !multi) {
2696 2696 /*
2697 2697 * If the checksum flag nxge_chksum_offload
2698 2698 * is 1, TCP and UDP packets can be sent
2699 2699 * up with good checksum. If the checksum flag
2700 2700 * is set to 0, checksum reporting will apply to
2701 2701 * TCP packets only (workaround for a hardware bug).
2702 2702 * If the checksum flag nxge_cksum_offload is
2703 2703 * greater than 1, both TCP and UDP packets
2704 2704 * will not be reported its hardware checksum results.
2705 2705 */
2706 2706 if (nxge_cksum_offload == 1) {
2707 2707 is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP ||
2708 2708 pkt_type == RCR_PKT_IS_UDP) ?
2709 2709 B_TRUE: B_FALSE);
2710 2710 } else if (!nxge_cksum_offload) {
2711 2711 /* TCP checksum only. */
2712 2712 is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP) ?
2713 2713 B_TRUE: B_FALSE);
2714 2714 }
2715 2715
2716 2716 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_receive_packet: "
2717 2717 "is_valid 0x%x multi 0x%llx pkt %d frag %d error %d",
2718 2718 is_valid, multi, is_tcp_udp, frag, error_type));
2719 2719
2720 2720 if (is_tcp_udp && !frag && !error_type) {
2721 2721 mac_hcksum_set(nmp, 0, 0, 0, 0, HCK_FULLCKSUM_OK);
2722 2722 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2723 2723 "==> nxge_receive_packet: Full tcp/udp cksum "
2724 2724 "is_valid 0x%x multi 0x%llx pkt %d frag %d "
2725 2725 "error %d",
2726 2726 is_valid, multi, is_tcp_udp, frag, error_type));
2727 2727 }
2728 2728 }
2729 2729
2730 2730 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2731 2731 "==> nxge_receive_packet: *mp 0x%016llx", *mp));
2732 2732
2733 2733 *multi_p = (multi == RCR_MULTI_MASK);
2734 2734 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_receive_packet: "
2735 2735 "multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx",
2736 2736 *multi_p, nmp, *mp, *mp_cont));
2737 2737 }
2738 2738
2739 2739 /*
2740 2740 * Enable polling for a ring. Interrupt for the ring is disabled when
2741 2741 * the nxge interrupt comes (see nxge_rx_intr).
2742 2742 */
2743 2743 int
2744 2744 nxge_enable_poll(void *arg)
2745 2745 {
2746 2746 p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg;
2747 2747 p_rx_rcr_ring_t ringp;
2748 2748 p_nxge_t nxgep;
2749 2749 p_nxge_ldg_t ldgp;
2750 2750 uint32_t channel;
2751 2751
2752 2752 if (ring_handle == NULL) {
2753 2753 ASSERT(ring_handle != NULL);
2754 2754 return (0);
2755 2755 }
2756 2756
2757 2757 nxgep = ring_handle->nxgep;
2758 2758 channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index;
2759 2759 ringp = nxgep->rx_rcr_rings->rcr_rings[channel];
2760 2760 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2761 2761 "==> nxge_enable_poll: rdc %d ", ringp->rdc));
2762 2762 ldgp = ringp->ldgp;
2763 2763 if (ldgp == NULL) {
2764 2764 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2765 2765 "==> nxge_enable_poll: rdc %d NULL ldgp: no change",
2766 2766 ringp->rdc));
2767 2767 return (0);
2768 2768 }
2769 2769
2770 2770 MUTEX_ENTER(&ringp->lock);
2771 2771 /* enable polling */
2772 2772 if (ringp->poll_flag == 0) {
2773 2773 ringp->poll_flag = 1;
2774 2774 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2775 2775 "==> nxge_enable_poll: rdc %d set poll flag to 1",
2776 2776 ringp->rdc));
2777 2777 }
2778 2778
2779 2779 MUTEX_EXIT(&ringp->lock);
2780 2780 return (0);
2781 2781 }
2782 2782 /*
2783 2783 * Disable polling for a ring and enable its interrupt.
2784 2784 */
2785 2785 int
2786 2786 nxge_disable_poll(void *arg)
2787 2787 {
2788 2788 p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg;
2789 2789 p_rx_rcr_ring_t ringp;
2790 2790 p_nxge_t nxgep;
2791 2791 uint32_t channel;
2792 2792
2793 2793 if (ring_handle == NULL) {
2794 2794 ASSERT(ring_handle != NULL);
2795 2795 return (0);
2796 2796 }
2797 2797
2798 2798 nxgep = ring_handle->nxgep;
2799 2799 channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index;
2800 2800 ringp = nxgep->rx_rcr_rings->rcr_rings[channel];
2801 2801
2802 2802 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2803 2803 "==> nxge_disable_poll: rdc %d poll_flag %d", ringp->rdc));
2804 2804
2805 2805 MUTEX_ENTER(&ringp->lock);
2806 2806
2807 2807 /* disable polling: enable interrupt */
2808 2808 if (ringp->poll_flag) {
2809 2809 npi_handle_t handle;
2810 2810 rx_dma_ctl_stat_t cs;
2811 2811 uint8_t channel;
2812 2812 p_nxge_ldg_t ldgp;
2813 2813
2814 2814 /*
2815 2815 * Get the control and status for this channel.
2816 2816 */
2817 2817 handle = NXGE_DEV_NPI_HANDLE(nxgep);
2818 2818 channel = ringp->rdc;
2819 2819 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG,
2820 2820 channel, &cs.value);
2821 2821
2822 2822 /*
2823 2823 * Enable mailbox update
2824 2824 * Since packets were not read and the hardware uses
2825 2825 * bits pktread and ptrread to update the queue
2826 2826 * length, we need to set both bits to 0.
2827 2827 */
2828 2828 cs.bits.ldw.pktread = 0;
2829 2829 cs.bits.ldw.ptrread = 0;
2830 2830 cs.bits.hdw.mex = 1;
2831 2831 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
2832 2832 cs.value);
2833 2833
2834 2834 /*
2835 2835 * Rearm this logical group if this is a single device
2836 2836 * group.
2837 2837 */
2838 2838 ldgp = ringp->ldgp;
2839 2839 if (ldgp == NULL) {
2840 2840 ringp->poll_flag = 0;
2841 2841 MUTEX_EXIT(&ringp->lock);
2842 2842 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2843 2843 "==> nxge_disable_poll: no ldgp rdc %d "
2844 2844 "(still set poll to 0", ringp->rdc));
2845 2845 return (0);
2846 2846 }
2847 2847 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2848 2848 "==> nxge_disable_poll: rdc %d ldgp $%p (enable intr)",
2849 2849 ringp->rdc, ldgp));
2850 2850 if (ldgp->nldvs == 1) {
2851 2851 if (isLDOMguest(nxgep)) {
2852 2852 ldgp->arm = B_TRUE;
2853 2853 nxge_hio_ldgimgn(nxgep, ldgp);
2854 2854 } else {
2855 2855 ldgimgm_t mgm;
2856 2856 mgm.value = 0;
2857 2857 mgm.bits.ldw.arm = 1;
2858 2858 mgm.bits.ldw.timer = ldgp->ldg_timer;
2859 2859 NXGE_REG_WR64(handle,
2860 2860 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg),
2861 2861 mgm.value);
2862 2862 }
2863 2863 }
2864 2864 ringp->poll_flag = 0;
2865 2865 }
2866 2866
2867 2867 MUTEX_EXIT(&ringp->lock);
2868 2868 return (0);
2869 2869 }
2870 2870
2871 2871 /*
2872 2872 * Poll 'bytes_to_pickup' bytes of message from the rx ring.
2873 2873 */
2874 2874 mblk_t *
2875 2875 nxge_rx_poll(void *arg, int bytes_to_pickup)
2876 2876 {
2877 2877 p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg;
2878 2878 p_rx_rcr_ring_t rcr_p;
2879 2879 p_nxge_t nxgep;
2880 2880 npi_handle_t handle;
2881 2881 rx_dma_ctl_stat_t cs;
2882 2882 mblk_t *mblk;
2883 2883 p_nxge_ldv_t ldvp;
2884 2884 uint32_t channel;
2885 2885
2886 2886 nxgep = ring_handle->nxgep;
2887 2887
2888 2888 /*
2889 2889 * Get the control and status for this channel.
2890 2890 */
2891 2891 handle = NXGE_DEV_NPI_HANDLE(nxgep);
2892 2892 channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index;
2893 2893 rcr_p = nxgep->rx_rcr_rings->rcr_rings[channel];
2894 2894 MUTEX_ENTER(&rcr_p->lock);
2895 2895 ASSERT(rcr_p->poll_flag == 1);
2896 2896
2897 2897 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, rcr_p->rdc, &cs.value);
2898 2898
2899 2899 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2900 2900 "==> nxge_rx_poll: calling nxge_rx_pkts: rdc %d poll_flag %d",
2901 2901 rcr_p->rdc, rcr_p->poll_flag));
2902 2902 mblk = nxge_rx_pkts(nxgep, rcr_p, cs, bytes_to_pickup);
2903 2903
2904 2904 ldvp = rcr_p->ldvp;
2905 2905 /* error events. */
2906 2906 if (ldvp && (cs.value & RX_DMA_CTL_STAT_ERROR)) {
2907 2907 (void) nxge_rx_err_evnts(nxgep, ldvp->vdma_index, cs);
2908 2908 }
2909 2909
2910 2910 MUTEX_EXIT(&rcr_p->lock);
2911 2911
2912 2912 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2913 2913 "<== nxge_rx_poll: rdc %d mblk $%p", rcr_p->rdc, mblk));
2914 2914 return (mblk);
2915 2915 }
2916 2916
2917 2917
/*
 * nxge_rx_err_evnts
 *
 *	Decode the RX DMA control/status word (cs) for <channel>: bump
 *	the per-channel statistics counter for each error bit that is
 *	set, post an FMA ereport for hardware faults, and run fatal
 *	error recovery for the channel and/or port when required.
 *
 *	Returns NXGE_OK, or an error status when recovery fails.  In an
 *	LDOMs guest no recovery is attempted and NXGE_ERROR is returned
 *	for any fatal condition.
 */
/*ARGSUSED*/
static nxge_status_t
nxge_rx_err_evnts(p_nxge_t nxgep, int channel, rx_dma_ctl_stat_t cs)
{
	p_nxge_rx_ring_stats_t	rdc_stats;
	npi_handle_t		handle;
	npi_status_t		rs;
	boolean_t		rxchan_fatal = B_FALSE;
	boolean_t		rxport_fatal = B_FALSE;
	uint8_t			portn;
	nxge_status_t		status = NXGE_OK;
	/* Used only to throttle the repeated "rcrfull" console message. */
	uint32_t		error_disp_cnt = NXGE_ERROR_SHOW_MAX;
	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_rx_err_evnts"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	portn = nxgep->mac.portnum;
	rdc_stats = &nxgep->statsp->rdc_stats[channel];

	/*
	 * Fatal channel errors: each sets rxchan_fatal so the channel
	 * is recovered below after all bits have been accounted for.
	 */
	if (cs.bits.hdw.rbr_tmout) {
		rdc_stats->rx_rbr_tmout++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_RBR_TMOUT);
		rxchan_fatal = B_TRUE;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts: rx_rbr_timeout"));
	}
	if (cs.bits.hdw.rsp_cnt_err) {
		rdc_stats->rsp_cnt_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR);
		rxchan_fatal = B_TRUE;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "rsp_cnt_err", channel));
	}
	if (cs.bits.hdw.byte_en_bus) {
		rdc_stats->byte_en_bus++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "fatal error: byte_en_bus", channel));
		rxchan_fatal = B_TRUE;
	}
	if (cs.bits.hdw.rsp_dat_err) {
		rdc_stats->rsp_dat_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR);
		rxchan_fatal = B_TRUE;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "fatal error: rsp_dat_err", channel));
	}
	if (cs.bits.hdw.rcr_ack_err) {
		rdc_stats->rcr_ack_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR);
		rxchan_fatal = B_TRUE;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "fatal error: rcr_ack_err", channel));
	}
	if (cs.bits.hdw.dc_fifo_err) {
		rdc_stats->dc_fifo_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR);
		/* This is not a fatal error! */
		/*
		 * Not fatal to this channel, but it does require port
		 * (IPP) recovery: rxport_fatal drives that path below.
		 */
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "dc_fifo_err", channel));
		rxport_fatal = B_TRUE;
	}
	if ((cs.bits.hdw.rcr_sha_par) || (cs.bits.hdw.rbr_pre_par)) {
		/*
		 * Latch the parity error log before classifying which
		 * of the two parity conditions occurred.
		 */
		if ((rs = npi_rxdma_ring_perr_stat_get(handle,
		    &rdc_stats->errlog.pre_par,
		    &rdc_stats->errlog.sha_par))
		    != NPI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_rx_err_evnts(channel %d): "
			    "rcr_sha_par: get perr", channel));
			return (NXGE_ERROR | rs);
		}
		if (cs.bits.hdw.rcr_sha_par) {
			rdc_stats->rcr_sha_par++;
			NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
			    NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR);
			rxchan_fatal = B_TRUE;
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_rx_err_evnts(channel %d): "
			    "fatal error: rcr_sha_par", channel));
		}
		if (cs.bits.hdw.rbr_pre_par) {
			rdc_stats->rbr_pre_par++;
			NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
			    NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR);
			rxchan_fatal = B_TRUE;
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_rx_err_evnts(channel %d): "
			    "fatal error: rbr_pre_par", channel));
		}
	}
	/*
	 * The Following 4 status bits are for information, the system
	 * is running fine. There is no need to send FMA ereports or
	 * log messages.
	 */
	if (cs.bits.hdw.port_drop_pkt) {
		rdc_stats->port_drop_pkt++;
	}
	if (cs.bits.hdw.wred_drop) {
		rdc_stats->wred_drop++;
	}
	if (cs.bits.hdw.rbr_pre_empty) {
		rdc_stats->rbr_pre_empty++;
	}
	if (cs.bits.hdw.rcr_shadow_full) {
		rdc_stats->rcr_shadow_full++;
	}
	if (cs.bits.hdw.config_err) {
		rdc_stats->config_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_CONFIG_ERR);
		rxchan_fatal = B_TRUE;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "config error", channel));
	}
	if (cs.bits.hdw.rcrincon) {
		rdc_stats->rcrincon++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_RCRINCON);
		rxchan_fatal = B_TRUE;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "fatal error: rcrincon error", channel));
	}
	if (cs.bits.hdw.rcrfull) {
		rdc_stats->rcrfull++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_RCRFULL);
		rxchan_fatal = B_TRUE;
		/* Throttle the console message after repeated faults. */
		if (rdc_stats->rcrfull < error_disp_cnt)
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_rx_err_evnts(channel %d): "
			    "fatal error: rcrfull error", channel));
	}
	if (cs.bits.hdw.rbr_empty) {
		/*
		 * This bit is for information, there is no need
		 * send FMA ereport or log a message.
		 */
		rdc_stats->rbr_empty++;
	}
	if (cs.bits.hdw.rbrfull) {
		rdc_stats->rbrfull++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_RBRFULL);
		rxchan_fatal = B_TRUE;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "fatal error: rbr_full error", channel));
	}
	if (cs.bits.hdw.rbrlogpage) {
		rdc_stats->rbrlogpage++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_RBRLOGPAGE);
		rxchan_fatal = B_TRUE;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "fatal error: rbr logical page error", channel));
	}
	if (cs.bits.hdw.cfiglogpage) {
		rdc_stats->cfiglogpage++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE);
		rxchan_fatal = B_TRUE;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "fatal error: cfig logical page error", channel));
	}

	/* Port-level recovery (IPP) for faults that affect all channels. */
	if (rxport_fatal) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_rx_err_evnts: fatal error on Port #%d\n",
		    portn));
		if (isLDOMguest(nxgep)) {
			status = NXGE_ERROR;
		} else {
			status = nxge_ipp_fatal_err_recover(nxgep);
			if (status == NXGE_OK) {
				FM_SERVICE_RESTORED(nxgep);
			}
		}
	}

	/* Channel-level recovery; may overwrite the port status above. */
	if (rxchan_fatal) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_rx_err_evnts: fatal error on Channel #%d\n",
		    channel));
		if (isLDOMguest(nxgep)) {
			status = NXGE_ERROR;
		} else {
			status = nxge_rxdma_fatal_err_recover(nxgep, channel);
			if (status == NXGE_OK) {
				FM_SERVICE_RESTORED(nxgep);
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rx_err_evnts"));

	return (status);
}
3131 3131
3132 3132 /*
3133 3133 * nxge_rdc_hvio_setup
3134 3134 *
3135 3135 * This code appears to setup some Hypervisor variables.
3136 3136 *
3137 3137 * Arguments:
3138 3138 * nxgep
3139 3139 * channel
3140 3140 *
3141 3141 * Notes:
3142 3142 * What does NIU_LP_WORKAROUND mean?
3143 3143 *
3144 3144 * NPI/NXGE function calls:
3145 3145 * na
3146 3146 *
3147 3147 * Context:
3148 3148 * Any domain
3149 3149 */
3150 3150 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
3151 3151 static void
3152 3152 nxge_rdc_hvio_setup(
3153 3153 nxge_t *nxgep, int channel)
3154 3154 {
3155 3155 nxge_dma_common_t *dma_common;
3156 3156 nxge_dma_common_t *dma_control;
3157 3157 rx_rbr_ring_t *ring;
3158 3158
3159 3159 ring = nxgep->rx_rbr_rings->rbr_rings[channel];
3160 3160 dma_common = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
3161 3161
3162 3162 ring->hv_set = B_FALSE;
3163 3163
3164 3164 ring->hv_rx_buf_base_ioaddr_pp = (uint64_t)
3165 3165 dma_common->orig_ioaddr_pp;
3166 3166 ring->hv_rx_buf_ioaddr_size = (uint64_t)
3167 3167 dma_common->orig_alength;
3168 3168
3169 3169 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: "
3170 3170 "channel %d data buf base io $%lx ($%p) size 0x%lx (%ld 0x%lx)",
3171 3171 channel, ring->hv_rx_buf_base_ioaddr_pp,
3172 3172 dma_common->ioaddr_pp, ring->hv_rx_buf_ioaddr_size,
3173 3173 dma_common->orig_alength, dma_common->orig_alength));
3174 3174
3175 3175 dma_control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];
3176 3176
3177 3177 ring->hv_rx_cntl_base_ioaddr_pp =
3178 3178 (uint64_t)dma_control->orig_ioaddr_pp;
3179 3179 ring->hv_rx_cntl_ioaddr_size =
3180 3180 (uint64_t)dma_control->orig_alength;
3181 3181
3182 3182 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: "
3183 3183 "channel %d cntl base io $%p ($%p) size 0x%llx (%d 0x%x)",
3184 3184 channel, ring->hv_rx_cntl_base_ioaddr_pp,
3185 3185 dma_control->ioaddr_pp, ring->hv_rx_cntl_ioaddr_size,
3186 3186 dma_control->orig_alength, dma_control->orig_alength));
3187 3187 }
3188 3188 #endif
3189 3189
3190 3190 /*
3191 3191 * nxge_map_rxdma
3192 3192 *
3193 3193 * Map an RDC into our kernel space.
3194 3194 *
3195 3195 * Arguments:
3196 3196 * nxgep
3197 3197 * channel The channel to map.
3198 3198 *
3199 3199 * Notes:
3200 3200 * 1. Allocate & initialise a memory pool, if necessary.
3201 3201 * 2. Allocate however many receive buffers are required.
3202 3202 * 3. Setup buffers, descriptors, and mailbox.
3203 3203 *
3204 3204 * NPI/NXGE function calls:
3205 3205 * nxge_alloc_rx_mem_pool()
3206 3206 * nxge_alloc_rbb()
3207 3207 * nxge_map_rxdma_channel()
3208 3208 *
3209 3209 * Registers accessed:
3210 3210 *
3211 3211 * Context:
3212 3212 * Any domain
3213 3213 */
3214 3214 static nxge_status_t
3215 3215 nxge_map_rxdma(p_nxge_t nxgep, int channel)
3216 3216 {
3217 3217 nxge_dma_common_t **data;
3218 3218 nxge_dma_common_t **control;
3219 3219 rx_rbr_ring_t **rbr_ring;
3220 3220 rx_rcr_ring_t **rcr_ring;
3221 3221 rx_mbox_t **mailbox;
3222 3222 uint32_t chunks;
3223 3223
3224 3224 nxge_status_t status;
3225 3225
3226 3226 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma"));
3227 3227
3228 3228 if (!nxgep->rx_buf_pool_p) {
3229 3229 if (nxge_alloc_rx_mem_pool(nxgep) != NXGE_OK) {
3230 3230 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3231 3231 "<== nxge_map_rxdma: buf not allocated"));
3232 3232 return (NXGE_ERROR);
3233 3233 }
3234 3234 }
3235 3235
3236 3236 if (nxge_alloc_rxb(nxgep, channel) != NXGE_OK)
3237 3237 return (NXGE_ERROR);
3238 3238
3239 3239 /*
3240 3240 * Map descriptors from the buffer polls for each dma channel.
3241 3241 */
3242 3242
3243 3243 /*
3244 3244 * Set up and prepare buffer blocks, descriptors
3245 3245 * and mailbox.
3246 3246 */
3247 3247 data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
3248 3248 rbr_ring = &nxgep->rx_rbr_rings->rbr_rings[channel];
3249 3249 chunks = nxgep->rx_buf_pool_p->num_chunks[channel];
3250 3250
3251 3251 control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];
3252 3252 rcr_ring = &nxgep->rx_rcr_rings->rcr_rings[channel];
3253 3253
3254 3254 mailbox = &nxgep->rx_mbox_areas_p->rxmbox_areas[channel];
3255 3255
3256 3256 status = nxge_map_rxdma_channel(nxgep, channel, data, rbr_ring,
3257 3257 chunks, control, rcr_ring, mailbox);
3258 3258 if (status != NXGE_OK) {
3259 3259 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3260 3260 "==> nxge_map_rxdma: nxge_map_rxdma_channel(%d) "
3261 3261 "returned 0x%x",
3262 3262 channel, status));
3263 3263 return (status);
3264 3264 }
3265 3265 nxgep->rx_rbr_rings->rbr_rings[channel]->index = (uint16_t)channel;
3266 3266 nxgep->rx_rcr_rings->rcr_rings[channel]->index = (uint16_t)channel;
3267 3267 nxgep->rx_rcr_rings->rcr_rings[channel]->rdc_stats =
3268 3268 &nxgep->statsp->rdc_stats[channel];
3269 3269
3270 3270 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
3271 3271 if (!isLDOMguest(nxgep))
3272 3272 nxge_rdc_hvio_setup(nxgep, channel);
3273 3273 #endif
3274 3274
3275 3275 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3276 3276 "<== nxge_map_rxdma: (status 0x%x channel %d)", status, channel));
3277 3277
3278 3278 return (status);
3279 3279 }
3280 3280
3281 3281 static void
3282 3282 nxge_unmap_rxdma(p_nxge_t nxgep, int channel)
3283 3283 {
3284 3284 rx_rbr_ring_t *rbr_ring;
3285 3285 rx_rcr_ring_t *rcr_ring;
3286 3286 rx_mbox_t *mailbox;
3287 3287
3288 3288 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_unmap_rxdma(%d)", channel));
3289 3289
3290 3290 if (!nxgep->rx_rbr_rings || !nxgep->rx_rcr_rings ||
3291 3291 !nxgep->rx_mbox_areas_p)
3292 3292 return;
3293 3293
3294 3294 rbr_ring = nxgep->rx_rbr_rings->rbr_rings[channel];
3295 3295 rcr_ring = nxgep->rx_rcr_rings->rcr_rings[channel];
3296 3296 mailbox = nxgep->rx_mbox_areas_p->rxmbox_areas[channel];
3297 3297
3298 3298 if (!rbr_ring || !rcr_ring || !mailbox)
3299 3299 return;
3300 3300
3301 3301 (void) nxge_unmap_rxdma_channel(
3302 3302 nxgep, channel, rbr_ring, rcr_ring, mailbox);
3303 3303
3304 3304 nxge_free_rxb(nxgep, channel);
3305 3305
3306 3306 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma"));
3307 3307 }
3308 3308
3309 3309 nxge_status_t
3310 3310 nxge_map_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
3311 3311 p_nxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p,
3312 3312 uint32_t num_chunks,
3313 3313 p_nxge_dma_common_t *dma_cntl_p, p_rx_rcr_ring_t *rcr_p,
3314 3314 p_rx_mbox_t *rx_mbox_p)
3315 3315 {
3316 3316 int status = NXGE_OK;
3317 3317
3318 3318 /*
3319 3319 * Set up and prepare buffer blocks, descriptors
3320 3320 * and mailbox.
3321 3321 */
3322 3322 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3323 3323 "==> nxge_map_rxdma_channel (channel %d)", channel));
3324 3324 /*
3325 3325 * Receive buffer blocks
3326 3326 */
3327 3327 status = nxge_map_rxdma_channel_buf_ring(nxgep, channel,
3328 3328 dma_buf_p, rbr_p, num_chunks);
3329 3329 if (status != NXGE_OK) {
3330 3330 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3331 3331 "==> nxge_map_rxdma_channel (channel %d): "
3332 3332 "map buffer failed 0x%x", channel, status));
3333 3333 goto nxge_map_rxdma_channel_exit;
3334 3334 }
3335 3335
3336 3336 /*
3337 3337 * Receive block ring, completion ring and mailbox.
3338 3338 */
3339 3339 status = nxge_map_rxdma_channel_cfg_ring(nxgep, channel,
3340 3340 dma_cntl_p, rbr_p, rcr_p, rx_mbox_p);
3341 3341 if (status != NXGE_OK) {
3342 3342 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3343 3343 "==> nxge_map_rxdma_channel (channel %d): "
3344 3344 "map config failed 0x%x", channel, status));
3345 3345 goto nxge_map_rxdma_channel_fail2;
3346 3346 }
3347 3347
3348 3348 goto nxge_map_rxdma_channel_exit;
3349 3349
3350 3350 nxge_map_rxdma_channel_fail3:
3351 3351 /* Free rbr, rcr */
3352 3352 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3353 3353 "==> nxge_map_rxdma_channel: free rbr/rcr "
3354 3354 "(status 0x%x channel %d)",
3355 3355 status, channel));
3356 3356 nxge_unmap_rxdma_channel_cfg_ring(nxgep,
3357 3357 *rcr_p, *rx_mbox_p);
3358 3358
3359 3359 nxge_map_rxdma_channel_fail2:
3360 3360 /* Free buffer blocks */
3361 3361 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3362 3362 "==> nxge_map_rxdma_channel: free rx buffers"
3363 3363 "(nxgep 0x%x status 0x%x channel %d)",
3364 3364 nxgep, status, channel));
3365 3365 nxge_unmap_rxdma_channel_buf_ring(nxgep, *rbr_p);
3366 3366
3367 3367 status = NXGE_ERROR;
3368 3368
3369 3369 nxge_map_rxdma_channel_exit:
3370 3370 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3371 3371 "<== nxge_map_rxdma_channel: "
3372 3372 "(nxgep 0x%x status 0x%x channel %d)",
3373 3373 nxgep, status, channel));
3374 3374
3375 3375 return (status);
3376 3376 }
3377 3377
3378 3378 /*ARGSUSED*/
3379 3379 static void
3380 3380 nxge_unmap_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
3381 3381 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p)
3382 3382 {
3383 3383 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3384 3384 "==> nxge_unmap_rxdma_channel (channel %d)", channel));
3385 3385
3386 3386 /*
3387 3387 * unmap receive block ring, completion ring and mailbox.
3388 3388 */
3389 3389 (void) nxge_unmap_rxdma_channel_cfg_ring(nxgep,
3390 3390 rcr_p, rx_mbox_p);
3391 3391
3392 3392 /* unmap buffer blocks */
3393 3393 (void) nxge_unmap_rxdma_channel_buf_ring(nxgep, rbr_p);
3394 3394
3395 3395 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma_channel"));
3396 3396 }
3397 3397
/*
 * nxge_map_rxdma_channel_cfg_ring
 *
 *	Build the control structures for one RX DMA channel out of the
 *	channel's control DMA area (*dma_cntl_p): the receive block ring
 *	(RBR) descriptors, the receive completion ring (RCR), and the
 *	mailbox.  Also precomputes the register images (cfga/cfgb/kick,
 *	cfig1/cfig2) that will later be written to hardware.
 *
 *	On return, *rcr_p and *rx_mbox_p point at newly allocated soft
 *	state (freed by nxge_unmap_rxdma_channel_cfg_ring()), and the
 *	RBR (already allocated by the buf-ring step) is cross-linked
 *	with the RCR.  Always returns NXGE_OK.
 */
/*ARGSUSED*/
static nxge_status_t
nxge_map_rxdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel,
    p_nxge_dma_common_t *dma_cntl_p, p_rx_rbr_ring_t *rbr_p,
    p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p)
{
	p_rx_rbr_ring_t		rbrp;
	p_rx_rcr_ring_t		rcrp;
	p_rx_mbox_t		mboxp;
	p_nxge_dma_common_t	cntl_dmap;
	p_nxge_dma_common_t	dmap;
	p_rx_msg_t		*rx_msg_ring;
	p_rx_msg_t		rx_msg_p;
	p_rbr_cfig_a_t		rcfga_p;
	p_rbr_cfig_b_t		rcfgb_p;
	p_rcrcfig_a_t		cfga_p;
	p_rcrcfig_b_t		cfgb_p;
	p_rxdma_cfig1_t		cfig1_p;
	p_rxdma_cfig2_t		cfig2_p;
	p_rbr_kick_t		kick_p;
	uint32_t		dmaaddrp;
	uint32_t		*rbr_vaddrp;
	uint32_t		bkaddr;
	nxge_status_t		status = NXGE_OK;
	int			i;
	uint32_t		nxge_port_rcr_size;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_map_rxdma_channel_cfg_ring"));

	cntl_dmap = *dma_cntl_p;

	/* Map in the receive block ring */
	rbrp = *rbr_p;
	/* Carve rbb_max 4-byte RBR descriptors out of the control area. */
	dmap = (p_nxge_dma_common_t)&rbrp->rbr_desc;
	nxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4);
	/*
	 * Zero out buffer block ring descriptors.
	 */
	bzero((caddr_t)dmap->kaddrp, dmap->alength);

	/* Precompute the RBR config-A/B and kick register images. */
	rcfga_p = &(rbrp->rbr_cfga);
	rcfgb_p = &(rbrp->rbr_cfgb);
	kick_p = &(rbrp->rbr_kick);
	rcfga_p->value = 0;
	rcfgb_p->value = 0;
	kick_p->value = 0;
	rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress;
	rcfga_p->value = (rbrp->rbr_addr &
	    (RBR_CFIG_A_STDADDR_MASK |
	    RBR_CFIG_A_STDADDR_BASE_MASK));
	rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT);

	/* Advertise all three buffer sizes as valid. */
	rcfgb_p->bits.ldw.bufsz0 = rbrp->pkt_buf_size0;
	rcfgb_p->bits.ldw.vld0 = 1;
	rcfgb_p->bits.ldw.bufsz1 = rbrp->pkt_buf_size1;
	rcfgb_p->bits.ldw.vld1 = 1;
	rcfgb_p->bits.ldw.bufsz2 = rbrp->pkt_buf_size2;
	rcfgb_p->bits.ldw.vld2 = 1;
	rcfgb_p->bits.ldw.bksize = nxgep->rx_bksize_code;

	/*
	 * For each buffer block, enter receive block address to the ring.
	 */
	rbr_vaddrp = (uint32_t *)dmap->kaddrp;
	rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp;
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_map_rxdma_channel_cfg_ring: channel %d "
	    "rbr_vaddrp $%p", dma_channel, rbr_vaddrp));

	rx_msg_ring = rbrp->rx_msg_ring;
	for (i = 0; i < rbrp->tnblocks; i++) {
		rx_msg_p = rx_msg_ring[i];
		rx_msg_p->nxgep = nxgep;
		rx_msg_p->rx_rbr_p = rbrp;
		/* Descriptor holds the block address >> RBR_BKADDR_SHIFT. */
		bkaddr = (uint32_t)
		    ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress
		    >> RBR_BKADDR_SHIFT));
		rx_msg_p->free = B_FALSE;
		rx_msg_p->max_usage_cnt = 0xbaddcafe;

		*rbr_vaddrp++ = bkaddr;
	}

	kick_p->bits.ldw.bkadd = rbrp->rbb_max;
	rbrp->rbr_wr_index = (rbrp->rbb_max - 1);

	rbrp->rbr_rd_index = 0;

	rbrp->rbr_consumed = 0;
	rbrp->rbr_use_bcopy = B_TRUE;
	rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0;
	/*
	 * Do bcopy on packets greater than bcopy size once
	 * the lo threshold is reached.
	 * This lo threshold should be less than the hi threshold.
	 *
	 * Do bcopy on every packet once the hi threshold is reached.
	 */
	if (nxge_rx_threshold_lo >= nxge_rx_threshold_hi) {
		/* default it to use hi */
		nxge_rx_threshold_lo = nxge_rx_threshold_hi;
	}

	/* Clamp the global tunable to the largest supported type. */
	if (nxge_rx_buf_size_type > NXGE_RBR_TYPE2) {
		nxge_rx_buf_size_type = NXGE_RBR_TYPE2;
	}
	rbrp->rbr_bufsize_type = nxge_rx_buf_size_type;

	/* Scale the hi threshold tunable (0..8) into a ring count. */
	switch (nxge_rx_threshold_hi) {
	default:
	case NXGE_RX_COPY_NONE:
		/* Do not do bcopy at all */
		rbrp->rbr_use_bcopy = B_FALSE;
		rbrp->rbr_threshold_hi = rbrp->rbb_max;
		break;

	case NXGE_RX_COPY_1:
	case NXGE_RX_COPY_2:
	case NXGE_RX_COPY_3:
	case NXGE_RX_COPY_4:
	case NXGE_RX_COPY_5:
	case NXGE_RX_COPY_6:
	case NXGE_RX_COPY_7:
		rbrp->rbr_threshold_hi =
		    rbrp->rbb_max *
		    (nxge_rx_threshold_hi)/NXGE_RX_BCOPY_SCALE;
		break;

	case NXGE_RX_COPY_ALL:
		rbrp->rbr_threshold_hi = 0;
		break;
	}

	/* Same scaling for the lo threshold. */
	switch (nxge_rx_threshold_lo) {
	default:
	case NXGE_RX_COPY_NONE:
		/* Do not do bcopy at all */
		if (rbrp->rbr_use_bcopy) {
			rbrp->rbr_use_bcopy = B_FALSE;
		}
		rbrp->rbr_threshold_lo = rbrp->rbb_max;
		break;

	case NXGE_RX_COPY_1:
	case NXGE_RX_COPY_2:
	case NXGE_RX_COPY_3:
	case NXGE_RX_COPY_4:
	case NXGE_RX_COPY_5:
	case NXGE_RX_COPY_6:
	case NXGE_RX_COPY_7:
		rbrp->rbr_threshold_lo =
		    rbrp->rbb_max *
		    (nxge_rx_threshold_lo)/NXGE_RX_BCOPY_SCALE;
		break;

	case NXGE_RX_COPY_ALL:
		rbrp->rbr_threshold_lo = 0;
		break;
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "nxge_map_rxdma_channel_cfg_ring: channel %d "
	    "rbb_max %d "
	    "rbrp->rbr_bufsize_type %d "
	    "rbb_threshold_hi %d "
	    "rbb_threshold_lo %d",
	    dma_channel,
	    rbrp->rbb_max,
	    rbrp->rbr_bufsize_type,
	    rbrp->rbr_threshold_hi,
	    rbrp->rbr_threshold_lo));

	/* Reset the logical-page register images; enable pages 0 and 1. */
	rbrp->page_valid.value = 0;
	rbrp->page_mask_1.value = rbrp->page_mask_2.value = 0;
	rbrp->page_value_1.value = rbrp->page_value_2.value = 0;
	rbrp->page_reloc_1.value = rbrp->page_reloc_2.value = 0;
	rbrp->page_hdl.value = 0;

	rbrp->page_valid.bits.ldw.page0 = 1;
	rbrp->page_valid.bits.ldw.page1 = 1;

	/* Map in the receive completion ring */
	rcrp = (p_rx_rcr_ring_t)
	    KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP);
	rcrp->rdc = dma_channel;

	nxge_port_rcr_size = nxgep->nxge_port_rcr_size;
	rcrp->comp_size = nxge_port_rcr_size;
	/* comp_wrap_mask assumes nxge_port_rcr_size is a power of two. */
	rcrp->comp_wrap_mask = nxge_port_rcr_size - 1;

	rcrp->max_receive_pkts = nxge_max_rx_pkts;

	/* Carve the RCR entries out of the same control area. */
	dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
	nxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size,
	    sizeof (rcr_entry_t));
	rcrp->comp_rd_index = 0;
	rcrp->comp_wt_index = 0;
	rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p =
	    (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc);
#if defined(__i386)
	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
	    (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
#else
	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
	    (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
#endif

	rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p +
	    (nxge_port_rcr_size - 1);
	rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp +
	    (nxge_port_rcr_size - 1);

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_map_rxdma_channel_cfg_ring: "
	    "channel %d "
	    "rbr_vaddrp $%p "
	    "rcr_desc_rd_head_p $%p "
	    "rcr_desc_rd_head_pp $%p "
	    "rcr_desc_rd_last_p $%p "
	    "rcr_desc_rd_last_pp $%p ",
	    dma_channel,
	    rbr_vaddrp,
	    rcrp->rcr_desc_rd_head_p,
	    rcrp->rcr_desc_rd_head_pp,
	    rcrp->rcr_desc_last_p,
	    rcrp->rcr_desc_last_pp));

	/*
	 * Zero out buffer block ring descriptors.
	 */
	bzero((caddr_t)dmap->kaddrp, dmap->alength);

	/* Clamp interrupt pacing tunables to their supported minimums. */
	rcrp->intr_timeout = (nxgep->intr_timeout <
	    NXGE_RDC_RCR_TIMEOUT_MIN) ? NXGE_RDC_RCR_TIMEOUT_MIN :
	    nxgep->intr_timeout;

	rcrp->intr_threshold = (nxgep->intr_threshold <
	    NXGE_RDC_RCR_THRESHOLD_MIN) ? NXGE_RDC_RCR_THRESHOLD_MIN :
	    nxgep->intr_threshold;

	rcrp->full_hdr_flag = B_FALSE;

	rcrp->sw_priv_hdr_len = nxge_rdc_buf_offset;


	/* Precompute the RCR config-A/B register images. */
	cfga_p = &(rcrp->rcr_cfga);
	cfgb_p = &(rcrp->rcr_cfgb);
	cfga_p->value = 0;
	cfgb_p->value = 0;
	rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress;
	cfga_p->value = (rcrp->rcr_addr &
	    (RCRCFIG_A_STADDR_MASK |
	    RCRCFIG_A_STADDR_BASE_MASK));

	/*
	 * NOTE(review): this ORs the RCR length into the RBR's config-A
	 * image (rcfga_p) rather than the RCR's (cfga_p).  It reads
	 * like a typo for cfga_p, but it has shipped this way; confirm
	 * against the RCRCFIG_A register definition and tested behavior
	 * before changing it.
	 */
	rcfga_p->value |= ((uint64_t)rcrp->comp_size <<
	    RCRCFIG_A_LEN_SHIF);

	/*
	 * Timeout should be set based on the system clock divider.
	 * A timeout value of 1 assumes that the
	 * granularity (1000) is 3 microseconds running at 300MHz.
	 */
	cfgb_p->bits.ldw.pthres = rcrp->intr_threshold;
	cfgb_p->bits.ldw.timeout = rcrp->intr_timeout;
	cfgb_p->bits.ldw.entout = 1;

	/* Map in the mailbox */
	mboxp = (p_rx_mbox_t)
	    KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP);
	dmap = (p_nxge_dma_common_t)&mboxp->rx_mbox;
	nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t));
	cfig1_p = (p_rxdma_cfig1_t)&mboxp->rx_cfg1;
	cfig2_p = (p_rxdma_cfig2_t)&mboxp->rx_cfg2;
	cfig1_p->value = cfig2_p->value = 0;

	mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress;
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_map_rxdma_channel_cfg_ring: "
	    "channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx",
	    dma_channel, cfig1_p->value, cfig2_p->value,
	    mboxp->mbox_addr));

	/* High 12 bits of the mailbox DMA address go into cfig1. */
	dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress >> 32
	    & 0xfff);
	cfig1_p->bits.ldw.mbaddr_h = dmaaddrp;


	/* Low bits into cfig2 (the first assignment is overwritten). */
	dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 0xffffffff);
	dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress &
	    RXDMA_CFIG2_MBADDR_L_MASK);

	cfig2_p->bits.ldw.mbaddr = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT);

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_map_rxdma_channel_cfg_ring: "
	    "channel %d damaddrp $%p "
	    "cfg1 0x%016llx cfig2 0x%016llx",
	    dma_channel, dmaaddrp,
	    cfig1_p->value, cfig2_p->value));

	cfig2_p->bits.ldw.full_hdr = rcrp->full_hdr_flag;
	if (nxgep->niu_hw_type == NIU_HW_TYPE_RF) {
		/*
		 * RF NIU encodes offsets >= 256 with the offset256 bit
		 * plus the low 2 bits of the software header length.
		 */
		switch (rcrp->sw_priv_hdr_len) {
			case SW_OFFSET_NO_OFFSET:
			case SW_OFFSET_64:
			case SW_OFFSET_128:
			case SW_OFFSET_192:
				cfig2_p->bits.ldw.offset =
				    rcrp->sw_priv_hdr_len;
				cfig2_p->bits.ldw.offset256 = 0;
				break;
			case SW_OFFSET_256:
			case SW_OFFSET_320:
			case SW_OFFSET_384:
			case SW_OFFSET_448:
				cfig2_p->bits.ldw.offset =
				    rcrp->sw_priv_hdr_len & 0x3;
				cfig2_p->bits.ldw.offset256 = 1;
				break;
			default:
				cfig2_p->bits.ldw.offset = SW_OFFSET_NO_OFFSET;
				cfig2_p->bits.ldw.offset256 = 0;
		}
	} else {
		cfig2_p->bits.ldw.offset = rcrp->sw_priv_hdr_len;
	}

	/* Cross-link RBR and RCR, and hand the new soft state back. */
	rbrp->rx_rcr_p = rcrp;
	rcrp->rx_rbr_p = rbrp;
	*rcr_p = rcrp;
	*rx_mbox_p = mboxp;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "<== nxge_map_rxdma_channel_cfg_ring status 0x%08x", status));

	return (status);
}
3736 3736
3737 3737 /*ARGSUSED*/
3738 3738 static void
3739 3739 nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t nxgep,
3740 3740 p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p)
3741 3741 {
3742 3742 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3743 3743 "==> nxge_unmap_rxdma_channel_cfg_ring: channel %d",
3744 3744 rcr_p->rdc));
3745 3745
3746 3746 KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t));
3747 3747 KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t));
3748 3748
3749 3749 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3750 3750 "<== nxge_unmap_rxdma_channel_cfg_ring"));
3751 3751 }
3752 3752
/*
 * nxge_map_rxdma_channel_buf_ring
 *
 *	Build the receive block ring (RBR) software state for one RXDMA
 *	channel from a set of previously DMA-allocated buffer chunks:
 *	allocate the rx_msg pointer ring and ring_info bookkeeping,
 *	carve every chunk into block-sized message buffers via
 *	nxge_allocb(), and record per-chunk address/index information
 *	used later for address-to-buffer lookups.
 *
 * Arguments:
 *	nxgep		per-instance soft state
 *	channel		RXDMA channel number
 *	dma_buf_p	array of <num_chunks> DMA buffer chunk descriptors
 *	rbr_p		(out) newly constructed RBR ring, set on success
 *	num_chunks	number of chunks in <dma_buf_p>
 *
 * Returns:
 *	NXGE_OK on success; NXGE_ERROR if the chunks contain no blocks
 *	or if nxge_allocb() fails part way through (everything allocated
 *	up to that point is unwound).
 */
static nxge_status_t
nxge_map_rxdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel,
    p_nxge_dma_common_t *dma_buf_p,
    p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks)
{
	p_rx_rbr_ring_t		rbrp;
	p_nxge_dma_common_t	dma_bufp, tmp_bufp;
	p_rx_msg_t		*rx_msg_ring;
	p_rx_msg_t		rx_msg_p;
	p_mblk_t		mblk_p;

	rxring_info_t *ring_info;
	nxge_status_t		status = NXGE_OK;
	int			i, j, index;
	uint32_t		size, bsize, nblocks, nmsgs;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_map_rxdma_channel_buf_ring: channel %d",
	    channel));

	dma_bufp = tmp_bufp = *dma_buf_p;
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    " nxge_map_rxdma_channel_buf_ring: channel %d to map %d "
	    "chunks bufp 0x%016llx",
	    channel, num_chunks, dma_bufp));

	/*
	 * Total message-block count is the sum of the blocks in every
	 * chunk; it sizes both the rx_msg ring and the hardware RBR.
	 */
	nmsgs = 0;
	for (i = 0; i < num_chunks; i++, tmp_bufp++) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "==> nxge_map_rxdma_channel_buf_ring: channel %d "
		    "bufp 0x%016llx nblocks %d nmsgs %d",
		    channel, tmp_bufp, tmp_bufp->nblocks, nmsgs));
		nmsgs += tmp_bufp->nblocks;
	}
	if (!nmsgs) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_map_rxdma_channel_buf_ring: channel %d "
		    "no msg blocks",
		    channel));
		status = NXGE_ERROR;
		goto nxge_map_rxdma_channel_buf_ring_exit;
	}

	rbrp = (p_rx_rbr_ring_t)KMEM_ZALLOC(sizeof (*rbrp), KM_SLEEP);

	size = nmsgs * sizeof (p_rx_msg_t);
	rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP);
	ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t),
	    KM_SLEEP);

	MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER,
	    (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER,
	    (void *)nxgep->interrupt_cookie);
	rbrp->rdc = channel;
	rbrp->num_blocks = num_chunks;
	rbrp->tnblocks = nmsgs;
	rbrp->rbb_max = nmsgs;
	rbrp->rbr_max_size = nmsgs;
	/* nmsgs is expected to be a power of two for this mask to work */
	rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1);

	/*
	 * Buffer sizes suggested by NIU architect.
	 * 256, 512 and 2K.
	 */

	rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B;
	rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES;
	rbrp->npi_pkt_buf_size0 = SIZE_256B;

	rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K;
	rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES;
	rbrp->npi_pkt_buf_size1 = SIZE_1KB;

	rbrp->block_size = nxgep->rx_default_block_size;

	/*
	 * The third (largest) buffer size depends on whether jumbo
	 * frames are enabled: 2K for standard MTU, 4K or 8K for jumbo
	 * depending on the configured default block size.
	 */
	if (!nxgep->mac.is_jumbo) {
		rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K;
		rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES;
		rbrp->npi_pkt_buf_size2 = SIZE_2KB;
	} else {
		if (rbrp->block_size >= 0x2000) {
			rbrp->pkt_buf_size2 = RBR_BUFSZ2_8K;
			rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_8K_BYTES;
			rbrp->npi_pkt_buf_size2 = SIZE_8KB;
		} else {
			rbrp->pkt_buf_size2 = RBR_BUFSZ2_4K;
			rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_4K_BYTES;
			rbrp->npi_pkt_buf_size2 = SIZE_4KB;
		}
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_map_rxdma_channel_buf_ring: channel %d "
	    "actual rbr max %d rbb_max %d nmsgs %d "
	    "rbrp->block_size %d default_block_size %d "
	    "(config nxge_rbr_size %d nxge_rbr_spare_size %d)",
	    channel, rbrp->rbr_max_size, rbrp->rbb_max, nmsgs,
	    rbrp->block_size, nxgep->rx_default_block_size,
	    nxge_rbr_size, nxge_rbr_spare_size));

	/* Map in buffers from the buffer pool. */
	/* <index> runs over all blocks across all chunks. */
	index = 0;
	for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) {
		bsize = dma_bufp->block_size;
		nblocks = dma_bufp->nblocks;
#if defined(__i386)
		ring_info->buffer[i].dvma_addr = (uint32_t)dma_bufp->ioaddr_pp;
#else
		ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp;
#endif
		ring_info->buffer[i].buf_index = i;
		ring_info->buffer[i].buf_size = dma_bufp->alength;
		ring_info->buffer[i].start_index = index;
#if defined(__i386)
		ring_info->buffer[i].kaddr = (uint32_t)dma_bufp->kaddrp;
#else
		ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp;
#endif

		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    " nxge_map_rxdma_channel_buf_ring: map channel %d "
		    "chunk %d"
		    " nblocks %d chunk_size %x block_size 0x%x "
		    "dma_bufp $%p", channel, i,
		    dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize,
		    dma_bufp));

		/* Carve this chunk into <nblocks> message buffers. */
		for (j = 0; j < nblocks; j++) {
			if ((rx_msg_p = nxge_allocb(bsize, BPRI_LO,
			    dma_bufp)) == NULL) {
				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				    "allocb failed (index %d i %d j %d)",
				    index, i, j));
				goto nxge_map_rxdma_channel_buf_ring_fail1;
			}
			rx_msg_ring[index] = rx_msg_p;
			rx_msg_p->block_index = index;
			/*
			 * The hardware RBR descriptor holds the block
			 * address shifted right by RBR_BKADDR_SHIFT;
			 * precompute that form once here.
			 */
			rx_msg_p->shifted_addr = (uint32_t)
			    ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >>
			    RBR_BKADDR_SHIFT));

			NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
			    "index %d j %d rx_msg_p $%p mblk %p",
			    index, j, rx_msg_p, rx_msg_p->rx_mblk_p));

			mblk_p = rx_msg_p->rx_mblk_p;
			mblk_p->b_wptr = mblk_p->b_rptr + bsize;

			/* One reference per message buffer outstanding. */
			rbrp->rbr_ref_cnt++;
			index++;
			rx_msg_p->buf_dma.dma_channel = channel;
		}

		/*
		 * Record how the chunk memory was allocated so that
		 * teardown uses the matching free routine.  kmem_alloc
		 * takes precedence over contig_alloc if both are set.
		 */
		rbrp->rbr_alloc_type = DDI_MEM_ALLOC;
		if (dma_bufp->contig_alloc_type) {
			rbrp->rbr_alloc_type = CONTIG_MEM_ALLOC;
		}

		if (dma_bufp->kmem_alloc_type) {
			rbrp->rbr_alloc_type = KMEM_ALLOC;
		}

		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    " nxge_map_rxdma_channel_buf_ring: map channel %d "
		    "chunk %d"
		    " nblocks %d chunk_size %x block_size 0x%x "
		    "dma_bufp $%p",
		    channel, i,
		    dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize,
		    dma_bufp));
	}
	if (i < rbrp->num_blocks) {
		goto nxge_map_rxdma_channel_buf_ring_fail1;
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "nxge_map_rxdma_channel_buf_ring: done buf init "
	    "channel %d msg block entries %d",
	    channel, index));
	/* bsize is the block size of the last chunk processed */
	ring_info->block_size_mask = bsize - 1;
	rbrp->rx_msg_ring = rx_msg_ring;
	rbrp->dma_bufp = dma_buf_p;
	rbrp->ring_info = ring_info;

	status = nxge_rxbuf_index_info_init(nxgep, rbrp);
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    " nxge_map_rxdma_channel_buf_ring: "
	    "channel %d done buf info init", channel));

	/*
	 * Finally, permit nxge_freeb() to call nxge_post_page().
	 */
	rbrp->rbr_state = RBR_POSTING;

	*rbr_p = rbrp;
	goto nxge_map_rxdma_channel_buf_ring_exit;

nxge_map_rxdma_channel_buf_ring_fail1:
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    " nxge_map_rxdma_channel_buf_ring: failed channel (0x%x)",
	    channel, status));

	/*
	 * Unwind: free every message buffer allocated so far (entries
	 * [0, index-1]), then fall through to release the ring
	 * structures themselves.
	 */
	index--;
	for (; index >= 0; index--) {
		rx_msg_p = rx_msg_ring[index];
		if (rx_msg_p != NULL) {
			freeb(rx_msg_p->rx_mblk_p);
			rx_msg_ring[index] = NULL;
		}
	}
nxge_map_rxdma_channel_buf_ring_fail:
	MUTEX_DESTROY(&rbrp->post_lock);
	MUTEX_DESTROY(&rbrp->lock);
	KMEM_FREE(ring_info, sizeof (rxring_info_t));
	KMEM_FREE(rx_msg_ring, size);
	KMEM_FREE(rbrp, sizeof (rx_rbr_ring_t));

	status = NXGE_ERROR;

nxge_map_rxdma_channel_buf_ring_exit:
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "<== nxge_map_rxdma_channel_buf_ring status 0x%08x", status));

	return (status);
}
3979 3979
3980 3980 /*ARGSUSED*/
3981 3981 static void
3982 3982 nxge_unmap_rxdma_channel_buf_ring(p_nxge_t nxgep,
3983 3983 p_rx_rbr_ring_t rbr_p)
3984 3984 {
3985 3985 p_rx_msg_t *rx_msg_ring;
3986 3986 p_rx_msg_t rx_msg_p;
3987 3987 rxring_info_t *ring_info;
3988 3988 int i;
3989 3989 uint32_t size;
3990 3990 #ifdef NXGE_DEBUG
3991 3991 int num_chunks;
3992 3992 #endif
3993 3993
3994 3994 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3995 3995 "==> nxge_unmap_rxdma_channel_buf_ring"));
3996 3996 if (rbr_p == NULL) {
3997 3997 NXGE_DEBUG_MSG((nxgep, RX_CTL,
3998 3998 "<== nxge_unmap_rxdma_channel_buf_ring: NULL rbrp"));
3999 3999 return;
4000 4000 }
4001 4001 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4002 4002 "==> nxge_unmap_rxdma_channel_buf_ring: channel %d",
4003 4003 rbr_p->rdc));
4004 4004
4005 4005 rx_msg_ring = rbr_p->rx_msg_ring;
4006 4006 ring_info = rbr_p->ring_info;
4007 4007
4008 4008 if (rx_msg_ring == NULL || ring_info == NULL) {
4009 4009 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4010 4010 "<== nxge_unmap_rxdma_channel_buf_ring: "
4011 4011 "rx_msg_ring $%p ring_info $%p",
4012 4012 rx_msg_p, ring_info));
4013 4013 return;
4014 4014 }
4015 4015
4016 4016 #ifdef NXGE_DEBUG
4017 4017 num_chunks = rbr_p->num_blocks;
4018 4018 #endif
4019 4019 size = rbr_p->tnblocks * sizeof (p_rx_msg_t);
4020 4020 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4021 4021 " nxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d "
4022 4022 "tnblocks %d (max %d) size ptrs %d ",
4023 4023 rbr_p->rdc, num_chunks,
4024 4024 rbr_p->tnblocks, rbr_p->rbr_max_size, size));
4025 4025
4026 4026 for (i = 0; i < rbr_p->tnblocks; i++) {
4027 4027 rx_msg_p = rx_msg_ring[i];
4028 4028 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4029 4029 " nxge_unmap_rxdma_channel_buf_ring: "
4030 4030 "rx_msg_p $%p",
4031 4031 rx_msg_p));
4032 4032 if (rx_msg_p != NULL) {
4033 4033 freeb(rx_msg_p->rx_mblk_p);
4034 4034 rx_msg_ring[i] = NULL;
4035 4035 }
4036 4036 }
4037 4037
4038 4038 /*
4039 4039 * We no longer may use the mutex <post_lock>. By setting
4040 4040 * <rbr_state> to anything but POSTING, we prevent
4041 4041 * nxge_post_page() from accessing a dead mutex.
4042 4042 */
4043 4043 rbr_p->rbr_state = RBR_UNMAPPING;
4044 4044 MUTEX_DESTROY(&rbr_p->post_lock);
4045 4045
4046 4046 MUTEX_DESTROY(&rbr_p->lock);
4047 4047
4048 4048 if (rbr_p->rbr_ref_cnt == 0) {
4049 4049 /*
4050 4050 * This is the normal state of affairs.
4051 4051 * Need to free the following buffers:
4052 4052 * - data buffers
4053 4053 * - rx_msg ring
4054 4054 * - ring_info
4055 4055 * - rbr ring
4056 4056 */
4057 4057 NXGE_DEBUG_MSG((nxgep, RX_CTL,
4058 4058 "unmap_rxdma_buf_ring: No outstanding - freeing "));
4059 4059 nxge_rxdma_databuf_free(rbr_p);
4060 4060 KMEM_FREE(ring_info, sizeof (rxring_info_t));
4061 4061 KMEM_FREE(rx_msg_ring, size);
4062 4062 KMEM_FREE(rbr_p, sizeof (*rbr_p));
4063 4063 } else {
4064 4064 /*
4065 4065 * Some of our buffers are still being used.
4066 4066 * Therefore, tell nxge_freeb() this ring is
4067 4067 * unmapped, so it may free <rbr_p> for us.
4068 4068 */
4069 4069 rbr_p->rbr_state = RBR_UNMAPPED;
4070 4070 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4071 4071 "unmap_rxdma_buf_ring: %d %s outstanding.",
4072 4072 rbr_p->rbr_ref_cnt,
4073 4073 rbr_p->rbr_ref_cnt == 1 ? "msg" : "msgs"));
4074 4074 }
4075 4075
4076 4076 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4077 4077 "<== nxge_unmap_rxdma_channel_buf_ring"));
4078 4078 }
4079 4079
4080 4080 /*
4081 4081 * nxge_rxdma_hw_start_common
4082 4082 *
4083 4083 * Arguments:
4084 4084 * nxgep
4085 4085 *
4086 4086 * Notes:
4087 4087 *
4088 4088 * NPI/NXGE function calls:
4089 4089 * nxge_init_fzc_rx_common();
4090 4090 * nxge_init_fzc_rxdma_port();
4091 4091 *
4092 4092 * Registers accessed:
4093 4093 *
4094 4094 * Context:
4095 4095 * Service domain
4096 4096 */
4097 4097 static nxge_status_t
4098 4098 nxge_rxdma_hw_start_common(p_nxge_t nxgep)
4099 4099 {
4100 4100 nxge_status_t status = NXGE_OK;
4101 4101
4102 4102 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common"));
4103 4103
4104 4104 /*
4105 4105 * Load the sharable parameters by writing to the
4106 4106 * function zero control registers. These FZC registers
4107 4107 * should be initialized only once for the entire chip.
4108 4108 */
4109 4109 (void) nxge_init_fzc_rx_common(nxgep);
4110 4110
4111 4111 /*
4112 4112 * Initialize the RXDMA port specific FZC control configurations.
4113 4113 * These FZC registers are pertaining to each port.
4114 4114 */
4115 4115 (void) nxge_init_fzc_rxdma_port(nxgep);
4116 4116
4117 4117 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common"));
4118 4118
4119 4119 return (status);
4120 4120 }
4121 4121
4122 4122 static nxge_status_t
4123 4123 nxge_rxdma_hw_start(p_nxge_t nxgep, int channel)
4124 4124 {
4125 4125 int i, ndmas;
4126 4126 p_rx_rbr_rings_t rx_rbr_rings;
4127 4127 p_rx_rbr_ring_t *rbr_rings;
4128 4128 p_rx_rcr_rings_t rx_rcr_rings;
4129 4129 p_rx_rcr_ring_t *rcr_rings;
4130 4130 p_rx_mbox_areas_t rx_mbox_areas_p;
4131 4131 p_rx_mbox_t *rx_mbox_p;
4132 4132 nxge_status_t status = NXGE_OK;
4133 4133
4134 4134 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start"));
4135 4135
4136 4136 rx_rbr_rings = nxgep->rx_rbr_rings;
4137 4137 rx_rcr_rings = nxgep->rx_rcr_rings;
4138 4138 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
4139 4139 NXGE_DEBUG_MSG((nxgep, RX_CTL,
4140 4140 "<== nxge_rxdma_hw_start: NULL ring pointers"));
4141 4141 return (NXGE_ERROR);
4142 4142 }
4143 4143 ndmas = rx_rbr_rings->ndmas;
4144 4144 if (ndmas == 0) {
4145 4145 NXGE_DEBUG_MSG((nxgep, RX_CTL,
4146 4146 "<== nxge_rxdma_hw_start: no dma channel allocated"));
4147 4147 return (NXGE_ERROR);
4148 4148 }
4149 4149
4150 4150 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4151 4151 "==> nxge_rxdma_hw_start (ndmas %d)", ndmas));
4152 4152
4153 4153 rbr_rings = rx_rbr_rings->rbr_rings;
4154 4154 rcr_rings = rx_rcr_rings->rcr_rings;
4155 4155 rx_mbox_areas_p = nxgep->rx_mbox_areas_p;
4156 4156 if (rx_mbox_areas_p) {
4157 4157 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;
4158 4158 }
4159 4159
4160 4160 i = channel;
4161 4161 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4162 4162 "==> nxge_rxdma_hw_start (ndmas %d) channel %d",
4163 4163 ndmas, channel));
4164 4164 status = nxge_rxdma_start_channel(nxgep, channel,
4165 4165 (p_rx_rbr_ring_t)rbr_rings[i],
4166 4166 (p_rx_rcr_ring_t)rcr_rings[i],
4167 4167 (p_rx_mbox_t)rx_mbox_p[i]);
4168 4168 if (status != NXGE_OK) {
4169 4169 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4170 4170 "==> nxge_rxdma_hw_start: disable "
4171 4171 "(status 0x%x channel %d)", status, channel));
4172 4172 return (status);
4173 4173 }
4174 4174
4175 4175 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start: "
4176 4176 "rx_rbr_rings 0x%016llx rings 0x%016llx",
4177 4177 rx_rbr_rings, rx_rcr_rings));
4178 4178
4179 4179 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4180 4180 "==> nxge_rxdma_hw_start: (status 0x%x)", status));
4181 4181
4182 4182 return (status);
4183 4183 }
4184 4184
4185 4185 static void
4186 4186 nxge_rxdma_hw_stop(p_nxge_t nxgep, int channel)
4187 4187 {
4188 4188 p_rx_rbr_rings_t rx_rbr_rings;
4189 4189 p_rx_rcr_rings_t rx_rcr_rings;
4190 4190
4191 4191 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop"));
4192 4192
4193 4193 rx_rbr_rings = nxgep->rx_rbr_rings;
4194 4194 rx_rcr_rings = nxgep->rx_rcr_rings;
4195 4195 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
4196 4196 NXGE_DEBUG_MSG((nxgep, RX_CTL,
4197 4197 "<== nxge_rxdma_hw_stop: NULL ring pointers"));
4198 4198 return;
4199 4199 }
4200 4200
4201 4201 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4202 4202 "==> nxge_rxdma_hw_stop(channel %d)",
4203 4203 channel));
4204 4204 (void) nxge_rxdma_stop_channel(nxgep, channel);
4205 4205
4206 4206 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop: "
4207 4207 "rx_rbr_rings 0x%016llx rings 0x%016llx",
4208 4208 rx_rbr_rings, rx_rcr_rings));
4209 4209
4210 4210 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_stop"));
4211 4211 }
4212 4212
4213 4213
4214 4214 static nxge_status_t
4215 4215 nxge_rxdma_start_channel(p_nxge_t nxgep, uint16_t channel,
4216 4216 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p)
4217 4217
4218 4218 {
4219 4219 npi_handle_t handle;
4220 4220 npi_status_t rs = NPI_SUCCESS;
4221 4221 rx_dma_ctl_stat_t cs;
4222 4222 rx_dma_ent_msk_t ent_mask;
4223 4223 nxge_status_t status = NXGE_OK;
4224 4224
4225 4225 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel"));
4226 4226
4227 4227 handle = NXGE_DEV_NPI_HANDLE(nxgep);
4228 4228
4229 4229 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "nxge_rxdma_start_channel: "
4230 4230 "npi handle addr $%p acc $%p",
4231 4231 nxgep->npi_handle.regp, nxgep->npi_handle.regh));
4232 4232
4233 4233 /* Reset RXDMA channel, but not if you're a guest. */
4234 4234 if (!isLDOMguest(nxgep)) {
4235 4235 rs = npi_rxdma_cfg_rdc_reset(handle, channel);
4236 4236 if (rs != NPI_SUCCESS) {
4237 4237 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4238 4238 "==> nxge_init_fzc_rdc: "
4239 4239 "npi_rxdma_cfg_rdc_reset(%d) returned 0x%08x",
4240 4240 channel, rs));
4241 4241 return (NXGE_ERROR | rs);
4242 4242 }
4243 4243
4244 4244 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4245 4245 "==> nxge_rxdma_start_channel: reset done: channel %d",
4246 4246 channel));
4247 4247 }
4248 4248
4249 4249 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
4250 4250 if (isLDOMguest(nxgep))
4251 4251 (void) nxge_rdc_lp_conf(nxgep, channel);
4252 4252 #endif
4253 4253
4254 4254 /*
4255 4255 * Initialize the RXDMA channel specific FZC control
4256 4256 * configurations. These FZC registers are pertaining
4257 4257 * to each RX channel (logical pages).
4258 4258 */
4259 4259 if (!isLDOMguest(nxgep)) {
4260 4260 status = nxge_init_fzc_rxdma_channel(nxgep, channel);
4261 4261 if (status != NXGE_OK) {
4262 4262 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4263 4263 "==> nxge_rxdma_start_channel: "
4264 4264 "init fzc rxdma failed (0x%08x channel %d)",
4265 4265 status, channel));
4266 4266 return (status);
4267 4267 }
4268 4268
4269 4269 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4270 4270 "==> nxge_rxdma_start_channel: fzc done"));
4271 4271 }
4272 4272
4273 4273 /* Set up the interrupt event masks. */
4274 4274 ent_mask.value = 0;
4275 4275 ent_mask.value |= RX_DMA_ENT_MSK_RBREMPTY_MASK;
4276 4276 rs = npi_rxdma_event_mask(handle, OP_SET, channel,
4277 4277 &ent_mask);
4278 4278 if (rs != NPI_SUCCESS) {
4279 4279 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4280 4280 "==> nxge_rxdma_start_channel: "
4281 4281 "init rxdma event masks failed "
4282 4282 "(0x%08x channel %d)",
4283 4283 status, channel));
4284 4284 return (NXGE_ERROR | rs);
4285 4285 }
4286 4286
4287 4287 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4288 4288 "==> nxge_rxdma_start_channel: "
4289 4289 "event done: channel %d (mask 0x%016llx)",
4290 4290 channel, ent_mask.value));
4291 4291
4292 4292 /* Initialize the receive DMA control and status register */
4293 4293 cs.value = 0;
4294 4294 cs.bits.hdw.mex = 1;
4295 4295 cs.bits.hdw.rcrthres = 1;
4296 4296 cs.bits.hdw.rcrto = 1;
4297 4297 cs.bits.hdw.rbr_empty = 1;
4298 4298 status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs);
4299 4299 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
4300 4300 "channel %d rx_dma_cntl_stat 0x%0016llx", channel, cs.value));
4301 4301 if (status != NXGE_OK) {
4302 4302 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4303 4303 "==> nxge_rxdma_start_channel: "
4304 4304 "init rxdma control register failed (0x%08x channel %d",
4305 4305 status, channel));
4306 4306 return (status);
4307 4307 }
4308 4308
4309 4309 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
4310 4310 "control done - channel %d cs 0x%016llx", channel, cs.value));
4311 4311
4312 4312 /*
4313 4313 * Load RXDMA descriptors, buffers, mailbox,
4314 4314 * initialise the receive DMA channels and
4315 4315 * enable each DMA channel.
4316 4316 */
4317 4317 status = nxge_enable_rxdma_channel(nxgep,
4318 4318 channel, rbr_p, rcr_p, mbox_p);
4319 4319
4320 4320 if (status != NXGE_OK) {
4321 4321 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4322 4322 " nxge_rxdma_start_channel: "
4323 4323 " enable rxdma failed (0x%08x channel %d)",
4324 4324 status, channel));
4325 4325 return (status);
4326 4326 }
4327 4327
4328 4328 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4329 4329 "==> nxge_rxdma_start_channel: enabled channel %d"));
4330 4330
4331 4331 if (isLDOMguest(nxgep)) {
4332 4332 /* Add interrupt handler for this channel. */
4333 4333 status = nxge_hio_intr_add(nxgep, VP_BOUND_RX, channel);
4334 4334 if (status != NXGE_OK) {
4335 4335 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4336 4336 " nxge_rxdma_start_channel: "
4337 4337 " nxge_hio_intr_add failed (0x%08x channel %d)",
4338 4338 status, channel));
4339 4339 return (status);
4340 4340 }
4341 4341 }
4342 4342
4343 4343 ent_mask.value = 0;
4344 4344 ent_mask.value |= (RX_DMA_ENT_MSK_WRED_DROP_MASK |
4345 4345 RX_DMA_ENT_MSK_PTDROP_PKT_MASK);
4346 4346 rs = npi_rxdma_event_mask(handle, OP_SET, channel,
4347 4347 &ent_mask);
4348 4348 if (rs != NPI_SUCCESS) {
4349 4349 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4350 4350 "==> nxge_rxdma_start_channel: "
4351 4351 "init rxdma event masks failed (0x%08x channel %d)",
4352 4352 status, channel));
4353 4353 return (NXGE_ERROR | rs);
4354 4354 }
4355 4355
4356 4356 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
4357 4357 "control done - channel %d cs 0x%016llx", channel, cs.value));
4358 4358
4359 4359 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_start_channel"));
4360 4360
4361 4361 return (NXGE_OK);
4362 4362 }
4363 4363
/*
 * nxge_rxdma_stop_channel
 *
 *	Quiesce and disable one RXDMA channel.  In the service domain
 *	this follows the documented hardware stop sequence: disable the
 *	RxMAC (A.9.2.6), drain the IPP port (A.9.3.6), reset the
 *	channel, mask all channel events, clear the control/status
 *	register, disable the channel, then re-enable the RxMAC
 *	(A.9.2.10) for the remaining channels.  An LDOMs guest skips
 *	the RxMAC/IPP steps it does not own.
 *
 * Arguments:
 *	nxgep	per-instance soft state
 *	channel	RXDMA channel number
 *
 * Returns:
 *	NXGE_OK, or an NXGE_ERROR/NPI status from the first step that
 *	fails.
 */
static nxge_status_t
nxge_rxdma_stop_channel(p_nxge_t nxgep, uint16_t channel)
{
	npi_handle_t		handle;
	npi_status_t		rs = NPI_SUCCESS;
	rx_dma_ctl_stat_t	cs;
	rx_dma_ent_msk_t	ent_mask;
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "nxge_rxdma_stop_channel: "
	    "npi handle addr $%p acc $%p",
	    nxgep->npi_handle.regp, nxgep->npi_handle.regh));

	if (!isLDOMguest(nxgep)) {
		/*
		 * Stop RxMAC = A.9.2.6
		 */
		/* Failure here is logged but not fatal to the stop. */
		if (nxge_rx_mac_disable(nxgep) != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_rxdma_stop_channel: "
			    "Failed to disable RxMAC"));
		}

		/*
		 * Drain IPP Port = A.9.3.6
		 */
		(void) nxge_ipp_drain(nxgep);
	}

	/* Reset RXDMA channel */
	rs = npi_rxdma_cfg_rdc_reset(handle, channel);
	if (rs != NPI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_rxdma_stop_channel: "
		    " reset rxdma failed (0x%08x channel %d)",
		    rs, channel));
		return (NXGE_ERROR | rs);
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rxdma_stop_channel: reset done"));

	/* Set up the interrupt event masks. */
	/* Mask everything so a stopped channel cannot interrupt. */
	ent_mask.value = RX_DMA_ENT_MSK_ALL;
	rs = npi_rxdma_event_mask(handle, OP_SET, channel,
	    &ent_mask);
	if (rs != NPI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rxdma_stop_channel: "
		    "set rxdma event masks failed (0x%08x channel %d)",
		    rs, channel));
		return (NXGE_ERROR | rs);
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rxdma_stop_channel: event done"));

	/*
	 * Initialize the receive DMA control and status register
	 */
	/* All-zero cs returns the control/status register to defaults. */
	cs.value = 0;
	status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs);
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel: control "
	    " to default (all 0s) 0x%08x", cs.value));
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_rxdma_stop_channel: init rxdma"
		    " control register failed (0x%08x channel %d",
		    status, channel));
		return (status);
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rxdma_stop_channel: control done"));

	/*
	 * Make sure channel is disabled.
	 */
	status = nxge_disable_rxdma_channel(nxgep, channel);

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_rxdma_stop_channel: "
		    " init enable rxdma failed (0x%08x channel %d)",
		    status, channel));
		return (status);
	}

	if (!isLDOMguest(nxgep)) {
		/*
		 * Enable RxMAC = A.9.2.10
		 */
		if (nxge_rx_mac_enable(nxgep) != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_rxdma_stop_channel: Rx MAC still disabled"));
		}
	}

	NXGE_DEBUG_MSG((nxgep,
	    RX_CTL, "==> nxge_rxdma_stop_channel: disable done"));

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_channel"));

	return (NXGE_OK);
}
4473 4473
/*
 * nxge_rxdma_handle_sys_errors
 *
 *	Read the shared RX control/data FIFO error-interrupt status and
 *	react to the conditions relevant to this port: count ID
 *	mismatches (a global fatal condition reported via FMA), and if
 *	a ZCP or IPP end-of-packet error belongs to this port, run the
 *	port error recovery path.
 *
 * Arguments:
 *	nxgep	per-instance soft state
 *
 * Returns:
 *	NXGE_OK; NXGE_ERROR | <rs> if the status read fails; NXGE_ERROR
 *	for an unexpected port number; otherwise the status of
 *	nxge_rxdma_handle_port_errors().
 */
nxge_status_t
nxge_rxdma_handle_sys_errors(p_nxge_t nxgep)
{
	npi_handle_t		handle;
	p_nxge_rdc_sys_stats_t	statsp;
	rx_ctl_dat_fifo_stat_t	stat;
	uint32_t		zcp_err_status;
	uint32_t		ipp_err_status;
	nxge_status_t		status = NXGE_OK;
	npi_status_t		rs = NPI_SUCCESS;
	boolean_t		my_err = B_FALSE;

	handle = nxgep->npi_handle;
	statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats;

	rs = npi_rxdma_rxctl_fifo_error_intr_get(handle, &stat);

	if (rs != NPI_SUCCESS)
		return (NXGE_ERROR | rs);

	if (stat.bits.ldw.id_mismatch) {
		statsp->id_mismatch++;
		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL,
		    NXGE_FM_EREPORT_RDMC_ID_MISMATCH);
		/* Global fatal error encountered */
	}

	/*
	 * The ZCP/IPP EOP error fields carry one bit per port; only
	 * act when the bit for this instance's port is set.
	 * zcp_err_status/ipp_err_status are assigned exactly when
	 * my_err is set, so they are never read uninitialized below.
	 */
	if ((stat.bits.ldw.zcp_eop_err) || (stat.bits.ldw.ipp_eop_err)) {
		switch (nxgep->mac.portnum) {
		case 0:
			if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT0) ||
			    (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT0)) {
				my_err = B_TRUE;
				zcp_err_status = stat.bits.ldw.zcp_eop_err;
				ipp_err_status = stat.bits.ldw.ipp_eop_err;
			}
			break;
		case 1:
			if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT1) ||
			    (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT1)) {
				my_err = B_TRUE;
				zcp_err_status = stat.bits.ldw.zcp_eop_err;
				ipp_err_status = stat.bits.ldw.ipp_eop_err;
			}
			break;
		case 2:
			if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT2) ||
			    (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT2)) {
				my_err = B_TRUE;
				zcp_err_status = stat.bits.ldw.zcp_eop_err;
				ipp_err_status = stat.bits.ldw.ipp_eop_err;
			}
			break;
		case 3:
			if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT3) ||
			    (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT3)) {
				my_err = B_TRUE;
				zcp_err_status = stat.bits.ldw.zcp_eop_err;
				ipp_err_status = stat.bits.ldw.ipp_eop_err;
			}
			break;
		default:
			return (NXGE_ERROR);
		}
	}

	if (my_err) {
		status = nxge_rxdma_handle_port_errors(nxgep, ipp_err_status,
		    zcp_err_status);
		if (status != NXGE_OK)
			return (status);
	}

	return (NXGE_OK);
}
4549 4549
4550 4550 static nxge_status_t
4551 4551 nxge_rxdma_handle_port_errors(p_nxge_t nxgep, uint32_t ipp_status,
4552 4552 uint32_t zcp_status)
4553 4553 {
4554 4554 boolean_t rxport_fatal = B_FALSE;
4555 4555 p_nxge_rdc_sys_stats_t statsp;
4556 4556 nxge_status_t status = NXGE_OK;
4557 4557 uint8_t portn;
4558 4558
4559 4559 portn = nxgep->mac.portnum;
4560 4560 statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats;
4561 4561
4562 4562 if (ipp_status & (0x1 << portn)) {
4563 4563 statsp->ipp_eop_err++;
4564 4564 NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
4565 4565 NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR);
4566 4566 rxport_fatal = B_TRUE;
4567 4567 }
4568 4568
4569 4569 if (zcp_status & (0x1 << portn)) {
4570 4570 statsp->zcp_eop_err++;
4571 4571 NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
4572 4572 NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR);
4573 4573 rxport_fatal = B_TRUE;
4574 4574 }
4575 4575
4576 4576 if (rxport_fatal) {
4577 4577 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4578 4578 " nxge_rxdma_handle_port_error: "
4579 4579 " fatal error on Port #%d\n",
4580 4580 portn));
4581 4581 status = nxge_rx_port_fatal_err_recover(nxgep);
4582 4582 if (status == NXGE_OK) {
4583 4583 FM_SERVICE_RESTORED(nxgep);
4584 4584 }
4585 4585 }
4586 4586
4587 4587 return (status);
4588 4588 }
4589 4589
/*
 * nxge_rxdma_fatal_err_recover
 *
 *	Recover one RXDMA channel from a fatal error: with both ring
 *	locks held, disable the channel, mask its interrupts, reset
 *	the hardware, rewind the software RBR/RCR indices, zero the
 *	completion ring, mark re-postable buffers free, and restart
 *	the channel.
 *
 * Arguments:
 *	nxgep	per-instance soft state
 *	channel	RXDMA channel number
 *
 * Returns:
 *	NXGE_OK on successful recovery; NXGE_ERROR | <rs> on failure.
 */
static nxge_status_t
nxge_rxdma_fatal_err_recover(p_nxge_t nxgep, uint16_t channel)
{
	npi_handle_t		handle;
	npi_status_t		rs = NPI_SUCCESS;
	nxge_status_t		status = NXGE_OK;
	p_rx_rbr_ring_t		rbrp;
	p_rx_rcr_ring_t		rcrp;
	p_rx_mbox_t		mboxp;
	rx_dma_ent_msk_t	ent_mask;
	p_nxge_dma_common_t	dmap;
	uint32_t		ref_cnt;
	p_rx_msg_t		rx_msg_p;
	int			i;
	uint32_t		nxge_port_rcr_size;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fatal_err_recover"));
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "Recovering from RxDMAChannel#%d error...", channel));

	/*
	 * Stop the dma channel waits for the stop done.
	 * If the stop done bit is not set, then create
	 * an error.
	 */

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Rx DMA stop..."));

	rbrp = (p_rx_rbr_ring_t)nxgep->rx_rbr_rings->rbr_rings[channel];
	rcrp = (p_rx_rcr_ring_t)nxgep->rx_rcr_rings->rcr_rings[channel];

	/*
	 * Hold both the ring lock and the post lock for the whole
	 * recovery so neither the receive path nor nxge_post_page()
	 * can touch the rings while they are being rebuilt.
	 */
	MUTEX_ENTER(&rbrp->lock);
	MUTEX_ENTER(&rbrp->post_lock);

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA channel..."));

	rs = npi_rxdma_cfg_rdc_disable(handle, channel);
	if (rs != NPI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_disable_rxdma_channel:failed"));
		goto fail;
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA interrupt..."));

	/* Disable interrupt */
	/* A mask failure is logged but does not abort the recovery. */
	ent_mask.value = RX_DMA_ENT_MSK_ALL;
	rs = npi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask);
	if (rs != NPI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_rxdma_stop_channel: "
		    "set rxdma event masks failed (channel %d)",
		    channel));
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel reset..."));

	/* Reset RXDMA channel */
	rs = npi_rxdma_cfg_rdc_reset(handle, channel);
	if (rs != NPI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_rxdma_fatal_err_recover: "
		    " reset rxdma failed (channel %d)", channel));
		goto fail;
	}

	nxge_port_rcr_size = nxgep->nxge_port_rcr_size;

	mboxp = (p_rx_mbox_t)nxgep->rx_mbox_areas_p->rxmbox_areas[channel];

	/*
	 * Rewind the software ring state to match the freshly reset
	 * hardware: RBR write index at the end (fully posted), all
	 * RCR indices and descriptor pointers back to the start.
	 */
	rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
	rbrp->rbr_rd_index = 0;

	rcrp->comp_rd_index = 0;
	rcrp->comp_wt_index = 0;
	rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p =
	    (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc);
#if defined(__i386)
	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
	    (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
#else
	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
	    (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
#endif

	rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p +
	    (nxge_port_rcr_size - 1);
	rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp +
	    (nxge_port_rcr_size - 1);

	/* Clear the completion ring contents. */
	dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
	bzero((caddr_t)dmap->kaddrp, dmap->alength);

	cmn_err(CE_NOTE, "!rbr entries = %d\n", rbrp->rbr_max_size);

	/*
	 * Sweep the buffer ring: a buffer with ref_cnt != 1 is held
	 * outside the driver; if its usage counts show it has been
	 * fully consumed, mark it free so it can be re-posted.
	 */
	for (i = 0; i < rbrp->rbr_max_size; i++) {
		rx_msg_p = rbrp->rx_msg_ring[i];
		ref_cnt = rx_msg_p->ref_cnt;
		if (ref_cnt != 1) {
			if (rx_msg_p->cur_usage_cnt !=
			    rx_msg_p->max_usage_cnt) {
				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				    "buf[%d]: cur_usage_cnt = %d "
				    "max_usage_cnt = %d\n", i,
				    rx_msg_p->cur_usage_cnt,
				    rx_msg_p->max_usage_cnt));
			} else {
				/* Buffer can be re-posted */
				rx_msg_p->free = B_TRUE;
				rx_msg_p->cur_usage_cnt = 0;
				/* Poison value flags a recycled buffer. */
				rx_msg_p->max_usage_cnt = 0xbaddcafe;
				rx_msg_p->pkt_buf_size = 0;
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel re-start..."));

	status = nxge_rxdma_start_channel(nxgep, channel, rbrp, rcrp, mboxp);
	if (status != NXGE_OK) {
		goto fail;
	}

	MUTEX_EXIT(&rbrp->post_lock);
	MUTEX_EXIT(&rbrp->lock);

	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "Recovery Successful, RxDMAChannel#%d Restored",
	    channel));
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fatal_err_recover"));
	return (NXGE_OK);

fail:
	MUTEX_EXIT(&rbrp->post_lock);
	MUTEX_EXIT(&rbrp->lock);
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
	return (NXGE_ERROR | rs);
}
4729 4729
/*
 * nxge_rx_port_fatal_err_recover
 *
 *	Attempt full recovery from a fatal receive-port error: quiesce the
 *	RxMAC, individually recover every RxDMA channel this instance owns,
 *	then reset and re-initialize the IPP and RxMAC before re-enabling
 *	receive traffic.  The step ordering below is required by the
 *	hardware: the MAC must be disabled before channel recovery, and the
 *	IPP must be reset/re-initialized before the MAC is brought back up.
 *
 * Arguments:
 *	nxgep	per-instance soft state
 *
 * Returns:
 *	NXGE_OK on success; the failing step's status otherwise.
 */
nxge_status_t
nxge_rx_port_fatal_err_recover(p_nxge_t nxgep)
{
	nxge_grp_set_t *set = &nxgep->rx_set;
	nxge_status_t status = NXGE_OK;
	p_rx_rcr_ring_t rcrp;
	int rdc;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_port_fatal_err_recover"));
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "Recovering from RxPort error..."));
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disabling RxMAC...\n"));

	/* Stop inbound traffic before touching the DMA channels. */
	if (nxge_rx_mac_disable(nxgep) != NXGE_OK)
		goto fail;

	/* Give in-flight frames time to drain. */
	NXGE_DELAY(1000);

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Stopping all RxDMA channels..."));

	/*
	 * Recover each RxDMA channel owned by this instance (per the
	 * owned.map bitmask), holding the channel's rcr lock across the
	 * recovery.  A single channel failure is logged but does not
	 * abort recovery of the remaining channels.
	 */
	for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
		if ((1 << rdc) & set->owned.map) {
			rcrp = nxgep->rx_rcr_rings->rcr_rings[rdc];
			if (rcrp != NULL) {
				MUTEX_ENTER(&rcrp->lock);
				if (nxge_rxdma_fatal_err_recover(nxgep,
				    rdc) != NXGE_OK) {
					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
					    "Could not recover "
					    "channel %d", rdc));
				}
				MUTEX_EXIT(&rcrp->lock);
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Resetting IPP..."));

	/* Reset IPP */
	if (nxge_ipp_reset(nxgep) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_rx_port_fatal_err_recover: "
		    "Failed to reset IPP"));
		goto fail;
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Reset RxMAC..."));

	/* Reset RxMAC */
	if (nxge_rx_mac_reset(nxgep) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_rx_port_fatal_err_recover: "
		    "Failed to reset RxMAC"));
		goto fail;
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize IPP..."));

	/* Re-Initialize IPP */
	if (nxge_ipp_init(nxgep) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_rx_port_fatal_err_recover: "
		    "Failed to init IPP"));
		goto fail;
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize RxMAC..."));

	/* Re-Initialize RxMAC */
	if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_rx_port_fatal_err_recover: "
		    "Failed to reset RxMAC"));
		goto fail;
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-enable RxMAC..."));

	/* Re-enable RxMAC */
	if ((status = nxge_rx_mac_enable(nxgep)) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_rx_port_fatal_err_recover: "
		    "Failed to enable RxMAC"));
		goto fail;
	}

	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "Recovery Successful, RxPort Restored"));

	return (NXGE_OK);
fail:
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
	return (status);
}
4824 4824
4825 4825 void
4826 4826 nxge_rxdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan)
4827 4827 {
4828 4828 rx_dma_ctl_stat_t cs;
4829 4829 rx_ctl_dat_fifo_stat_t cdfs;
4830 4830
4831 4831 switch (err_id) {
4832 4832 case NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR:
4833 4833 case NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR:
4834 4834 case NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR:
4835 4835 case NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR:
4836 4836 case NXGE_FM_EREPORT_RDMC_RBR_TMOUT:
4837 4837 case NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR:
4838 4838 case NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS:
4839 4839 case NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR:
4840 4840 case NXGE_FM_EREPORT_RDMC_RCRINCON:
4841 4841 case NXGE_FM_EREPORT_RDMC_RCRFULL:
4842 4842 case NXGE_FM_EREPORT_RDMC_RBRFULL:
4843 4843 case NXGE_FM_EREPORT_RDMC_RBRLOGPAGE:
4844 4844 case NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE:
4845 4845 case NXGE_FM_EREPORT_RDMC_CONFIG_ERR:
4846 4846 RXDMA_REG_READ64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG,
4847 4847 chan, &cs.value);
4848 4848 if (err_id == NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR)
4849 4849 cs.bits.hdw.rcr_ack_err = 1;
4850 4850 else if (err_id == NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR)
4851 4851 cs.bits.hdw.dc_fifo_err = 1;
4852 4852 else if (err_id == NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR)
4853 4853 cs.bits.hdw.rcr_sha_par = 1;
4854 4854 else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR)
4855 4855 cs.bits.hdw.rbr_pre_par = 1;
4856 4856 else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_TMOUT)
4857 4857 cs.bits.hdw.rbr_tmout = 1;
4858 4858 else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR)
4859 4859 cs.bits.hdw.rsp_cnt_err = 1;
4860 4860 else if (err_id == NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS)
4861 4861 cs.bits.hdw.byte_en_bus = 1;
4862 4862 else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR)
4863 4863 cs.bits.hdw.rsp_dat_err = 1;
4864 4864 else if (err_id == NXGE_FM_EREPORT_RDMC_CONFIG_ERR)
4865 4865 cs.bits.hdw.config_err = 1;
4866 4866 else if (err_id == NXGE_FM_EREPORT_RDMC_RCRINCON)
4867 4867 cs.bits.hdw.rcrincon = 1;
4868 4868 else if (err_id == NXGE_FM_EREPORT_RDMC_RCRFULL)
4869 4869 cs.bits.hdw.rcrfull = 1;
4870 4870 else if (err_id == NXGE_FM_EREPORT_RDMC_RBRFULL)
4871 4871 cs.bits.hdw.rbrfull = 1;
4872 4872 else if (err_id == NXGE_FM_EREPORT_RDMC_RBRLOGPAGE)
4873 4873 cs.bits.hdw.rbrlogpage = 1;
4874 4874 else if (err_id == NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE)
4875 4875 cs.bits.hdw.cfiglogpage = 1;
4876 4876 #if defined(__i386)
4877 4877 cmn_err(CE_NOTE, "!Write 0x%llx to RX_DMA_CTL_STAT_DBG_REG\n",
4878 4878 cs.value);
4879 4879 #else
4880 4880 cmn_err(CE_NOTE, "!Write 0x%lx to RX_DMA_CTL_STAT_DBG_REG\n",
4881 4881 cs.value);
4882 4882 #endif
4883 4883 RXDMA_REG_WRITE64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG,
4884 4884 chan, cs.value);
4885 4885 break;
4886 4886 case NXGE_FM_EREPORT_RDMC_ID_MISMATCH:
4887 4887 case NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR:
4888 4888 case NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR:
4889 4889 cdfs.value = 0;
4890 4890 if (err_id == NXGE_FM_EREPORT_RDMC_ID_MISMATCH)
4891 4891 cdfs.bits.ldw.id_mismatch = (1 << nxgep->mac.portnum);
4892 4892 else if (err_id == NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR)
4893 4893 cdfs.bits.ldw.zcp_eop_err = (1 << nxgep->mac.portnum);
4894 4894 else if (err_id == NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR)
4895 4895 cdfs.bits.ldw.ipp_eop_err = (1 << nxgep->mac.portnum);
4896 4896 #if defined(__i386)
4897 4897 cmn_err(CE_NOTE,
4898 4898 "!Write 0x%llx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n",
4899 4899 cdfs.value);
4900 4900 #else
4901 4901 cmn_err(CE_NOTE,
4902 4902 "!Write 0x%lx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n",
4903 4903 cdfs.value);
4904 4904 #endif
4905 4905 NXGE_REG_WR64(nxgep->npi_handle,
4906 4906 RX_CTL_DAT_FIFO_STAT_DBG_REG, cdfs.value);
4907 4907 break;
4908 4908 case NXGE_FM_EREPORT_RDMC_DCF_ERR:
4909 4909 break;
4910 4910 case NXGE_FM_EREPORT_RDMC_RCR_ERR:
4911 4911 break;
4912 4912 }
4913 4913 }
4914 4914
4915 4915 static void
4916 4916 nxge_rxdma_databuf_free(p_rx_rbr_ring_t rbr_p)
4917 4917 {
4918 4918 rxring_info_t *ring_info;
4919 4919 int index;
4920 4920 uint32_t chunk_size;
4921 4921 uint64_t kaddr;
4922 4922 uint_t num_blocks;
4923 4923
4924 4924 NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_rxdma_databuf_free"));
4925 4925
4926 4926 if (rbr_p == NULL) {
4927 4927 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
4928 4928 "==> nxge_rxdma_databuf_free: NULL rbr pointer"));
4929 4929 return;
4930 4930 }
4931 4931
4932 4932 if (rbr_p->rbr_alloc_type == DDI_MEM_ALLOC) {
4933 4933 NXGE_DEBUG_MSG((NULL, DMA_CTL,
4934 4934 "<== nxge_rxdma_databuf_free: DDI"));
4935 4935 return;
4936 4936 }
4937 4937
4938 4938 ring_info = rbr_p->ring_info;
4939 4939 if (ring_info == NULL) {
4940 4940 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
4941 4941 "==> nxge_rxdma_databuf_free: NULL ring info"));
4942 4942 return;
4943 4943 }
4944 4944 num_blocks = rbr_p->num_blocks;
4945 4945 for (index = 0; index < num_blocks; index++) {
4946 4946 kaddr = ring_info->buffer[index].kaddr;
4947 4947 chunk_size = ring_info->buffer[index].buf_size;
4948 4948 NXGE_DEBUG_MSG((NULL, DMA_CTL,
4949 4949 "==> nxge_rxdma_databuf_free: free chunk %d "
4950 4950 "kaddrp $%p chunk size %d",
4951 4951 index, kaddr, chunk_size));
4952 4952 if (kaddr == NULL) continue;
4953 4953 nxge_free_buf(rbr_p->rbr_alloc_type, kaddr, chunk_size);
4954 4954 ring_info->buffer[index].kaddr = NULL;
4955 4955 }
4956 4956
4957 4957 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_databuf_free"));
4958 4958 }
4959 4959
4960 4960 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
4961 4961 extern void contig_mem_free(void *, size_t);
4962 4962 #endif
4963 4963
4964 4964 void
4965 4965 nxge_free_buf(buf_alloc_type_t alloc_type, uint64_t kaddr, uint32_t buf_size)
4966 4966 {
4967 4967 NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_free_buf"));
4968 4968
4969 4969 if (kaddr == NULL || !buf_size) {
4970 4970 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
4971 4971 "==> nxge_free_buf: invalid kaddr $%p size to free %d",
4972 4972 kaddr, buf_size));
4973 4973 return;
4974 4974 }
4975 4975
4976 4976 switch (alloc_type) {
4977 4977 case KMEM_ALLOC:
4978 4978 NXGE_DEBUG_MSG((NULL, DMA_CTL,
4979 4979 "==> nxge_free_buf: freeing kmem $%p size %d",
4980 4980 kaddr, buf_size));
4981 4981 #if defined(__i386)
4982 4982 KMEM_FREE((void *)(uint32_t)kaddr, buf_size);
4983 4983 #else
4984 4984 KMEM_FREE((void *)kaddr, buf_size);
4985 4985 #endif
4986 4986 break;
4987 4987
4988 4988 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
4989 4989 case CONTIG_MEM_ALLOC:
4990 4990 NXGE_DEBUG_MSG((NULL, DMA_CTL,
4991 4991 "==> nxge_free_buf: freeing contig_mem kaddr $%p size %d",
4992 4992 kaddr, buf_size));
4993 4993 contig_mem_free((void *)kaddr, buf_size);
4994 4994 break;
4995 4995 #endif
4996 4996
4997 4997 default:
4998 4998 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
4999 4999 "<== nxge_free_buf: unsupported alloc type %d",
5000 5000 alloc_type));
5001 5001 return;
5002 5002 }
5003 5003
5004 5004 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_free_buf"));
5005 5005 }
↓ open down ↓ |
3308 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX