Print this page
5042 stop using deprecated atomic functions
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/nxge/nxge_txdma.c
+++ new/usr/src/uts/common/io/nxge/nxge_txdma.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 #include <sys/nxge/nxge_impl.h>
28 28 #include <sys/nxge/nxge_txdma.h>
29 29 #include <sys/nxge/nxge_hio.h>
30 30 #include <npi_tx_rd64.h>
31 31 #include <npi_tx_wr64.h>
32 32 #include <sys/llc1.h>
33 33
34 34 uint32_t nxge_reclaim_pending = TXDMA_RECLAIM_PENDING_DEFAULT;
35 35 uint32_t nxge_tx_minfree = 64;
36 36 uint32_t nxge_tx_intr_thres = 0;
37 37 uint32_t nxge_tx_max_gathers = TX_MAX_GATHER_POINTERS;
38 38 uint32_t nxge_tx_tiny_pack = 1;
39 39 uint32_t nxge_tx_use_bcopy = 1;
40 40
41 41 extern uint32_t nxge_tx_ring_size;
42 42 extern uint32_t nxge_bcopy_thresh;
43 43 extern uint32_t nxge_dvma_thresh;
44 44 extern uint32_t nxge_dma_stream_thresh;
45 45 extern dma_method_t nxge_force_dma;
46 46 extern uint32_t nxge_cksum_offload;
47 47
48 48 /* Device register access attributes for PIO. */
49 49 extern ddi_device_acc_attr_t nxge_dev_reg_acc_attr;
50 50 /* Device descriptor access attributes for DMA. */
51 51 extern ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr;
52 52 /* Device buffer access attributes for DMA. */
53 53 extern ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr;
54 54 extern ddi_dma_attr_t nxge_desc_dma_attr;
55 55 extern ddi_dma_attr_t nxge_tx_dma_attr;
56 56
57 57 extern void nxge_tx_ring_task(void *arg);
58 58
59 59 static nxge_status_t nxge_map_txdma(p_nxge_t, int);
60 60
61 61 static nxge_status_t nxge_txdma_hw_start(p_nxge_t, int);
62 62
63 63 static nxge_status_t nxge_map_txdma_channel(p_nxge_t, uint16_t,
64 64 p_nxge_dma_common_t *, p_tx_ring_t *,
65 65 uint32_t, p_nxge_dma_common_t *,
66 66 p_tx_mbox_t *);
67 67 static void nxge_unmap_txdma_channel(p_nxge_t, uint16_t);
68 68
69 69 static nxge_status_t nxge_map_txdma_channel_buf_ring(p_nxge_t, uint16_t,
70 70 p_nxge_dma_common_t *, p_tx_ring_t *, uint32_t);
71 71 static void nxge_unmap_txdma_channel_buf_ring(p_nxge_t, p_tx_ring_t);
72 72
73 73 static void nxge_map_txdma_channel_cfg_ring(p_nxge_t, uint16_t,
74 74 p_nxge_dma_common_t *, p_tx_ring_t,
75 75 p_tx_mbox_t *);
76 76 static void nxge_unmap_txdma_channel_cfg_ring(p_nxge_t,
77 77 p_tx_ring_t, p_tx_mbox_t);
78 78
79 79 static nxge_status_t nxge_txdma_start_channel(p_nxge_t, uint16_t,
80 80 p_tx_ring_t, p_tx_mbox_t);
81 81 static nxge_status_t nxge_txdma_stop_channel(p_nxge_t, uint16_t);
82 82
83 83 static p_tx_ring_t nxge_txdma_get_ring(p_nxge_t, uint16_t);
84 84 static nxge_status_t nxge_tx_err_evnts(p_nxge_t, uint_t,
85 85 p_nxge_ldv_t, tx_cs_t);
86 86 static p_tx_mbox_t nxge_txdma_get_mbox(p_nxge_t, uint16_t);
87 87 static nxge_status_t nxge_txdma_fatal_err_recover(p_nxge_t,
88 88 uint16_t, p_tx_ring_t);
89 89
90 90 static void nxge_txdma_fixup_hung_channel(p_nxge_t nxgep,
91 91 p_tx_ring_t ring_p, uint16_t channel);
92 92
93 93 nxge_status_t
94 94 nxge_init_txdma_channels(p_nxge_t nxgep)
95 95 {
96 96 nxge_grp_set_t *set = &nxgep->tx_set;
97 97 int i, tdc, count;
98 98 nxge_grp_t *group;
99 99 dc_map_t map;
100 100 int dev_gindex;
101 101
102 102 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_txdma_channels"));
103 103
104 104 for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
105 105 if ((1 << i) & set->lg.map) {
106 106 group = set->group[i];
107 107 dev_gindex =
108 108 nxgep->pt_config.hw_config.def_mac_txdma_grpid + i;
109 109 map = nxgep->pt_config.tdc_grps[dev_gindex].map;
110 110 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
111 111 if ((1 << tdc) & map) {
112 112 if ((nxge_grp_dc_add(nxgep,
113 113 group, VP_BOUND_TX, tdc)))
114 114 goto init_txdma_channels_exit;
115 115 }
116 116 }
117 117 }
118 118 if (++count == set->lg.count)
119 119 break;
120 120 }
121 121
122 122 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_txdma_channels"));
123 123 return (NXGE_OK);
124 124
125 125 init_txdma_channels_exit:
126 126 for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
127 127 if ((1 << i) & set->lg.map) {
128 128 group = set->group[i];
129 129 dev_gindex =
130 130 nxgep->pt_config.hw_config.def_mac_txdma_grpid + i;
131 131 map = nxgep->pt_config.tdc_grps[dev_gindex].map;
132 132 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
133 133 if ((1 << tdc) & map) {
134 134 nxge_grp_dc_remove(nxgep,
135 135 VP_BOUND_TX, tdc);
136 136 }
137 137 }
138 138 }
139 139 if (++count == set->lg.count)
140 140 break;
141 141 }
142 142
143 143 return (NXGE_ERROR);
144 144
145 145 }
146 146
147 147 nxge_status_t
148 148 nxge_init_txdma_channel(
149 149 p_nxge_t nxge,
150 150 int channel)
151 151 {
152 152 nxge_status_t status;
153 153
154 154 NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_txdma_channel"));
155 155
156 156 status = nxge_map_txdma(nxge, channel);
157 157 if (status != NXGE_OK) {
158 158 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
159 159 "<== nxge_init_txdma_channel: status 0x%x", status));
160 160 (void) npi_txdma_dump_tdc_regs(nxge->npi_handle, channel);
161 161 return (status);
162 162 }
163 163
164 164 status = nxge_txdma_hw_start(nxge, channel);
165 165 if (status != NXGE_OK) {
166 166 (void) nxge_unmap_txdma_channel(nxge, channel);
167 167 (void) npi_txdma_dump_tdc_regs(nxge->npi_handle, channel);
168 168 return (status);
169 169 }
170 170
171 171 if (!nxge->statsp->tdc_ksp[channel])
172 172 nxge_setup_tdc_kstats(nxge, channel);
173 173
174 174 NXGE_DEBUG_MSG((nxge, MEM2_CTL, "<== nxge_init_txdma_channel"));
175 175
176 176 return (status);
177 177 }
178 178
179 179 void
180 180 nxge_uninit_txdma_channels(p_nxge_t nxgep)
181 181 {
182 182 nxge_grp_set_t *set = &nxgep->tx_set;
183 183 int tdc;
184 184
185 185 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_txdma_channels"));
186 186
187 187 if (set->owned.map == 0) {
188 188 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
189 189 "nxge_uninit_txdma_channels: no channels"));
190 190 return;
191 191 }
192 192
193 193 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
194 194 if ((1 << tdc) & set->owned.map) {
195 195 nxge_grp_dc_remove(nxgep, VP_BOUND_TX, tdc);
196 196 }
197 197 }
198 198
199 199 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_txdma_channels"));
200 200 }
201 201
202 202 void
203 203 nxge_uninit_txdma_channel(p_nxge_t nxgep, int channel)
204 204 {
205 205 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_uninit_txdma_channel"));
206 206
207 207 if (nxgep->statsp->tdc_ksp[channel]) {
208 208 kstat_delete(nxgep->statsp->tdc_ksp[channel]);
209 209 nxgep->statsp->tdc_ksp[channel] = 0;
210 210 }
211 211
212 212 if (nxge_txdma_stop_channel(nxgep, channel) != NXGE_OK)
213 213 goto nxge_uninit_txdma_channel_exit;
214 214
215 215 nxge_unmap_txdma_channel(nxgep, channel);
216 216
217 217 nxge_uninit_txdma_channel_exit:
218 218 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_uninit_txdma_channel"));
219 219 }
220 220
221 221 void
222 222 nxge_setup_dma_common(p_nxge_dma_common_t dest_p, p_nxge_dma_common_t src_p,
223 223 uint32_t entries, uint32_t size)
224 224 {
225 225 size_t tsize;
226 226 *dest_p = *src_p;
227 227 tsize = size * entries;
228 228 dest_p->alength = tsize;
229 229 dest_p->nblocks = entries;
230 230 dest_p->block_size = size;
231 231 dest_p->offset += tsize;
232 232
233 233 src_p->kaddrp = (caddr_t)dest_p->kaddrp + tsize;
234 234 src_p->alength -= tsize;
235 235 src_p->dma_cookie.dmac_laddress += tsize;
236 236 src_p->dma_cookie.dmac_size -= tsize;
237 237 }
238 238
239 239 /*
240 240 * nxge_reset_txdma_channel
241 241 *
242 242 * Reset a TDC.
243 243 *
244 244 * Arguments:
245 245 * nxgep
246 246 * channel The channel to reset.
247 247 * reg_data The current TX_CS.
248 248 *
249 249 * Notes:
250 250 *
251 251 * NPI/NXGE function calls:
252 252 * npi_txdma_channel_reset()
253 253 * npi_txdma_channel_control()
254 254 *
255 255 * Registers accessed:
256 256 * TX_CS DMC+0x40028 Transmit Control And Status
257 257 * TX_RING_KICK DMC+0x40018 Transmit Ring Kick
258 258 *
259 259 * Context:
260 260 * Any domain
261 261 */
262 262 nxge_status_t
263 263 nxge_reset_txdma_channel(p_nxge_t nxgep, uint16_t channel, uint64_t reg_data)
264 264 {
265 265 npi_status_t rs = NPI_SUCCESS;
266 266 nxge_status_t status = NXGE_OK;
267 267 npi_handle_t handle;
268 268
269 269 NXGE_DEBUG_MSG((nxgep, TX_CTL, " ==> nxge_reset_txdma_channel"));
270 270
271 271 handle = NXGE_DEV_NPI_HANDLE(nxgep);
272 272 if ((reg_data & TX_CS_RST_MASK) == TX_CS_RST_MASK) {
273 273 rs = npi_txdma_channel_reset(handle, channel);
274 274 } else {
275 275 rs = npi_txdma_channel_control(handle, TXDMA_RESET,
276 276 channel);
277 277 }
278 278
279 279 if (rs != NPI_SUCCESS) {
280 280 status = NXGE_ERROR | rs;
281 281 }
282 282
283 283 /*
284 284 * Reset the tail (kick) register to 0.
285 285 * (Hardware will not reset it. Tx overflow fatal
286 286 * error if tail is not set to 0 after reset!
287 287 */
288 288 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0);
289 289
290 290 NXGE_DEBUG_MSG((nxgep, TX_CTL, " <== nxge_reset_txdma_channel"));
291 291 return (status);
292 292 }
293 293
294 294 /*
295 295 * nxge_init_txdma_channel_event_mask
296 296 *
297 297 * Enable interrupts for a set of events.
298 298 *
299 299 * Arguments:
300 300 * nxgep
301 301 * channel The channel to map.
302 302 * mask_p The events to enable.
303 303 *
304 304 * Notes:
305 305 *
306 306 * NPI/NXGE function calls:
307 307 * npi_txdma_event_mask()
308 308 *
309 309 * Registers accessed:
310 310 * TX_ENT_MSK DMC+0x40020 Transmit Event Mask
311 311 *
312 312 * Context:
313 313 * Any domain
314 314 */
315 315 nxge_status_t
316 316 nxge_init_txdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel,
317 317 p_tx_dma_ent_msk_t mask_p)
318 318 {
319 319 npi_handle_t handle;
320 320 npi_status_t rs = NPI_SUCCESS;
321 321 nxge_status_t status = NXGE_OK;
322 322
323 323 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
324 324 "<== nxge_init_txdma_channel_event_mask"));
325 325
326 326 handle = NXGE_DEV_NPI_HANDLE(nxgep);
327 327 rs = npi_txdma_event_mask(handle, OP_SET, channel, mask_p);
328 328 if (rs != NPI_SUCCESS) {
329 329 status = NXGE_ERROR | rs;
330 330 }
331 331
332 332 return (status);
333 333 }
334 334
335 335 /*
336 336 * nxge_init_txdma_channel_cntl_stat
337 337 *
338 338 * Stop a TDC. If at first we don't succeed, inject an error.
339 339 *
340 340 * Arguments:
341 341 * nxgep
342 342 * channel The channel to stop.
343 343 *
344 344 * Notes:
345 345 *
346 346 * NPI/NXGE function calls:
347 347 * npi_txdma_control_status()
348 348 *
349 349 * Registers accessed:
350 350 * TX_CS DMC+0x40028 Transmit Control And Status
351 351 *
352 352 * Context:
353 353 * Any domain
354 354 */
355 355 nxge_status_t
356 356 nxge_init_txdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel,
357 357 uint64_t reg_data)
358 358 {
359 359 npi_handle_t handle;
360 360 npi_status_t rs = NPI_SUCCESS;
361 361 nxge_status_t status = NXGE_OK;
362 362
363 363 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
364 364 "<== nxge_init_txdma_channel_cntl_stat"));
365 365
366 366 handle = NXGE_DEV_NPI_HANDLE(nxgep);
367 367 rs = npi_txdma_control_status(handle, OP_SET, channel,
368 368 (p_tx_cs_t)®_data);
369 369
370 370 if (rs != NPI_SUCCESS) {
371 371 status = NXGE_ERROR | rs;
372 372 }
373 373
374 374 return (status);
375 375 }
376 376
377 377 /*
378 378 * nxge_enable_txdma_channel
379 379 *
380 380 * Enable a TDC.
381 381 *
382 382 * Arguments:
383 383 * nxgep
384 384 * channel The channel to enable.
385 385 * tx_desc_p channel's transmit descriptor ring.
386 386 * mbox_p channel's mailbox,
387 387 *
388 388 * Notes:
389 389 *
390 390 * NPI/NXGE function calls:
391 391 * npi_txdma_ring_config()
392 392 * npi_txdma_mbox_config()
393 393 * npi_txdma_channel_init_enable()
394 394 *
395 395 * Registers accessed:
396 396 * TX_RNG_CFIG DMC+0x40000 Transmit Ring Configuration
397 397 * TXDMA_MBH DMC+0x40030 TXDMA Mailbox High
398 398 * TXDMA_MBL DMC+0x40038 TXDMA Mailbox Low
399 399 * TX_CS DMC+0x40028 Transmit Control And Status
400 400 *
401 401 * Context:
402 402 * Any domain
403 403 */
404 404 nxge_status_t
405 405 nxge_enable_txdma_channel(p_nxge_t nxgep,
406 406 uint16_t channel, p_tx_ring_t tx_desc_p, p_tx_mbox_t mbox_p)
407 407 {
408 408 npi_handle_t handle;
409 409 npi_status_t rs = NPI_SUCCESS;
410 410 nxge_status_t status = NXGE_OK;
411 411
412 412 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_enable_txdma_channel"));
413 413
414 414 handle = NXGE_DEV_NPI_HANDLE(nxgep);
415 415 /*
416 416 * Use configuration data composed at init time.
417 417 * Write to hardware the transmit ring configurations.
418 418 */
419 419 rs = npi_txdma_ring_config(handle, OP_SET, channel,
420 420 (uint64_t *)&(tx_desc_p->tx_ring_cfig.value));
421 421
422 422 if (rs != NPI_SUCCESS) {
423 423 return (NXGE_ERROR | rs);
424 424 }
425 425
426 426 if (isLDOMguest(nxgep)) {
427 427 /* Add interrupt handler for this channel. */
428 428 if (nxge_hio_intr_add(nxgep, VP_BOUND_TX, channel) != NXGE_OK)
429 429 return (NXGE_ERROR);
430 430 }
431 431
432 432 /* Write to hardware the mailbox */
433 433 rs = npi_txdma_mbox_config(handle, OP_SET, channel,
434 434 (uint64_t *)&mbox_p->tx_mbox.dma_cookie.dmac_laddress);
435 435
436 436 if (rs != NPI_SUCCESS) {
437 437 return (NXGE_ERROR | rs);
438 438 }
439 439
440 440 /* Start the DMA engine. */
441 441 rs = npi_txdma_channel_init_enable(handle, channel);
442 442
443 443 if (rs != NPI_SUCCESS) {
444 444 return (NXGE_ERROR | rs);
445 445 }
446 446
447 447 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_enable_txdma_channel"));
448 448
449 449 return (status);
450 450 }
451 451
/*
 * nxge_fill_tx_hdr
 *
 * Fill in the Neptune transmit packet header (pkthdrp->pkthdr) for the
 * data packet 'mp' (which does NOT include the transmit header itself).
 *
 *	mp		the data packet (mblk chain)
 *	fill_len	if true, only record pkt_len in the header and return
 *	l4_cksum	request L4 checksum handling (TCP/UDP)
 *	pkt_len		total transfer length to record when fill_len is set
 *	npads		pad byte count to record in the header
 *	pkthdrp		header to fill (caller must have zeroed it)
 *	start_offset	L4 checksum start offset (bytes)
 *	stuff_offset	L4 checksum stuff offset (bytes)
 *
 * Parses the Ethernet/LLC/VLAN framing and the IPv4/IPv6 header out of
 * the mblk chain (copying up to sizeof (hdrs_buf) bytes into a local
 * buffer when the headers are split across mblks), then sets the LLC,
 * VLAN, IHL, L3START, IP_VER, L4START/L4STUFF and checksum-enable
 * fields of the 64-bit header word as appropriate.
 */
void
nxge_fill_tx_hdr(p_mblk_t mp, boolean_t fill_len,
	boolean_t l4_cksum, int pkt_len, uint8_t npads,
	p_tx_pkt_hdr_all_t pkthdrp,
	t_uscalar_t start_offset,
	t_uscalar_t stuff_offset)
{
    p_tx_pkt_header_t hdrp;
    p_mblk_t nmp;
    uint64_t tmp;
    size_t mblk_len;
    size_t iph_len;
    size_t hdrs_size;
    /* Scratch area: Ethernet header + up to 64 bytes of L3 + slack. */
    uint8_t hdrs_buf[sizeof (struct ether_header) +
        64 + sizeof (uint32_t)];
    uint8_t *cursor;
    uint8_t *ip_buf;
    uint16_t eth_type;
    uint8_t ipproto;
    boolean_t is_vlan = B_FALSE;
    size_t eth_hdr_size;

    NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: mp $%p", mp));

    /*
     * Caller should zero out the headers first.
     */
    hdrp = (p_tx_pkt_header_t)&pkthdrp->pkthdr;

    /* Fast path: only the total transfer length is needed. */
    if (fill_len) {
        NXGE_DEBUG_MSG((NULL, TX_CTL,
            "==> nxge_fill_tx_hdr: pkt_len %d "
            "npads %d", pkt_len, npads));
        tmp = (uint64_t)pkt_len;
        hdrp->value |= (tmp << TX_PKT_HEADER_TOT_XFER_LEN_SHIFT);
        goto fill_tx_header_done;
    }

    /* Record the pad count. */
    hdrp->value |= (((uint64_t)npads) << TX_PKT_HEADER_PAD_SHIFT);

    /*
     * mp is the original data packet (does not include the
     * Neptune transmit header).
     */
    nmp = mp;
    NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: "
        "mp $%p b_rptr $%p len %d",
        mp, nmp->b_rptr, MBLKL(nmp)));
    /*
     * Copy an ether_vlan_header's worth of bytes from the (possibly
     * fragmented) mblk chain into hdrs_buf so the Ethernet framing
     * can be examined contiguously.
     */
    cursor = &hdrs_buf[0];
    tmp = sizeof (struct ether_vlan_header);
    while ((nmp != NULL) && (tmp > 0)) {
        size_t buflen;
        mblk_len = MBLKL(nmp);
        buflen = min((size_t)tmp, mblk_len);
        bcopy(nmp->b_rptr, cursor, buflen);
        cursor += buflen;
        tmp -= buflen;
        nmp = nmp->b_cont;
    }

    nmp = mp;
    mblk_len = MBLKL(nmp);
    ip_buf = NULL;
    eth_type = ntohs(((p_ether_header_t)hdrs_buf)->ether_type);
    NXGE_DEBUG_MSG((NULL, TX_CTL, "==> : nxge_fill_tx_hdr: (value 0x%llx) "
        "ether type 0x%x", eth_type, hdrp->value));

    if (eth_type < ETHERMTU) {
        /*
         * A "type" below ETHERMTU is actually an 802.3 length field:
         * LLC framing.  Only SNAP-encapsulated frames carry a real
         * ether type (2 bytes at offset 6 past the LLC header).
         */
        tmp = 1ull;
        hdrp->value |= (tmp << TX_PKT_HEADER_LLC_SHIFT);
        NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: LLC "
            "value 0x%llx", hdrp->value));
        if (*(hdrs_buf + sizeof (struct ether_header))
            == LLC_SNAP_SAP) {
            eth_type = ntohs(*((uint16_t *)(hdrs_buf +
                sizeof (struct ether_header) + 6)));
            NXGE_DEBUG_MSG((NULL, TX_CTL,
                "==> nxge_tx_pkt_hdr_init: LLC ether type 0x%x",
                eth_type));
        } else {
            /* Non-SNAP LLC: nothing more to parse. */
            goto fill_tx_header_done;
        }
    } else if (eth_type == VLAN_ETHERTYPE) {
        /* Tagged frame: flag VLAN and fetch the inner ether type. */
        tmp = 1ull;
        hdrp->value |= (tmp << TX_PKT_HEADER_VLAN__SHIFT);

        eth_type = ntohs(((struct ether_vlan_header *)
            hdrs_buf)->ether_type);
        is_vlan = B_TRUE;
        NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: VLAN "
            "value 0x%llx", hdrp->value));
    }

    /* L3 starts after the (possibly VLAN-tagged) Ethernet header. */
    if (!is_vlan) {
        eth_hdr_size = sizeof (struct ether_header);
    } else {
        eth_hdr_size = sizeof (struct ether_vlan_header);
    }

    switch (eth_type) {
    case ETHERTYPE_IP:
        /*
         * Try to read the IPv4 header in place from the first mblk;
         * fall back to copying the chain into hdrs_buf when the
         * header is not fully contained there.
         */
        if (mblk_len > eth_hdr_size + sizeof (uint8_t)) {
            ip_buf = nmp->b_rptr + eth_hdr_size;
            mblk_len -= eth_hdr_size;
            /* IHL field: header length in 32-bit words. */
            iph_len = ((*ip_buf) & 0x0f);
            /*
             * NOTE(review): iph_len here is in 4-byte words while
             * mblk_len is in bytes — this containment check looks
             * unit-mismatched; confirm against the original intent.
             */
            if (mblk_len > (iph_len + sizeof (uint32_t))) {
                ip_buf = nmp->b_rptr;
                ip_buf += eth_hdr_size;
            } else {
                ip_buf = NULL;
            }

        }
        if (ip_buf == NULL) {
            /* Flatten the start of the chain into hdrs_buf. */
            hdrs_size = 0;
            ((p_ether_header_t)hdrs_buf)->ether_type = 0;
            while ((nmp) && (hdrs_size <
                sizeof (hdrs_buf))) {
                mblk_len = (size_t)nmp->b_wptr -
                    (size_t)nmp->b_rptr;
                if (mblk_len >=
                    (sizeof (hdrs_buf) - hdrs_size))
                    mblk_len = sizeof (hdrs_buf) -
                        hdrs_size;
                bcopy(nmp->b_rptr,
                    &hdrs_buf[hdrs_size], mblk_len);
                hdrs_size += mblk_len;
                nmp = nmp->b_cont;
            }
            ip_buf = hdrs_buf;
            ip_buf += eth_hdr_size;
            iph_len = ((*ip_buf) & 0x0f);
        }

        /* Byte 9 of the IPv4 header is the protocol field. */
        ipproto = ip_buf[9];

        tmp = (uint64_t)iph_len;
        hdrp->value |= (tmp << TX_PKT_HEADER_IHL_SHIFT);
        /* L3START is expressed in 2-byte units. */
        tmp = (uint64_t)(eth_hdr_size >> 1);
        hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);

        NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv4 "
            " iph_len %d l3start %d eth_hdr_size %d proto 0x%x"
            "tmp 0x%x",
            iph_len, hdrp->bits.hdw.l3start, eth_hdr_size,
            ipproto, tmp));
        NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IP "
            "value 0x%llx", hdrp->value));

        break;

    case ETHERTYPE_IPV6:
        /* IPv6 is always parsed out of a flattened copy. */
        hdrs_size = 0;
        ((p_ether_header_t)hdrs_buf)->ether_type = 0;
        while ((nmp) && (hdrs_size <
            sizeof (hdrs_buf))) {
            mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr;
            if (mblk_len >=
                (sizeof (hdrs_buf) - hdrs_size))
                mblk_len = sizeof (hdrs_buf) -
                    hdrs_size;
            bcopy(nmp->b_rptr,
                &hdrs_buf[hdrs_size], mblk_len);
            hdrs_size += mblk_len;
            nmp = nmp->b_cont;
        }
        ip_buf = hdrs_buf;
        ip_buf += eth_hdr_size;

        /* Mark the packet as IPv6. */
        tmp = 1ull;
        hdrp->value |= (tmp << TX_PKT_HEADER_IP_VER_SHIFT);

        tmp = (eth_hdr_size >> 1);
        hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);

        /* byte 6 is the next header protocol */
        ipproto = ip_buf[6];

        NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv6 "
            " iph_len %d l3start %d eth_hdr_size %d proto 0x%x",
            iph_len, hdrp->bits.hdw.l3start, eth_hdr_size,
            ipproto));
        NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IPv6 "
            "value 0x%llx", hdrp->value));

        break;

    default:
        NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: non-IP"));
        goto fill_tx_header_done;
    }

    switch (ipproto) {
    case IPPROTO_TCP:
        NXGE_DEBUG_MSG((NULL, TX_CTL,
            "==> nxge_fill_tx_hdr: TCP (cksum flag %d)", l4_cksum));
        if (l4_cksum) {
            /*
             * Let hardware checksum TCP; record the start/stuff
             * offsets in 2-byte units.
             */
            hdrp->value |= TX_CKSUM_EN_PKT_TYPE_TCP;
            hdrp->value |=
                (((uint64_t)(start_offset >> 1)) <<
                TX_PKT_HEADER_L4START_SHIFT);
            hdrp->value |=
                (((uint64_t)(stuff_offset >> 1)) <<
                TX_PKT_HEADER_L4STUFF_SHIFT);

            NXGE_DEBUG_MSG((NULL, TX_CTL,
                "==> nxge_tx_pkt_hdr_init: TCP CKSUM "
                "value 0x%llx", hdrp->value));
        }

        NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: TCP "
            "value 0x%llx", hdrp->value));
        break;

    case IPPROTO_UDP:
        NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: UDP"));
        if (l4_cksum) {
            if (!nxge_cksum_offload) {
                uint16_t *up;
                uint16_t cksum;
                t_uscalar_t stuff_len;

                /*
                 * The checksum field has the
                 * partial checksum.
                 * IP_CSUM() macro calls ip_cksum() which
                 * can add in the partial checksum.
                 */
                cksum = IP_CSUM(mp, start_offset, 0);
                /*
                 * Walk the chain to the mblk containing
                 * stuff_offset, then store the software
                 * checksum there directly.
                 */
                stuff_len = stuff_offset;
                nmp = mp;
                mblk_len = MBLKL(nmp);
                while ((nmp != NULL) &&
                    (mblk_len < stuff_len)) {
                    stuff_len -= mblk_len;
                    nmp = nmp->b_cont;
                    if (nmp)
                        mblk_len = MBLKL(nmp);
                }
                ASSERT(nmp);
                up = (uint16_t *)(nmp->b_rptr + stuff_len);

                *up = cksum;
                /* Software did the work: disable HW UDP cksum. */
                hdrp->value &= ~TX_CKSUM_EN_PKT_TYPE_UDP;
                NXGE_DEBUG_MSG((NULL, TX_CTL,
                    "==> nxge_tx_pkt_hdr_init: UDP offset %d "
                    "use sw cksum "
                    "write to $%p cksum 0x%x content up 0x%x",
                    stuff_len,
                    up,
                    cksum,
                    *up));
            } else {
                /* Hardware will compute the full checksum */
                hdrp->value |= TX_CKSUM_EN_PKT_TYPE_UDP;
                hdrp->value |=
                    (((uint64_t)(start_offset >> 1)) <<
                    TX_PKT_HEADER_L4START_SHIFT);
                hdrp->value |=
                    (((uint64_t)(stuff_offset >> 1)) <<
                    TX_PKT_HEADER_L4STUFF_SHIFT);

                NXGE_DEBUG_MSG((NULL, TX_CTL,
                    "==> nxge_tx_pkt_hdr_init: UDP offset %d "
                    " use partial checksum "
                    "cksum 0x%x ",
                    "value 0x%llx",
                    stuff_offset,
                    IP_CSUM(mp, start_offset, 0),
                    hdrp->value));
            }
        }

        NXGE_DEBUG_MSG((NULL, TX_CTL,
            "==> nxge_tx_pkt_hdr_init: UDP"
            "value 0x%llx", hdrp->value));
        break;

    default:
        goto fill_tx_header_done;
    }

fill_tx_header_done:
    NXGE_DEBUG_MSG((NULL, TX_CTL,
        "==> nxge_fill_tx_hdr: pkt_len %d "
        "npads %d value 0x%llx", pkt_len, npads, hdrp->value));

    NXGE_DEBUG_MSG((NULL, TX_CTL, "<== nxge_fill_tx_hdr"));
}
742 742
743 743 /*ARGSUSED*/
744 744 p_mblk_t
745 745 nxge_tx_pkt_header_reserve(p_mblk_t mp, uint8_t *npads)
746 746 {
747 747 p_mblk_t newmp = NULL;
748 748
749 749 if ((newmp = allocb(TX_PKT_HEADER_SIZE, BPRI_MED)) == NULL) {
750 750 NXGE_DEBUG_MSG((NULL, TX_CTL,
751 751 "<== nxge_tx_pkt_header_reserve: allocb failed"));
752 752 return (NULL);
753 753 }
754 754
755 755 NXGE_DEBUG_MSG((NULL, TX_CTL,
756 756 "==> nxge_tx_pkt_header_reserve: get new mp"));
757 757 DB_TYPE(newmp) = M_DATA;
758 758 newmp->b_rptr = newmp->b_wptr = DB_LIM(newmp);
759 759 linkb(newmp, mp);
760 760 newmp->b_rptr -= TX_PKT_HEADER_SIZE;
761 761
762 762 NXGE_DEBUG_MSG((NULL, TX_CTL, "==>nxge_tx_pkt_header_reserve: "
763 763 "b_rptr $%p b_wptr $%p",
764 764 newmp->b_rptr, newmp->b_wptr));
765 765
766 766 NXGE_DEBUG_MSG((NULL, TX_CTL,
767 767 "<== nxge_tx_pkt_header_reserve: use new mp"));
768 768
769 769 return (newmp);
770 770 }
771 771
/*
 * nxge_tx_pkt_nmblocks
 *
 * Estimate how many transmit gather descriptors packet 'mp' needs and
 * return that count (0 on allocation failure).  *tot_xfer_len_p is set
 * to the total payload length.  Side effects on the chain:
 *   - mblks longer than TX_MAX_TRANSFER_LENGTH (4K) are split in place
 *     with dupb();
 *   - if the descriptor count would exceed the hardware's 15 gather
 *     pointers, the remainder of the chain is flattened with
 *     msgpullup().
 * Runs of consecutive small (< nxge_bcopy_thresh) mblks are counted as
 * one block, since they will be bcopy'd into a single buffer.
 */
int
nxge_tx_pkt_nmblocks(p_mblk_t mp, int *tot_xfer_len_p)
{
    uint_t nmblks;
    ssize_t len;
    uint_t pkt_len;      /* accumulated length of the current small-mblk run */
    p_mblk_t nmp, bmp, tmp;
    uint8_t *b_wptr;

    NXGE_DEBUG_MSG((NULL, TX_CTL,
        "==> nxge_tx_pkt_nmblocks: mp $%p rptr $%p wptr $%p "
        "len %d", mp, mp->b_rptr, mp->b_wptr, MBLKL(mp)));

    nmp = mp;
    bmp = mp;            /* trails nmp: last mblk visited */
    nmblks = 0;
    pkt_len = 0;
    *tot_xfer_len_p = 0;

    while (nmp) {
        len = MBLKL(nmp);
        NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: "
            "len %d pkt_len %d nmblks %d tot_xfer_len %d",
            len, pkt_len, nmblks,
            *tot_xfer_len_p));

        /* Skip zero-length mblks entirely. */
        if (len <= 0) {
            bmp = nmp;
            nmp = nmp->b_cont;
            NXGE_DEBUG_MSG((NULL, TX_CTL,
                "==> nxge_tx_pkt_nmblocks: "
                "len (0) pkt_len %d nmblks %d",
                pkt_len, nmblks));
            continue;
        }

        *tot_xfer_len_p += len;
        NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: "
            "len %d pkt_len %d nmblks %d tot_xfer_len %d",
            len, pkt_len, nmblks,
            *tot_xfer_len_p));

        if (len < nxge_bcopy_thresh) {
            /*
             * Small mblk: the first of a run opens a new block;
             * once the run reaches the bcopy threshold, close it
             * and restart scanning from the trailing pointer.
             */
            NXGE_DEBUG_MSG((NULL, TX_CTL,
                "==> nxge_tx_pkt_nmblocks: "
                "len %d (< thresh) pkt_len %d nmblks %d",
                len, pkt_len, nmblks));
            if (pkt_len == 0)
                nmblks++;
            pkt_len += len;
            if (pkt_len >= nxge_bcopy_thresh) {
                pkt_len = 0;
                len = 0;
                nmp = bmp;
            }
        } else {
            NXGE_DEBUG_MSG((NULL, TX_CTL,
                "==> nxge_tx_pkt_nmblocks: "
                "len %d (> thresh) pkt_len %d nmblks %d",
                len, pkt_len, nmblks));
            pkt_len = 0;
            nmblks++;
            /*
             * Hardware limits the transfer length to 4K.
             * If len is more than 4K, we need to break
             * it up to at most 2 more blocks.
             */
            if (len > TX_MAX_TRANSFER_LENGTH) {
                uint32_t nsegs;

                nsegs = 1;
                NXGE_DEBUG_MSG((NULL, TX_CTL,
                    "==> nxge_tx_pkt_nmblocks: "
                    "len %d pkt_len %d nmblks %d nsegs %d",
                    len, pkt_len, nmblks, nsegs));
                /*
                 * NOTE(review): this modulo test against 2*4K
                 * decides whether a second split is needed —
                 * confirm it matches the intended "at most 2
                 * more blocks" rule for all lengths.
                 */
                if (len % (TX_MAX_TRANSFER_LENGTH * 2)) {
                    ++nsegs;
                }
                do {
                    /* Truncate nmp at 4K, dup the remainder. */
                    b_wptr = nmp->b_rptr +
                        TX_MAX_TRANSFER_LENGTH;
                    nmp->b_wptr = b_wptr;
                    if ((tmp = dupb(nmp)) == NULL) {
                        return (0);
                    }
                    /*
                     * NOTE(review): nmp->b_wptr was truncated
                     * BEFORE dupb(), so tmp->b_rptr and
                     * tmp->b_wptr both end up at b_wptr here,
                     * making the dup appear zero-length —
                     * verify this split against the driver's
                     * actual use of these blocks.
                     */
                    tmp->b_rptr = b_wptr;
                    tmp->b_wptr = nmp->b_wptr;
                    tmp->b_cont = nmp->b_cont;
                    nmp->b_cont = tmp;
                    nmblks++;
                    if (--nsegs) {
                        nmp = tmp;
                    }
                } while (nsegs);
                nmp = tmp;
            }
        }

        /*
         * Hardware limits the transmit gather pointers to 15.
         */
        if (nmp->b_cont && (nmblks + TX_GATHER_POINTERS_THRESHOLD) >
            TX_MAX_GATHER_POINTERS) {
            NXGE_DEBUG_MSG((NULL, TX_CTL,
                "==> nxge_tx_pkt_nmblocks: pull msg - "
                "len %d pkt_len %d nmblks %d",
                len, pkt_len, nmblks));
            /* Pull all message blocks from b_cont */
            if ((tmp = msgpullup(nmp->b_cont, -1)) == NULL) {
                return (0);
            }
            freemsg(nmp->b_cont);
            nmp->b_cont = tmp;
            pkt_len = 0;
        }
        bmp = nmp;
        nmp = nmp->b_cont;
    }

    NXGE_DEBUG_MSG((NULL, TX_CTL,
        "<== nxge_tx_pkt_nmblocks: rptr $%p wptr $%p "
        "nmblks %d len %d tot_xfer_len %d",
        mp->b_rptr, mp->b_wptr, nmblks,
        MBLKL(mp), *tot_xfer_len_p));

    return (nmblks);
}
899 899
900 900 boolean_t
901 901 nxge_txdma_reclaim(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, int nmblks)
902 902 {
903 903 boolean_t status = B_TRUE;
904 904 p_nxge_dma_common_t tx_desc_dma_p;
905 905 nxge_dma_common_t desc_area;
906 906 p_tx_desc_t tx_desc_ring_vp;
907 907 p_tx_desc_t tx_desc_p;
908 908 p_tx_desc_t tx_desc_pp;
909 909 tx_desc_t r_tx_desc;
910 910 p_tx_msg_t tx_msg_ring;
911 911 p_tx_msg_t tx_msg_p;
912 912 npi_handle_t handle;
913 913 tx_ring_hdl_t tx_head;
914 914 uint32_t pkt_len;
915 915 uint_t tx_rd_index;
916 916 uint16_t head_index, tail_index;
917 917 uint8_t tdc;
918 918 boolean_t head_wrap, tail_wrap;
919 919 p_nxge_tx_ring_stats_t tdc_stats;
920 920 int rc;
921 921
922 922 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_reclaim"));
923 923
924 924 status = ((tx_ring_p->descs_pending < nxge_reclaim_pending) &&
925 925 (nmblks != 0));
926 926 NXGE_DEBUG_MSG((nxgep, TX_CTL,
927 927 "==> nxge_txdma_reclaim: pending %d reclaim %d nmblks %d",
928 928 tx_ring_p->descs_pending, nxge_reclaim_pending,
929 929 nmblks));
930 930 if (!status) {
931 931 tx_desc_dma_p = &tx_ring_p->tdc_desc;
932 932 desc_area = tx_ring_p->tdc_desc;
933 933 handle = NXGE_DEV_NPI_HANDLE(nxgep);
934 934 tx_desc_ring_vp = tx_desc_dma_p->kaddrp;
935 935 tx_desc_ring_vp =
936 936 (p_tx_desc_t)DMA_COMMON_VPTR(desc_area);
937 937 tx_rd_index = tx_ring_p->rd_index;
938 938 tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
939 939 tx_msg_ring = tx_ring_p->tx_msg_ring;
940 940 tx_msg_p = &tx_msg_ring[tx_rd_index];
941 941 tdc = tx_ring_p->tdc;
942 942 tdc_stats = tx_ring_p->tdc_stats;
943 943 if (tx_ring_p->descs_pending > tdc_stats->tx_max_pend) {
944 944 tdc_stats->tx_max_pend = tx_ring_p->descs_pending;
945 945 }
946 946
947 947 tail_index = tx_ring_p->wr_index;
948 948 tail_wrap = tx_ring_p->wr_index_wrap;
949 949
950 950 NXGE_DEBUG_MSG((nxgep, TX_CTL,
951 951 "==> nxge_txdma_reclaim: tdc %d tx_rd_index %d "
952 952 "tail_index %d tail_wrap %d "
953 953 "tx_desc_p $%p ($%p) ",
954 954 tdc, tx_rd_index, tail_index, tail_wrap,
955 955 tx_desc_p, (*(uint64_t *)tx_desc_p)));
956 956 /*
957 957 * Read the hardware maintained transmit head
958 958 * and wrap around bit.
959 959 */
960 960 TXDMA_REG_READ64(handle, TX_RING_HDL_REG, tdc, &tx_head.value);
961 961 head_index = tx_head.bits.ldw.head;
962 962 head_wrap = tx_head.bits.ldw.wrap;
963 963 NXGE_DEBUG_MSG((nxgep, TX_CTL,
964 964 "==> nxge_txdma_reclaim: "
965 965 "tx_rd_index %d tail %d tail_wrap %d "
966 966 "head %d wrap %d",
967 967 tx_rd_index, tail_index, tail_wrap,
968 968 head_index, head_wrap));
969 969
970 970 if (head_index == tail_index) {
971 971 if (TXDMA_RING_EMPTY(head_index, head_wrap,
972 972 tail_index, tail_wrap) &&
973 973 (head_index == tx_rd_index)) {
974 974 NXGE_DEBUG_MSG((nxgep, TX_CTL,
975 975 "==> nxge_txdma_reclaim: EMPTY"));
976 976 return (B_TRUE);
977 977 }
978 978
979 979 NXGE_DEBUG_MSG((nxgep, TX_CTL,
980 980 "==> nxge_txdma_reclaim: Checking "
981 981 "if ring full"));
982 982 if (TXDMA_RING_FULL(head_index, head_wrap, tail_index,
983 983 tail_wrap)) {
984 984 NXGE_DEBUG_MSG((nxgep, TX_CTL,
985 985 "==> nxge_txdma_reclaim: full"));
986 986 return (B_FALSE);
987 987 }
988 988 }
989 989
990 990 NXGE_DEBUG_MSG((nxgep, TX_CTL,
991 991 "==> nxge_txdma_reclaim: tx_rd_index and head_index"));
992 992
993 993 tx_desc_pp = &r_tx_desc;
994 994 while ((tx_rd_index != head_index) &&
995 995 (tx_ring_p->descs_pending != 0)) {
996 996
997 997 NXGE_DEBUG_MSG((nxgep, TX_CTL,
998 998 "==> nxge_txdma_reclaim: Checking if pending"));
999 999
1000 1000 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1001 1001 "==> nxge_txdma_reclaim: "
1002 1002 "descs_pending %d ",
1003 1003 tx_ring_p->descs_pending));
1004 1004
1005 1005 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1006 1006 "==> nxge_txdma_reclaim: "
1007 1007 "(tx_rd_index %d head_index %d "
1008 1008 "(tx_desc_p $%p)",
1009 1009 tx_rd_index, head_index,
1010 1010 tx_desc_p));
1011 1011
1012 1012 tx_desc_pp->value = tx_desc_p->value;
1013 1013 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1014 1014 "==> nxge_txdma_reclaim: "
1015 1015 "(tx_rd_index %d head_index %d "
1016 1016 "tx_desc_p $%p (desc value 0x%llx) ",
1017 1017 tx_rd_index, head_index,
1018 1018 tx_desc_pp, (*(uint64_t *)tx_desc_pp)));
1019 1019
1020 1020 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1021 1021 "==> nxge_txdma_reclaim: dump desc:"));
1022 1022
1023 1023 pkt_len = tx_desc_pp->bits.hdw.tr_len;
1024 1024 tdc_stats->obytes += (pkt_len - TX_PKT_HEADER_SIZE);
1025 1025 tdc_stats->opackets += tx_desc_pp->bits.hdw.sop;
1026 1026 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1027 1027 "==> nxge_txdma_reclaim: pkt_len %d "
1028 1028 "tdc channel %d opackets %d",
1029 1029 pkt_len,
1030 1030 tdc,
1031 1031 tdc_stats->opackets));
1032 1032
1033 1033 if (tx_msg_p->flags.dma_type == USE_DVMA) {
1034 1034 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1035 1035 "tx_desc_p = $%p "
1036 1036 "tx_desc_pp = $%p "
1037 1037 "index = %d",
1038 1038 tx_desc_p,
1039 1039 tx_desc_pp,
1040 1040 tx_ring_p->rd_index));
1041 1041 (void) dvma_unload(tx_msg_p->dvma_handle,
1042 1042 0, -1);
1043 1043 tx_msg_p->dvma_handle = NULL;
1044 1044 if (tx_ring_p->dvma_wr_index ==
1045 1045 tx_ring_p->dvma_wrap_mask) {
1046 1046 tx_ring_p->dvma_wr_index = 0;
1047 1047 } else {
1048 1048 tx_ring_p->dvma_wr_index++;
1049 1049 }
1050 1050 tx_ring_p->dvma_pending--;
1051 1051 } else if (tx_msg_p->flags.dma_type ==
1052 1052 USE_DMA) {
1053 1053 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1054 1054 "==> nxge_txdma_reclaim: "
1055 1055 "USE DMA"));
1056 1056 if (rc = ddi_dma_unbind_handle
1057 1057 (tx_msg_p->dma_handle)) {
1058 1058 cmn_err(CE_WARN, "!nxge_reclaim: "
1059 1059 "ddi_dma_unbind_handle "
1060 1060 "failed. status %d", rc);
1061 1061 }
1062 1062 }
1063 1063 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1064 1064 "==> nxge_txdma_reclaim: count packets"));
1065 1065 /*
1066 1066 * count a chained packet only once.
1067 1067 */
1068 1068 if (tx_msg_p->tx_message != NULL) {
1069 1069 freemsg(tx_msg_p->tx_message);
1070 1070 tx_msg_p->tx_message = NULL;
1071 1071 }
1072 1072
1073 1073 tx_msg_p->flags.dma_type = USE_NONE;
1074 1074 tx_rd_index = tx_ring_p->rd_index;
1075 1075 tx_rd_index = (tx_rd_index + 1) &
↓ open down ↓ |
1075 lines elided |
↑ open up ↑ |
1076 1076 tx_ring_p->tx_wrap_mask;
1077 1077 tx_ring_p->rd_index = tx_rd_index;
1078 1078 tx_ring_p->descs_pending--;
1079 1079 tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
1080 1080 tx_msg_p = &tx_msg_ring[tx_rd_index];
1081 1081 }
1082 1082
1083 1083 status = (nmblks <= ((int)tx_ring_p->tx_ring_size -
1084 1084 (int)tx_ring_p->descs_pending - TX_FULL_MARK));
1085 1085 if (status) {
1086 - (void) cas32((uint32_t *)&tx_ring_p->queueing, 1, 0);
1086 + (void) atomic_cas_32((uint32_t *)&tx_ring_p->queueing,
1087 + 1, 0);
1087 1088 }
1088 1089 } else {
1089 1090 status = (nmblks <= ((int)tx_ring_p->tx_ring_size -
1090 1091 (int)tx_ring_p->descs_pending - TX_FULL_MARK));
1091 1092 }
1092 1093
1093 1094 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1094 1095 "<== nxge_txdma_reclaim status = 0x%08x", status));
1095 1096
1096 1097 return (status);
1097 1098 }
1098 1099
1099 1100 /*
1100 1101 * nxge_tx_intr
1101 1102 *
1102 1103 * Process a TDC interrupt
1103 1104 *
1104 1105 * Arguments:
1105 1106 * arg1 A Logical Device state Vector (LSV) data structure.
1106 1107 * arg2 nxge_t *
1107 1108 *
1108 1109 * Notes:
1109 1110 *
1110 1111 * NPI/NXGE function calls:
1111 1112 * npi_txdma_control_status()
1112 1113 * npi_intr_ldg_mgmt_set()
1113 1114 *
1114 1115 * nxge_tx_err_evnts()
1115 1116 * nxge_txdma_reclaim()
1116 1117 *
1117 1118 * Registers accessed:
1118 1119 * TX_CS DMC+0x40028 Transmit Control And Status
1119 1120 * PIO_LDSV
1120 1121 *
1121 1122 * Context:
1122 1123 * Any domain
1123 1124 */
1124 1125 uint_t
1125 1126 nxge_tx_intr(void *arg1, void *arg2)
1126 1127 {
1127 1128 p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1;
1128 1129 p_nxge_t nxgep = (p_nxge_t)arg2;
1129 1130 p_nxge_ldg_t ldgp;
1130 1131 uint8_t channel;
1131 1132 uint32_t vindex;
1132 1133 npi_handle_t handle;
1133 1134 tx_cs_t cs;
1134 1135 p_tx_ring_t *tx_rings;
1135 1136 p_tx_ring_t tx_ring_p;
1136 1137 npi_status_t rs = NPI_SUCCESS;
1137 1138 uint_t serviced = DDI_INTR_UNCLAIMED;
1138 1139 nxge_status_t status = NXGE_OK;
1139 1140
1140 1141 if (ldvp == NULL) {
1141 1142 NXGE_DEBUG_MSG((NULL, INT_CTL,
1142 1143 "<== nxge_tx_intr: nxgep $%p ldvp $%p",
1143 1144 nxgep, ldvp));
1144 1145 return (DDI_INTR_UNCLAIMED);
1145 1146 }
1146 1147
1147 1148 if (arg2 == NULL || (void *)ldvp->nxgep != arg2) {
1148 1149 nxgep = ldvp->nxgep;
1149 1150 }
1150 1151 NXGE_DEBUG_MSG((nxgep, INT_CTL,
1151 1152 "==> nxge_tx_intr: nxgep(arg2) $%p ldvp(arg1) $%p",
1152 1153 nxgep, ldvp));
1153 1154
1154 1155 if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) ||
1155 1156 (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) {
1156 1157 NXGE_DEBUG_MSG((nxgep, INT_CTL,
1157 1158 "<== nxge_tx_intr: interface not started or intialized"));
1158 1159 return (DDI_INTR_CLAIMED);
1159 1160 }
1160 1161
1161 1162 /*
1162 1163 * This interrupt handler is for a specific
1163 1164 * transmit dma channel.
1164 1165 */
1165 1166 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1166 1167 /* Get the control and status for this channel. */
1167 1168 channel = ldvp->channel;
1168 1169 ldgp = ldvp->ldgp;
1169 1170 NXGE_DEBUG_MSG((nxgep, INT_CTL,
1170 1171 "==> nxge_tx_intr: nxgep $%p ldvp (ldvp) $%p "
1171 1172 "channel %d",
1172 1173 nxgep, ldvp, channel));
1173 1174
1174 1175 rs = npi_txdma_control_status(handle, OP_GET, channel, &cs);
1175 1176 vindex = ldvp->vdma_index;
1176 1177 NXGE_DEBUG_MSG((nxgep, INT_CTL,
1177 1178 "==> nxge_tx_intr:channel %d ring index %d status 0x%08x",
1178 1179 channel, vindex, rs));
1179 1180 if (!rs && cs.bits.ldw.mk) {
1180 1181 NXGE_DEBUG_MSG((nxgep, INT_CTL,
1181 1182 "==> nxge_tx_intr:channel %d ring index %d "
1182 1183 "status 0x%08x (mk bit set)",
1183 1184 channel, vindex, rs));
1184 1185 tx_rings = nxgep->tx_rings->rings;
1185 1186 tx_ring_p = tx_rings[vindex];
1186 1187 NXGE_DEBUG_MSG((nxgep, INT_CTL,
1187 1188 "==> nxge_tx_intr:channel %d ring index %d "
1188 1189 "status 0x%08x (mk bit set, calling reclaim)",
1189 1190 channel, vindex, rs));
1190 1191
1191 1192 nxge_tx_ring_task((void *)tx_ring_p);
1192 1193 }
1193 1194
1194 1195 /*
1195 1196 * Process other transmit control and status.
1196 1197 * Check the ldv state.
1197 1198 */
1198 1199 status = nxge_tx_err_evnts(nxgep, ldvp->vdma_index, ldvp, cs);
1199 1200 /*
1200 1201 * Rearm this logical group if this is a single device
1201 1202 * group.
1202 1203 */
1203 1204 if (ldgp->nldvs == 1) {
1204 1205 NXGE_DEBUG_MSG((nxgep, INT_CTL,
1205 1206 "==> nxge_tx_intr: rearm"));
1206 1207 if (status == NXGE_OK) {
1207 1208 if (isLDOMguest(nxgep)) {
1208 1209 nxge_hio_ldgimgn(nxgep, ldgp);
1209 1210 } else {
1210 1211 (void) npi_intr_ldg_mgmt_set(handle, ldgp->ldg,
1211 1212 B_TRUE, ldgp->ldg_timer);
1212 1213 }
1213 1214 }
1214 1215 }
1215 1216
1216 1217 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_tx_intr"));
1217 1218 serviced = DDI_INTR_CLAIMED;
1218 1219 return (serviced);
1219 1220 }
1220 1221
1221 1222 void
1222 1223 nxge_txdma_stop(p_nxge_t nxgep) /* Dead */
1223 1224 {
1224 1225 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop"));
1225 1226
1226 1227 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
1227 1228
1228 1229 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop"));
1229 1230 }
1230 1231
1231 1232 void
1232 1233 nxge_txdma_stop_start(p_nxge_t nxgep) /* Dead */
1233 1234 {
1234 1235 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_start"));
1235 1236
1236 1237 (void) nxge_txdma_stop(nxgep);
1237 1238
1238 1239 (void) nxge_fixup_txdma_rings(nxgep);
1239 1240 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
1240 1241 (void) nxge_tx_mac_enable(nxgep);
1241 1242 (void) nxge_txdma_hw_kick(nxgep);
1242 1243
1243 1244 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_start"));
1244 1245 }
1245 1246
1246 1247 npi_status_t
1247 1248 nxge_txdma_channel_disable(
1248 1249 nxge_t *nxge,
1249 1250 int channel)
1250 1251 {
1251 1252 npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxge);
1252 1253 npi_status_t rs;
1253 1254 tdmc_intr_dbg_t intr_dbg;
1254 1255
1255 1256 /*
1256 1257 * Stop the dma channel and wait for the stop-done.
1257 1258 * If the stop-done bit is not present, then force
1258 1259 * an error so TXC will stop.
1259 1260 * All channels bound to this port need to be stopped
1260 1261 * and reset after injecting an interrupt error.
1261 1262 */
1262 1263 rs = npi_txdma_channel_disable(handle, channel);
1263 1264 NXGE_DEBUG_MSG((nxge, MEM3_CTL,
1264 1265 "==> nxge_txdma_channel_disable(%d) "
1265 1266 "rs 0x%x", channel, rs));
1266 1267 if (rs != NPI_SUCCESS) {
1267 1268 /* Inject any error */
1268 1269 intr_dbg.value = 0;
1269 1270 intr_dbg.bits.ldw.nack_pref = 1;
1270 1271 NXGE_DEBUG_MSG((nxge, MEM3_CTL,
1271 1272 "==> nxge_txdma_hw_mode: "
1272 1273 "channel %d (stop failed 0x%x) "
1273 1274 "(inject err)", rs, channel));
1274 1275 (void) npi_txdma_inj_int_error_set(
1275 1276 handle, channel, &intr_dbg);
1276 1277 rs = npi_txdma_channel_disable(handle, channel);
1277 1278 NXGE_DEBUG_MSG((nxge, MEM3_CTL,
1278 1279 "==> nxge_txdma_hw_mode: "
1279 1280 "channel %d (stop again 0x%x) "
1280 1281 "(after inject err)",
1281 1282 rs, channel));
1282 1283 }
1283 1284
1284 1285 return (rs);
1285 1286 }
1286 1287
1287 1288 /*
1288 1289 * nxge_txdma_hw_mode
1289 1290 *
1290 1291 * Toggle all TDCs on (enable) or off (disable).
1291 1292 *
1292 1293 * Arguments:
1293 1294 * nxgep
1294 1295 * enable Enable or disable a TDC.
1295 1296 *
1296 1297 * Notes:
1297 1298 *
1298 1299 * NPI/NXGE function calls:
1299 1300 * npi_txdma_channel_enable(TX_CS)
1300 1301 * npi_txdma_channel_disable(TX_CS)
1301 1302 * npi_txdma_inj_int_error_set(TDMC_INTR_DBG)
1302 1303 *
1303 1304 * Registers accessed:
1304 1305 * TX_CS DMC+0x40028 Transmit Control And Status
1305 1306 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug
1306 1307 *
1307 1308 * Context:
1308 1309 * Any domain
1309 1310 */
1310 1311 nxge_status_t
1311 1312 nxge_txdma_hw_mode(p_nxge_t nxgep, boolean_t enable)
1312 1313 {
1313 1314 nxge_grp_set_t *set = &nxgep->tx_set;
1314 1315
1315 1316 npi_handle_t handle;
1316 1317 nxge_status_t status;
1317 1318 npi_status_t rs;
1318 1319 int tdc;
1319 1320
1320 1321 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1321 1322 "==> nxge_txdma_hw_mode: enable mode %d", enable));
1322 1323
1323 1324 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
1324 1325 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1325 1326 "<== nxge_txdma_mode: not initialized"));
1326 1327 return (NXGE_ERROR);
1327 1328 }
1328 1329
1329 1330 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
1330 1331 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1331 1332 "<== nxge_txdma_hw_mode: NULL ring pointer(s)"));
1332 1333 return (NXGE_ERROR);
1333 1334 }
1334 1335
1335 1336 /* Enable or disable all of the TDCs owned by us. */
1336 1337 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1337 1338 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1338 1339 if ((1 << tdc) & set->owned.map) {
1339 1340 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1340 1341 if (ring) {
1341 1342 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1342 1343 "==> nxge_txdma_hw_mode: channel %d", tdc));
1343 1344 if (enable) {
1344 1345 rs = npi_txdma_channel_enable
1345 1346 (handle, tdc);
1346 1347 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1347 1348 "==> nxge_txdma_hw_mode: "
1348 1349 "channel %d (enable) rs 0x%x",
1349 1350 tdc, rs));
1350 1351 } else {
1351 1352 rs = nxge_txdma_channel_disable
1352 1353 (nxgep, tdc);
1353 1354 }
1354 1355 }
1355 1356 }
1356 1357 }
1357 1358
1358 1359 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
1359 1360
1360 1361 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1361 1362 "<== nxge_txdma_hw_mode: status 0x%x", status));
1362 1363
1363 1364 return (status);
1364 1365 }
1365 1366
1366 1367 void
1367 1368 nxge_txdma_enable_channel(p_nxge_t nxgep, uint16_t channel)
1368 1369 {
1369 1370 npi_handle_t handle;
1370 1371
1371 1372 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1372 1373 "==> nxge_txdma_enable_channel: channel %d", channel));
1373 1374
1374 1375 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1375 1376 /* enable the transmit dma channels */
1376 1377 (void) npi_txdma_channel_enable(handle, channel);
1377 1378
1378 1379 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_txdma_enable_channel"));
1379 1380 }
1380 1381
1381 1382 void
1382 1383 nxge_txdma_disable_channel(p_nxge_t nxgep, uint16_t channel)
1383 1384 {
1384 1385 npi_handle_t handle;
1385 1386
1386 1387 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1387 1388 "==> nxge_txdma_disable_channel: channel %d", channel));
1388 1389
1389 1390 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1390 1391 /* stop the transmit dma channels */
1391 1392 (void) npi_txdma_channel_disable(handle, channel);
1392 1393
1393 1394 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_disable_channel"));
1394 1395 }
1395 1396
1396 1397 /*
1397 1398 * nxge_txdma_stop_inj_err
1398 1399 *
1399 1400 * Stop a TDC. If at first we don't succeed, inject an error.
1400 1401 *
1401 1402 * Arguments:
1402 1403 * nxgep
1403 1404 * channel The channel to stop.
1404 1405 *
1405 1406 * Notes:
1406 1407 *
1407 1408 * NPI/NXGE function calls:
1408 1409 * npi_txdma_channel_disable()
1409 1410 * npi_txdma_inj_int_error_set()
1410 1411 * #if defined(NXGE_DEBUG)
1411 1412 * nxge_txdma_regs_dump_channels(nxgep);
1412 1413 * #endif
1413 1414 *
1414 1415 * Registers accessed:
1415 1416 * TX_CS DMC+0x40028 Transmit Control And Status
1416 1417 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug
1417 1418 *
1418 1419 * Context:
1419 1420 * Any domain
1420 1421 */
1421 1422 int
1422 1423 nxge_txdma_stop_inj_err(p_nxge_t nxgep, int channel)
1423 1424 {
1424 1425 npi_handle_t handle;
1425 1426 tdmc_intr_dbg_t intr_dbg;
1426 1427 int status;
1427 1428 npi_status_t rs = NPI_SUCCESS;
1428 1429
1429 1430 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_inj_err"));
1430 1431 /*
1431 1432 * Stop the dma channel waits for the stop done.
1432 1433 * If the stop done bit is not set, then create
1433 1434 * an error.
1434 1435 */
1435 1436 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1436 1437 rs = npi_txdma_channel_disable(handle, channel);
1437 1438 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
1438 1439 if (status == NXGE_OK) {
1439 1440 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1440 1441 "<== nxge_txdma_stop_inj_err (channel %d): "
1441 1442 "stopped OK", channel));
1442 1443 return (status);
1443 1444 }
1444 1445
1445 1446 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1446 1447 "==> nxge_txdma_stop_inj_err (channel %d): stop failed (0x%x) "
1447 1448 "injecting error", channel, rs));
1448 1449 /* Inject any error */
1449 1450 intr_dbg.value = 0;
1450 1451 intr_dbg.bits.ldw.nack_pref = 1;
1451 1452 (void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg);
1452 1453
1453 1454 /* Stop done bit will be set as a result of error injection */
1454 1455 rs = npi_txdma_channel_disable(handle, channel);
1455 1456 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
1456 1457 if (!(rs & NPI_TXDMA_STOP_FAILED)) {
1457 1458 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1458 1459 "<== nxge_txdma_stop_inj_err (channel %d): "
1459 1460 "stopped OK ", channel));
1460 1461 return (status);
1461 1462 }
1462 1463
1463 1464 #if defined(NXGE_DEBUG)
1464 1465 nxge_txdma_regs_dump_channels(nxgep);
1465 1466 #endif
1466 1467 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1467 1468 "==> nxge_txdma_stop_inj_err (channel): stop failed (0x%x) "
1468 1469 " (injected error but still not stopped)", channel, rs));
1469 1470
1470 1471 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_inj_err"));
1471 1472 return (status);
1472 1473 }
1473 1474
1474 1475 /*ARGSUSED*/
1475 1476 void
1476 1477 nxge_fixup_txdma_rings(p_nxge_t nxgep)
1477 1478 {
1478 1479 nxge_grp_set_t *set = &nxgep->tx_set;
1479 1480 int tdc;
1480 1481
1481 1482 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_txdma_rings"));
1482 1483
1483 1484 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
1484 1485 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1485 1486 "<== nxge_fixup_txdma_rings: NULL ring pointer(s)"));
1486 1487 return;
1487 1488 }
1488 1489
1489 1490 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1490 1491 if ((1 << tdc) & set->owned.map) {
1491 1492 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1492 1493 if (ring) {
1493 1494 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1494 1495 "==> nxge_fixup_txdma_rings: channel %d",
1495 1496 tdc));
1496 1497 nxge_txdma_fixup_channel(nxgep, ring, tdc);
1497 1498 }
1498 1499 }
1499 1500 }
1500 1501
1501 1502 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_txdma_rings"));
1502 1503 }
1503 1504
1504 1505 /*ARGSUSED*/
1505 1506 void
1506 1507 nxge_txdma_fix_channel(p_nxge_t nxgep, uint16_t channel)
1507 1508 {
1508 1509 p_tx_ring_t ring_p;
1509 1510
1510 1511 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_channel"));
1511 1512 ring_p = nxge_txdma_get_ring(nxgep, channel);
1512 1513 if (ring_p == NULL) {
1513 1514 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel"));
1514 1515 return;
1515 1516 }
1516 1517
1517 1518 if (ring_p->tdc != channel) {
1518 1519 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1519 1520 "<== nxge_txdma_fix_channel: channel not matched "
1520 1521 "ring tdc %d passed channel",
1521 1522 ring_p->tdc, channel));
1522 1523 return;
1523 1524 }
1524 1525
1525 1526 nxge_txdma_fixup_channel(nxgep, ring_p, channel);
1526 1527
1527 1528 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel"));
1528 1529 }
1529 1530
1530 1531 /*ARGSUSED*/
1531 1532 void
1532 1533 nxge_txdma_fixup_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel)
1533 1534 {
1534 1535 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_channel"));
1535 1536
1536 1537 if (ring_p == NULL) {
1537 1538 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1538 1539 "<== nxge_txdma_fixup_channel: NULL ring pointer"));
1539 1540 return;
1540 1541 }
1541 1542
1542 1543 if (ring_p->tdc != channel) {
1543 1544 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1544 1545 "<== nxge_txdma_fixup_channel: channel not matched "
1545 1546 "ring tdc %d passed channel",
1546 1547 ring_p->tdc, channel));
1547 1548 return;
1548 1549 }
1549 1550
1550 1551 MUTEX_ENTER(&ring_p->lock);
1551 1552 (void) nxge_txdma_reclaim(nxgep, ring_p, 0);
1552 1553 ring_p->rd_index = 0;
1553 1554 ring_p->wr_index = 0;
1554 1555 ring_p->ring_head.value = 0;
1555 1556 ring_p->ring_kick_tail.value = 0;
1556 1557 ring_p->descs_pending = 0;
1557 1558 MUTEX_EXIT(&ring_p->lock);
1558 1559
1559 1560 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_channel"));
1560 1561 }
1561 1562
1562 1563 /*ARGSUSED*/
1563 1564 void
1564 1565 nxge_txdma_hw_kick(p_nxge_t nxgep)
1565 1566 {
1566 1567 nxge_grp_set_t *set = &nxgep->tx_set;
1567 1568 int tdc;
1568 1569
1569 1570 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick"));
1570 1571
1571 1572 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
1572 1573 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1573 1574 "<== nxge_txdma_hw_kick: NULL ring pointer(s)"));
1574 1575 return;
1575 1576 }
1576 1577
1577 1578 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1578 1579 if ((1 << tdc) & set->owned.map) {
1579 1580 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1580 1581 if (ring) {
1581 1582 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1582 1583 "==> nxge_txdma_hw_kick: channel %d", tdc));
1583 1584 nxge_txdma_hw_kick_channel(nxgep, ring, tdc);
1584 1585 }
1585 1586 }
1586 1587 }
1587 1588
1588 1589 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick"));
1589 1590 }
1590 1591
1591 1592 /*ARGSUSED*/
1592 1593 void
1593 1594 nxge_txdma_kick_channel(p_nxge_t nxgep, uint16_t channel)
1594 1595 {
1595 1596 p_tx_ring_t ring_p;
1596 1597
1597 1598 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_kick_channel"));
1598 1599
1599 1600 ring_p = nxge_txdma_get_ring(nxgep, channel);
1600 1601 if (ring_p == NULL) {
1601 1602 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1602 1603 " nxge_txdma_kick_channel"));
1603 1604 return;
1604 1605 }
1605 1606
1606 1607 if (ring_p->tdc != channel) {
1607 1608 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1608 1609 "<== nxge_txdma_kick_channel: channel not matched "
1609 1610 "ring tdc %d passed channel",
1610 1611 ring_p->tdc, channel));
1611 1612 return;
1612 1613 }
1613 1614
1614 1615 nxge_txdma_hw_kick_channel(nxgep, ring_p, channel);
1615 1616
1616 1617 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_kick_channel"));
1617 1618 }
1618 1619
1619 1620 /*ARGSUSED*/
1620 1621 void
1621 1622 nxge_txdma_hw_kick_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel)
1622 1623 {
1623 1624
1624 1625 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick_channel"));
1625 1626
1626 1627 if (ring_p == NULL) {
1627 1628 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1628 1629 "<== nxge_txdma_hw_kick_channel: NULL ring pointer"));
1629 1630 return;
1630 1631 }
1631 1632
1632 1633 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick_channel"));
1633 1634 }
1634 1635
1635 1636 /*
1636 1637 * nxge_check_tx_hang
1637 1638 *
1638 1639 * Check the state of all TDCs belonging to nxgep.
1639 1640 *
1640 1641 * Arguments:
1641 1642 * nxgep
1642 1643 *
1643 1644 * Notes:
1644 1645 * Called by nxge_hw.c:nxge_check_hw_state().
1645 1646 *
1646 1647 * NPI/NXGE function calls:
1647 1648 *
1648 1649 * Registers accessed:
1649 1650 *
1650 1651 * Context:
1651 1652 * Any domain
1652 1653 */
1653 1654 /*ARGSUSED*/
1654 1655 void
1655 1656 nxge_check_tx_hang(p_nxge_t nxgep)
1656 1657 {
1657 1658 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_check_tx_hang"));
1658 1659
1659 1660 if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) ||
1660 1661 (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) {
1661 1662 goto nxge_check_tx_hang_exit;
1662 1663 }
1663 1664
1664 1665 /*
1665 1666 * Needs inputs from hardware for regs:
1666 1667 * head index had not moved since last timeout.
1667 1668 * packets not transmitted or stuffed registers.
1668 1669 */
1669 1670 if (nxge_txdma_hung(nxgep)) {
1670 1671 nxge_fixup_hung_txdma_rings(nxgep);
1671 1672 }
1672 1673
1673 1674 nxge_check_tx_hang_exit:
1674 1675 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_check_tx_hang"));
1675 1676 }
1676 1677
1677 1678 /*
1678 1679 * nxge_txdma_hung
1679 1680 *
1680 1681 * Reset a TDC.
1681 1682 *
1682 1683 * Arguments:
1683 1684 * nxgep
1684 1685 * channel The channel to reset.
1685 1686 * reg_data The current TX_CS.
1686 1687 *
1687 1688 * Notes:
1688 1689 * Called by nxge_check_tx_hang()
1689 1690 *
1690 1691 * NPI/NXGE function calls:
1691 1692 * nxge_txdma_channel_hung()
1692 1693 *
1693 1694 * Registers accessed:
1694 1695 *
1695 1696 * Context:
1696 1697 * Any domain
1697 1698 */
1698 1699 int
1699 1700 nxge_txdma_hung(p_nxge_t nxgep)
1700 1701 {
1701 1702 nxge_grp_set_t *set = &nxgep->tx_set;
1702 1703 int tdc;
1703 1704 boolean_t shared;
1704 1705
1705 1706 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hung"));
1706 1707
1707 1708 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
1708 1709 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1709 1710 "<== nxge_txdma_hung: NULL ring pointer(s)"));
1710 1711 return (B_FALSE);
1711 1712 }
1712 1713
1713 1714 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1714 1715 /*
1715 1716 * Grab the shared state of the TDC.
1716 1717 */
1717 1718 if (isLDOMservice(nxgep)) {
1718 1719 nxge_hio_data_t *nhd =
1719 1720 (nxge_hio_data_t *)nxgep->nxge_hw_p->hio;
1720 1721
1721 1722 MUTEX_ENTER(&nhd->lock);
1722 1723 shared = nxgep->tdc_is_shared[tdc];
1723 1724 MUTEX_EXIT(&nhd->lock);
1724 1725 } else {
1725 1726 shared = B_FALSE;
1726 1727 }
1727 1728
1728 1729 /*
1729 1730 * Now, process continue to process.
1730 1731 */
1731 1732 if (((1 << tdc) & set->owned.map) && !shared) {
1732 1733 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1733 1734 if (ring) {
1734 1735 if (nxge_txdma_channel_hung(nxgep, ring, tdc)) {
1735 1736 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1736 1737 "==> nxge_txdma_hung: TDC %d hung",
1737 1738 tdc));
1738 1739 return (B_TRUE);
1739 1740 }
1740 1741 }
1741 1742 }
1742 1743 }
1743 1744
1744 1745 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hung"));
1745 1746
1746 1747 return (B_FALSE);
1747 1748 }
1748 1749
1749 1750 /*
1750 1751 * nxge_txdma_channel_hung
1751 1752 *
1752 1753 * Reset a TDC.
1753 1754 *
1754 1755 * Arguments:
1755 1756 * nxgep
1756 1757 * ring <channel>'s ring.
1757 1758 * channel The channel to reset.
1758 1759 *
1759 1760 * Notes:
1760 1761 * Called by nxge_txdma.c:nxge_txdma_hung()
1761 1762 *
1762 1763 * NPI/NXGE function calls:
1763 1764 * npi_txdma_ring_head_get()
1764 1765 *
1765 1766 * Registers accessed:
1766 1767 * TX_RING_HDL DMC+0x40010 Transmit Ring Head Low
1767 1768 *
1768 1769 * Context:
1769 1770 * Any domain
1770 1771 */
1771 1772 int
1772 1773 nxge_txdma_channel_hung(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, uint16_t channel)
1773 1774 {
1774 1775 uint16_t head_index, tail_index;
1775 1776 boolean_t head_wrap, tail_wrap;
1776 1777 npi_handle_t handle;
1777 1778 tx_ring_hdl_t tx_head;
1778 1779 uint_t tx_rd_index;
1779 1780
1780 1781 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_channel_hung"));
1781 1782
1782 1783 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1783 1784 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1784 1785 "==> nxge_txdma_channel_hung: channel %d", channel));
1785 1786 MUTEX_ENTER(&tx_ring_p->lock);
1786 1787 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);
1787 1788
1788 1789 tail_index = tx_ring_p->wr_index;
1789 1790 tail_wrap = tx_ring_p->wr_index_wrap;
1790 1791 tx_rd_index = tx_ring_p->rd_index;
1791 1792 MUTEX_EXIT(&tx_ring_p->lock);
1792 1793
1793 1794 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1794 1795 "==> nxge_txdma_channel_hung: tdc %d tx_rd_index %d "
1795 1796 "tail_index %d tail_wrap %d ",
1796 1797 channel, tx_rd_index, tail_index, tail_wrap));
1797 1798 /*
1798 1799 * Read the hardware maintained transmit head
1799 1800 * and wrap around bit.
1800 1801 */
1801 1802 (void) npi_txdma_ring_head_get(handle, channel, &tx_head);
1802 1803 head_index = tx_head.bits.ldw.head;
1803 1804 head_wrap = tx_head.bits.ldw.wrap;
1804 1805 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1805 1806 "==> nxge_txdma_channel_hung: "
1806 1807 "tx_rd_index %d tail %d tail_wrap %d "
1807 1808 "head %d wrap %d",
1808 1809 tx_rd_index, tail_index, tail_wrap,
1809 1810 head_index, head_wrap));
1810 1811
1811 1812 if (TXDMA_RING_EMPTY(head_index, head_wrap,
1812 1813 tail_index, tail_wrap) &&
1813 1814 (head_index == tx_rd_index)) {
1814 1815 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1815 1816 "==> nxge_txdma_channel_hung: EMPTY"));
1816 1817 return (B_FALSE);
1817 1818 }
1818 1819
1819 1820 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1820 1821 "==> nxge_txdma_channel_hung: Checking if ring full"));
1821 1822 if (TXDMA_RING_FULL(head_index, head_wrap, tail_index,
1822 1823 tail_wrap)) {
1823 1824 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1824 1825 "==> nxge_txdma_channel_hung: full"));
1825 1826 return (B_TRUE);
1826 1827 }
1827 1828
1828 1829 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_channel_hung"));
1829 1830
1830 1831 return (B_FALSE);
1831 1832 }
1832 1833
1833 1834 /*
1834 1835 * nxge_fixup_hung_txdma_rings
1835 1836 *
1836 1837 * Disable a TDC.
1837 1838 *
1838 1839 * Arguments:
1839 1840 * nxgep
1840 1841 * channel The channel to reset.
1841 1842 * reg_data The current TX_CS.
1842 1843 *
1843 1844 * Notes:
1844 1845 * Called by nxge_check_tx_hang()
1845 1846 *
1846 1847 * NPI/NXGE function calls:
1847 1848 * npi_txdma_ring_head_get()
1848 1849 *
1849 1850 * Registers accessed:
1850 1851 * TX_RING_HDL DMC+0x40010 Transmit Ring Head Low
1851 1852 *
1852 1853 * Context:
1853 1854 * Any domain
1854 1855 */
1855 1856 /*ARGSUSED*/
1856 1857 void
1857 1858 nxge_fixup_hung_txdma_rings(p_nxge_t nxgep)
1858 1859 {
1859 1860 nxge_grp_set_t *set = &nxgep->tx_set;
1860 1861 int tdc;
1861 1862
1862 1863 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_hung_txdma_rings"));
1863 1864
1864 1865 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
1865 1866 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1866 1867 "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)"));
1867 1868 return;
1868 1869 }
1869 1870
1870 1871 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1871 1872 if ((1 << tdc) & set->owned.map) {
1872 1873 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1873 1874 if (ring) {
1874 1875 nxge_txdma_fixup_hung_channel(nxgep, ring, tdc);
1875 1876 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1876 1877 "==> nxge_fixup_hung_txdma_rings: TDC %d",
1877 1878 tdc));
1878 1879 }
1879 1880 }
1880 1881 }
1881 1882
1882 1883 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_hung_txdma_rings"));
1883 1884 }
1884 1885
1885 1886 /*
1886 1887 * nxge_txdma_fixup_hung_channel
1887 1888 *
1888 1889 * 'Fix' a hung TDC.
1889 1890 *
1890 1891 * Arguments:
1891 1892 * nxgep
1892 1893 * channel The channel to fix.
1893 1894 *
1894 1895 * Notes:
1895 1896 * Called by nxge_fixup_hung_txdma_rings()
1896 1897 *
1897 1898 * 1. Reclaim the TDC.
1898 1899 * 2. Disable the TDC.
1899 1900 *
1900 1901 * NPI/NXGE function calls:
1901 1902 * nxge_txdma_reclaim()
1902 1903 * npi_txdma_channel_disable(TX_CS)
1903 1904 * npi_txdma_inj_int_error_set(TDMC_INTR_DBG)
1904 1905 *
1905 1906 * Registers accessed:
1906 1907 * TX_CS DMC+0x40028 Transmit Control And Status
1907 1908 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug
1908 1909 *
1909 1910 * Context:
1910 1911 * Any domain
1911 1912 */
1912 1913 /*ARGSUSED*/
1913 1914 void
1914 1915 nxge_txdma_fix_hung_channel(p_nxge_t nxgep, uint16_t channel)
1915 1916 {
1916 1917 p_tx_ring_t ring_p;
1917 1918
1918 1919 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_hung_channel"));
1919 1920 ring_p = nxge_txdma_get_ring(nxgep, channel);
1920 1921 if (ring_p == NULL) {
1921 1922 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1922 1923 "<== nxge_txdma_fix_hung_channel"));
1923 1924 return;
1924 1925 }
1925 1926
1926 1927 if (ring_p->tdc != channel) {
1927 1928 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1928 1929 "<== nxge_txdma_fix_hung_channel: channel not matched "
1929 1930 "ring tdc %d passed channel",
1930 1931 ring_p->tdc, channel));
1931 1932 return;
1932 1933 }
1933 1934
1934 1935 nxge_txdma_fixup_channel(nxgep, ring_p, channel);
1935 1936
1936 1937 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_hung_channel"));
1937 1938 }
1938 1939
1939 1940 /*ARGSUSED*/
1940 1941 void
1941 1942 nxge_txdma_fixup_hung_channel(p_nxge_t nxgep, p_tx_ring_t ring_p,
1942 1943 uint16_t channel)
1943 1944 {
1944 1945 npi_handle_t handle;
1945 1946 tdmc_intr_dbg_t intr_dbg;
1946 1947 int status = NXGE_OK;
1947 1948
1948 1949 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_hung_channel"));
1949 1950
1950 1951 if (ring_p == NULL) {
1951 1952 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1952 1953 "<== nxge_txdma_fixup_channel: NULL ring pointer"));
1953 1954 return;
1954 1955 }
1955 1956
1956 1957 if (ring_p->tdc != channel) {
1957 1958 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1958 1959 "<== nxge_txdma_fixup_hung_channel: channel "
1959 1960 "not matched "
1960 1961 "ring tdc %d passed channel",
1961 1962 ring_p->tdc, channel));
1962 1963 return;
1963 1964 }
1964 1965
1965 1966 /* Reclaim descriptors */
1966 1967 MUTEX_ENTER(&ring_p->lock);
1967 1968 (void) nxge_txdma_reclaim(nxgep, ring_p, 0);
1968 1969 MUTEX_EXIT(&ring_p->lock);
1969 1970
1970 1971 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1971 1972 /*
1972 1973 * Stop the dma channel waits for the stop done.
1973 1974 * If the stop done bit is not set, then force
1974 1975 * an error.
1975 1976 */
1976 1977 status = npi_txdma_channel_disable(handle, channel);
1977 1978 if (!(status & NPI_TXDMA_STOP_FAILED)) {
1978 1979 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1979 1980 "<== nxge_txdma_fixup_hung_channel: stopped OK "
1980 1981 "ring tdc %d passed channel %d",
1981 1982 ring_p->tdc, channel));
1982 1983 return;
1983 1984 }
1984 1985
1985 1986 /* Inject any error */
1986 1987 intr_dbg.value = 0;
1987 1988 intr_dbg.bits.ldw.nack_pref = 1;
1988 1989 (void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg);
1989 1990
1990 1991 /* Stop done bit will be set as a result of error injection */
1991 1992 status = npi_txdma_channel_disable(handle, channel);
1992 1993 if (!(status & NPI_TXDMA_STOP_FAILED)) {
1993 1994 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1994 1995 "<== nxge_txdma_fixup_hung_channel: stopped again"
1995 1996 "ring tdc %d passed channel",
1996 1997 ring_p->tdc, channel));
1997 1998 return;
1998 1999 }
1999 2000
2000 2001 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2001 2002 "<== nxge_txdma_fixup_hung_channel: stop done still not set!! "
2002 2003 "ring tdc %d passed channel",
2003 2004 ring_p->tdc, channel));
2004 2005
2005 2006 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_hung_channel"));
2006 2007 }
2007 2008
2008 2009 /*ARGSUSED*/
2009 2010 void
2010 2011 nxge_reclaim_rings(p_nxge_t nxgep)
2011 2012 {
2012 2013 nxge_grp_set_t *set = &nxgep->tx_set;
2013 2014 int tdc;
2014 2015
2015 2016 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_reclaim_rings"));
2016 2017
2017 2018 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
2018 2019 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2019 2020 "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)"));
2020 2021 return;
2021 2022 }
2022 2023
2023 2024 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
2024 2025 if ((1 << tdc) & set->owned.map) {
2025 2026 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
2026 2027 if (ring) {
2027 2028 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2028 2029 "==> nxge_reclaim_rings: TDC %d", tdc));
2029 2030 MUTEX_ENTER(&ring->lock);
2030 2031 (void) nxge_txdma_reclaim(nxgep, ring, 0);
2031 2032 MUTEX_EXIT(&ring->lock);
2032 2033 }
2033 2034 }
2034 2035 }
2035 2036
2036 2037 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_reclaim_rings"));
2037 2038 }
2038 2039
2039 2040 void
2040 2041 nxge_txdma_regs_dump_channels(p_nxge_t nxgep)
2041 2042 {
2042 2043 nxge_grp_set_t *set = &nxgep->tx_set;
2043 2044 npi_handle_t handle;
2044 2045 int tdc;
2045 2046
2046 2047 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_regs_dump_channels"));
2047 2048
2048 2049 handle = NXGE_DEV_NPI_HANDLE(nxgep);
2049 2050
2050 2051 if (!isLDOMguest(nxgep)) {
2051 2052 (void) npi_txdma_dump_fzc_regs(handle);
2052 2053
2053 2054 /* Dump TXC registers. */
2054 2055 (void) npi_txc_dump_fzc_regs(handle);
2055 2056 (void) npi_txc_dump_port_fzc_regs(handle, nxgep->function_num);
2056 2057 }
2057 2058
2058 2059 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
2059 2060 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2060 2061 "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)"));
2061 2062 return;
2062 2063 }
2063 2064
2064 2065 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
2065 2066 if ((1 << tdc) & set->owned.map) {
2066 2067 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
2067 2068 if (ring) {
2068 2069 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2069 2070 "==> nxge_txdma_regs_dump_channels: "
2070 2071 "TDC %d", tdc));
2071 2072 (void) npi_txdma_dump_tdc_regs(handle, tdc);
2072 2073
2073 2074 /* Dump TXC registers, if able to. */
2074 2075 if (!isLDOMguest(nxgep)) {
2075 2076 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2076 2077 "==> nxge_txdma_regs_dump_channels:"
2077 2078 " FZC TDC %d", tdc));
2078 2079 (void) npi_txc_dump_tdc_fzc_regs
2079 2080 (handle, tdc);
2080 2081 }
2081 2082 nxge_txdma_regs_dump(nxgep, tdc);
2082 2083 }
2083 2084 }
2084 2085 }
2085 2086
2086 2087 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_regs_dump"));
2087 2088 }
2088 2089
2089 2090 void
2090 2091 nxge_txdma_regs_dump(p_nxge_t nxgep, int channel)
2091 2092 {
2092 2093 npi_handle_t handle;
2093 2094 tx_ring_hdl_t hdl;
2094 2095 tx_ring_kick_t kick;
2095 2096 tx_cs_t cs;
2096 2097 txc_control_t control;
2097 2098 uint32_t bitmap = 0;
2098 2099 uint32_t burst = 0;
2099 2100 uint32_t bytes = 0;
2100 2101 dma_log_page_t cfg;
2101 2102
2102 2103 printf("\n\tfunc # %d tdc %d ",
2103 2104 nxgep->function_num, channel);
2104 2105 cfg.page_num = 0;
2105 2106 handle = NXGE_DEV_NPI_HANDLE(nxgep);
2106 2107 (void) npi_txdma_log_page_get(handle, channel, &cfg);
2107 2108 printf("\n\tlog page func %d valid page 0 %d",
2108 2109 cfg.func_num, cfg.valid);
2109 2110 cfg.page_num = 1;
2110 2111 (void) npi_txdma_log_page_get(handle, channel, &cfg);
2111 2112 printf("\n\tlog page func %d valid page 1 %d",
2112 2113 cfg.func_num, cfg.valid);
2113 2114
2114 2115 (void) npi_txdma_ring_head_get(handle, channel, &hdl);
2115 2116 (void) npi_txdma_desc_kick_reg_get(handle, channel, &kick);
2116 2117 printf("\n\thead value is 0x%0llx",
2117 2118 (long long)hdl.value);
2118 2119 printf("\n\thead index %d", hdl.bits.ldw.head);
2119 2120 printf("\n\tkick value is 0x%0llx",
2120 2121 (long long)kick.value);
2121 2122 printf("\n\ttail index %d\n", kick.bits.ldw.tail);
2122 2123
2123 2124 (void) npi_txdma_control_status(handle, OP_GET, channel, &cs);
2124 2125 printf("\n\tControl statue is 0x%0llx", (long long)cs.value);
2125 2126 printf("\n\tControl status RST state %d", cs.bits.ldw.rst);
2126 2127
2127 2128 (void) npi_txc_control(handle, OP_GET, &control);
2128 2129 (void) npi_txc_port_dma_list_get(handle, nxgep->function_num, &bitmap);
2129 2130 (void) npi_txc_dma_max_burst(handle, OP_GET, channel, &burst);
2130 2131 (void) npi_txc_dma_bytes_transmitted(handle, channel, &bytes);
2131 2132
2132 2133 printf("\n\tTXC port control 0x%0llx",
2133 2134 (long long)control.value);
2134 2135 printf("\n\tTXC port bitmap 0x%x", bitmap);
2135 2136 printf("\n\tTXC max burst %d", burst);
2136 2137 printf("\n\tTXC bytes xmt %d\n", bytes);
2137 2138
2138 2139 {
2139 2140 ipp_status_t status;
2140 2141
2141 2142 (void) npi_ipp_get_status(handle, nxgep->function_num, &status);
2142 2143 #if defined(__i386)
2143 2144 printf("\n\tIPP status 0x%llux\n", (uint64_t)status.value);
2144 2145 #else
2145 2146 printf("\n\tIPP status 0x%lux\n", (uint64_t)status.value);
2146 2147 #endif
2147 2148 }
2148 2149 }
2149 2150
2150 2151 /*
2151 2152 * nxge_tdc_hvio_setup
2152 2153 *
 * Record the original I/O base addresses and sizes of this channel's
 * data-buffer and control DMA areas in the tx ring's hv_* fields, and
 * clear ring->hv_set.  (Presumably these values are consumed later by
 * the hypervisor/logical-page configuration code — confirm against the
 * FZC channel setup.)
2154 2155 *
2155 2156 * Arguments:
2156 2157 * nxgep
2157 2158 * channel The channel to map.
2158 2159 *
2159 2160 * Notes:
2160 2161 *
2161 2162 * NPI/NXGE function calls:
2162 2163 * na
2163 2164 *
2164 2165 * Context:
2165 2166 * Service domain?
2166 2167 */
#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
/*
 * Stash the original (pre-mapping) I/O addresses and lengths of the
 * channel's data and control DMA areas into the tx ring's hv_* fields.
 * hv_set is cleared; presumably a later step sets it once the
 * hypervisor mapping is actually established — TODO confirm.
 */
static void
nxge_tdc_hvio_setup(
	nxge_t *nxgep, int channel)
{
	nxge_dma_common_t	*data;
	nxge_dma_common_t	*control;
	tx_ring_t 	*ring;

	ring = nxgep->tx_rings->rings[channel];
	data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];

	/* Not yet mapped into the hypervisor. */
	ring->hv_set = B_FALSE;

	/* Data (packet buffer) area: original ioaddr and length. */
	ring->hv_tx_buf_base_ioaddr_pp =
	    (uint64_t)data->orig_ioaddr_pp;
	ring->hv_tx_buf_ioaddr_size =
	    (uint64_t)data->orig_alength;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel: "
	    "hv data buf base io $%p size 0x%llx (%d) buf base io $%p "
	    "orig vatopa base io $%p orig_len 0x%llx (%d)",
	    ring->hv_tx_buf_base_ioaddr_pp,
	    ring->hv_tx_buf_ioaddr_size, ring->hv_tx_buf_ioaddr_size,
	    data->ioaddr_pp, data->orig_vatopa,
	    data->orig_alength, data->orig_alength));

	/* Control (descriptor/mailbox) area: original ioaddr and length. */
	control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];

	ring->hv_tx_cntl_base_ioaddr_pp =
	    (uint64_t)control->orig_ioaddr_pp;
	ring->hv_tx_cntl_ioaddr_size =
	    (uint64_t)control->orig_alength;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel: "
	    "hv cntl base io $%p orig ioaddr_pp ($%p) "
	    "orig vatopa ($%p) size 0x%llx (%d 0x%x)",
	    ring->hv_tx_cntl_base_ioaddr_pp,
	    control->orig_ioaddr_pp, control->orig_vatopa,
	    ring->hv_tx_cntl_ioaddr_size,
	    control->orig_alength, control->orig_alength));
}
#endif
2210 2211
2211 2212 static nxge_status_t
2212 2213 nxge_map_txdma(p_nxge_t nxgep, int channel)
2213 2214 {
2214 2215 nxge_dma_common_t **pData;
2215 2216 nxge_dma_common_t **pControl;
2216 2217 tx_ring_t **pRing, *ring;
2217 2218 tx_mbox_t **mailbox;
2218 2219 uint32_t num_chunks;
2219 2220
2220 2221 nxge_status_t status = NXGE_OK;
2221 2222
2222 2223 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma"));
2223 2224
2224 2225 if (!nxgep->tx_cntl_pool_p->buf_allocated) {
2225 2226 if (nxge_alloc_tx_mem_pool(nxgep) != NXGE_OK) {
2226 2227 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2227 2228 "<== nxge_map_txdma: buf not allocated"));
2228 2229 return (NXGE_ERROR);
2229 2230 }
2230 2231 }
2231 2232
2232 2233 if (nxge_alloc_txb(nxgep, channel) != NXGE_OK)
2233 2234 return (NXGE_ERROR);
2234 2235
2235 2236 num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel];
2236 2237 pData = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];
2237 2238 pControl = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];
2238 2239 pRing = &nxgep->tx_rings->rings[channel];
2239 2240 mailbox = &nxgep->tx_mbox_areas_p->txmbox_areas_p[channel];
2240 2241
2241 2242 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: "
2242 2243 "tx_rings $%p tx_desc_rings $%p",
2243 2244 nxgep->tx_rings, nxgep->tx_rings->rings));
2244 2245
2245 2246 /*
2246 2247 * Map descriptors from the buffer pools for <channel>.
2247 2248 */
2248 2249
2249 2250 /*
2250 2251 * Set up and prepare buffer blocks, descriptors
2251 2252 * and mailbox.
2252 2253 */
2253 2254 status = nxge_map_txdma_channel(nxgep, channel,
2254 2255 pData, pRing, num_chunks, pControl, mailbox);
2255 2256 if (status != NXGE_OK) {
2256 2257 NXGE_ERROR_MSG((nxgep, MEM3_CTL,
2257 2258 "==> nxge_map_txdma(%d): nxge_map_txdma_channel() "
2258 2259 "returned 0x%x",
2259 2260 nxgep, channel, status));
2260 2261 return (status);
2261 2262 }
2262 2263
2263 2264 ring = *pRing;
2264 2265
2265 2266 ring->index = (uint16_t)channel;
2266 2267 ring->tdc_stats = &nxgep->statsp->tdc_stats[channel];
2267 2268
2268 2269 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
2269 2270 if (isLDOMguest(nxgep)) {
2270 2271 (void) nxge_tdc_lp_conf(nxgep, channel);
2271 2272 } else {
2272 2273 nxge_tdc_hvio_setup(nxgep, channel);
2273 2274 }
2274 2275 #endif
2275 2276
2276 2277 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: "
2277 2278 "(status 0x%x channel %d)", status, channel));
2278 2279
2279 2280 return (status);
2280 2281 }
2281 2282
2282 2283 static nxge_status_t
2283 2284 nxge_map_txdma_channel(p_nxge_t nxgep, uint16_t channel,
2284 2285 p_nxge_dma_common_t *dma_buf_p,
2285 2286 p_tx_ring_t *tx_desc_p,
2286 2287 uint32_t num_chunks,
2287 2288 p_nxge_dma_common_t *dma_cntl_p,
2288 2289 p_tx_mbox_t *tx_mbox_p)
2289 2290 {
2290 2291 int status = NXGE_OK;
2291 2292
2292 2293 /*
2293 2294 * Set up and prepare buffer blocks, descriptors
2294 2295 * and mailbox.
2295 2296 */
2296 2297 NXGE_ERROR_MSG((nxgep, MEM3_CTL,
2297 2298 "==> nxge_map_txdma_channel (channel %d)", channel));
2298 2299 /*
2299 2300 * Transmit buffer blocks
2300 2301 */
2301 2302 status = nxge_map_txdma_channel_buf_ring(nxgep, channel,
2302 2303 dma_buf_p, tx_desc_p, num_chunks);
2303 2304 if (status != NXGE_OK) {
2304 2305 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2305 2306 "==> nxge_map_txdma_channel (channel %d): "
2306 2307 "map buffer failed 0x%x", channel, status));
2307 2308 goto nxge_map_txdma_channel_exit;
2308 2309 }
2309 2310
2310 2311 /*
2311 2312 * Transmit block ring, and mailbox.
2312 2313 */
2313 2314 nxge_map_txdma_channel_cfg_ring(nxgep, channel, dma_cntl_p, *tx_desc_p,
2314 2315 tx_mbox_p);
2315 2316
2316 2317 goto nxge_map_txdma_channel_exit;
2317 2318
2318 2319 nxge_map_txdma_channel_fail1:
2319 2320 NXGE_ERROR_MSG((nxgep, MEM3_CTL,
2320 2321 "==> nxge_map_txdma_channel: unmap buf"
2321 2322 "(status 0x%x channel %d)",
2322 2323 status, channel));
2323 2324 nxge_unmap_txdma_channel_buf_ring(nxgep, *tx_desc_p);
2324 2325
2325 2326 nxge_map_txdma_channel_exit:
2326 2327 NXGE_ERROR_MSG((nxgep, MEM3_CTL,
2327 2328 "<== nxge_map_txdma_channel: "
2328 2329 "(status 0x%x channel %d)",
2329 2330 status, channel));
2330 2331
2331 2332 return (status);
2332 2333 }
2333 2334
/*
 * Tear down one channel's transmit resources in the reverse order of
 * nxge_map_txdma(): free the mailbox, then the buffer ring (which
 * reclaims in-flight buffers), then the channel's buffer memory, and
 * finally drop the ring pointer so nothing can reference the freed
 * ring.
 */
/*ARGSUSED*/
static void
nxge_unmap_txdma_channel(p_nxge_t nxgep, uint16_t channel)
{
	tx_ring_t *ring;
	tx_mbox_t *mailbox;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_unmap_txdma_channel (channel %d)", channel));
	/*
	 * unmap tx block ring, and mailbox.
	 */
	ring = nxgep->tx_rings->rings[channel];
	mailbox = nxgep->tx_mbox_areas_p->txmbox_areas_p[channel];

	(void) nxge_unmap_txdma_channel_cfg_ring(nxgep, ring, mailbox);

	/* unmap buffer blocks */
	(void) nxge_unmap_txdma_channel_buf_ring(nxgep, ring);

	nxge_free_txb(nxgep, channel);

	/*
	 * Cleanup the reference to the ring now that it does not exist.
	 */
	nxgep->tx_rings->rings[channel] = NULL;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_unmap_txdma_channel"));
}
2363 2364
2364 2365 /*
2365 2366 * nxge_map_txdma_channel_cfg_ring
2366 2367 *
2367 2368 * Map a TDC into our kernel space.
2368 2369 * This function allocates all of the per-channel data structures.
2369 2370 *
2370 2371 * Arguments:
2371 2372 * nxgep
2372 2373 * dma_channel The channel to map.
2373 2374 * dma_cntl_p
2374 2375 * tx_ring_p dma_channel's transmit ring
2375 2376 * tx_mbox_p dma_channel's mailbox
2376 2377 *
2377 2378 * Notes:
2378 2379 *
2379 2380 * NPI/NXGE function calls:
2380 2381 * nxge_setup_dma_common()
2381 2382 *
2382 2383 * Registers accessed:
2383 2384 * none.
2384 2385 *
2385 2386 * Context:
2386 2387 * Any domain
2387 2388 */
/*ARGSUSED*/
static void
nxge_map_txdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel,
	p_nxge_dma_common_t *dma_cntl_p,
	p_tx_ring_t tx_ring_p,
	p_tx_mbox_t *tx_mbox_p)
{
	p_tx_mbox_t 		mboxp;
	p_nxge_dma_common_t 	cntl_dmap;
	p_nxge_dma_common_t 	dmap;
	p_tx_rng_cfig_t		tx_ring_cfig_p;
	p_tx_ring_kick_t	tx_ring_kick_p;
	p_tx_cs_t		tx_cs_p;
	p_tx_dma_ent_msk_t	tx_evmask_p;
	p_txdma_mbh_t		mboxh_p;
	p_txdma_mbl_t		mboxl_p;
	uint64_t		tx_desc_len;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_map_txdma_channel_cfg_ring"));

	cntl_dmap = *dma_cntl_p;

	/* Carve the descriptor ring out of the control DMA area. */
	dmap = (p_nxge_dma_common_t)&tx_ring_p->tdc_desc;
	nxge_setup_dma_common(dmap, cntl_dmap, tx_ring_p->tx_ring_size,
	    sizeof (tx_desc_t));
	/*
	 * Zero out transmit ring descriptors.
	 */
	bzero((caddr_t)dmap->kaddrp, dmap->alength);
	tx_ring_cfig_p = &(tx_ring_p->tx_ring_cfig);
	tx_ring_kick_p = &(tx_ring_p->tx_ring_kick);
	tx_cs_p = &(tx_ring_p->tx_cs);
	tx_evmask_p = &(tx_ring_p->tx_evmask);
	/* Start all register images from a clean slate. */
	tx_ring_cfig_p->value = 0;
	tx_ring_kick_p->value = 0;
	tx_cs_p->value = 0;
	tx_evmask_p->value = 0;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_map_txdma_channel_cfg_ring: channel %d des $%p",
	    dma_channel,
	    dmap->dma_cookie.dmac_laddress));

	/*
	 * Ring configuration: DMA base address plus the ring length
	 * (the hardware takes the length as size >> 3).
	 */
	tx_ring_cfig_p->value = 0;
	tx_desc_len = (uint64_t)(tx_ring_p->tx_ring_size >> 3);
	tx_ring_cfig_p->value =
	    (dmap->dma_cookie.dmac_laddress & TX_RNG_CFIG_ADDR_MASK) |
	    (tx_desc_len << TX_RNG_CFIG_LEN_SHIFT);

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_map_txdma_channel_cfg_ring: channel %d cfg 0x%llx",
	    dma_channel,
	    tx_ring_cfig_p->value));

	/* Request a channel reset via the control/status image. */
	tx_cs_p->bits.ldw.rst = 1;

	/* Map in mailbox */
	mboxp = (p_tx_mbox_t)
	    KMEM_ZALLOC(sizeof (tx_mbox_t), KM_SLEEP);
	dmap = (p_nxge_dma_common_t)&mboxp->tx_mbox;
	nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (txdma_mailbox_t));
	mboxh_p = (p_txdma_mbh_t)&tx_ring_p->tx_mbox_mbh;
	mboxl_p = (p_txdma_mbl_t)&tx_ring_p->tx_mbox_mbl;
	mboxh_p->value = mboxl_p->value = 0;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
	    dmap->dma_cookie.dmac_laddress));

	/* Split the mailbox DMA address into its high and low registers. */
	mboxh_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress >>
	    TXDMA_MBH_ADDR_SHIFT) & TXDMA_MBH_MASK);

	mboxl_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress &
	    TXDMA_MBL_MASK) >> TXDMA_MBL_SHIFT);

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
	    dmap->dma_cookie.dmac_laddress));
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_map_txdma_channel_cfg_ring: hmbox $%p "
	    "mbox $%p",
	    mboxh_p->bits.ldw.mbaddr, mboxl_p->bits.ldw.mbaddr));
	/* Clear the logical-page register images, then enable pages 0/1. */
	tx_ring_p->page_valid.value = 0;
	tx_ring_p->page_mask_1.value = tx_ring_p->page_mask_2.value = 0;
	tx_ring_p->page_value_1.value = tx_ring_p->page_value_2.value = 0;
	tx_ring_p->page_reloc_1.value = tx_ring_p->page_reloc_2.value = 0;
	tx_ring_p->page_hdl.value = 0;

	tx_ring_p->page_valid.bits.ldw.page0 = 1;
	tx_ring_p->page_valid.bits.ldw.page1 = 1;

	/* Default the TXC maximum burst size. */
	tx_ring_p->max_burst.value = 0;
	tx_ring_p->max_burst.bits.ldw.dma_max_burst = TXC_DMA_MAX_BURST_DEFAULT;

	*tx_mbox_p = mboxp;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "<== nxge_map_txdma_channel_cfg_ring"));
}
2488 2489
/*
 * Free the channel's mailbox structure.  The descriptor ring itself is
 * part of the shared control DMA area and is released elsewhere, so
 * only the kmem-allocated tx_mbox_t is freed here.
 */
/*ARGSUSED*/
static void
nxge_unmap_txdma_channel_cfg_ring(p_nxge_t nxgep,
	p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
{
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_unmap_txdma_channel_cfg_ring: channel %d",
	    tx_ring_p->tdc));

	KMEM_FREE(tx_mbox_p, sizeof (tx_mbox_t));

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "<== nxge_unmap_txdma_channel_cfg_ring"));
}
2503 2504
2504 2505 /*
2505 2506 * nxge_map_txdma_channel_buf_ring
2506 2507 *
2507 2508 *
2508 2509 * Arguments:
2509 2510 * nxgep
2510 2511 * channel The channel to map.
2511 2512 * dma_buf_p
2512 2513 * tx_desc_p channel's descriptor ring
2513 2514 * num_chunks
2514 2515 *
2515 2516 * Notes:
2516 2517 *
2517 2518 * NPI/NXGE function calls:
2518 2519 * nxge_setup_dma_common()
2519 2520 *
2520 2521 * Registers accessed:
2521 2522 * none.
2522 2523 *
2523 2524 * Context:
2524 2525 * Any domain
2525 2526 */
2526 2527 static nxge_status_t
2527 2528 nxge_map_txdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel,
2528 2529 p_nxge_dma_common_t *dma_buf_p,
2529 2530 p_tx_ring_t *tx_desc_p, uint32_t num_chunks)
2530 2531 {
2531 2532 p_nxge_dma_common_t dma_bufp, tmp_bufp;
2532 2533 p_nxge_dma_common_t dmap;
2533 2534 nxge_os_dma_handle_t tx_buf_dma_handle;
2534 2535 p_tx_ring_t tx_ring_p;
2535 2536 p_tx_msg_t tx_msg_ring;
2536 2537 nxge_status_t status = NXGE_OK;
2537 2538 int ddi_status = DDI_SUCCESS;
2538 2539 int i, j, index;
2539 2540 uint32_t size, bsize;
2540 2541 uint32_t nblocks, nmsgs;
2541 2542 char qname[TASKQ_NAMELEN];
2542 2543
2543 2544 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2544 2545 "==> nxge_map_txdma_channel_buf_ring"));
2545 2546
2546 2547 dma_bufp = tmp_bufp = *dma_buf_p;
2547 2548 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2548 2549 " nxge_map_txdma_channel_buf_ring: channel %d to map %d "
2549 2550 "chunks bufp $%p",
2550 2551 channel, num_chunks, dma_bufp));
2551 2552
2552 2553 nmsgs = 0;
2553 2554 for (i = 0; i < num_chunks; i++, tmp_bufp++) {
2554 2555 nmsgs += tmp_bufp->nblocks;
2555 2556 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2556 2557 "==> nxge_map_txdma_channel_buf_ring: channel %d "
2557 2558 "bufp $%p nblocks %d nmsgs %d",
2558 2559 channel, tmp_bufp, tmp_bufp->nblocks, nmsgs));
2559 2560 }
2560 2561 if (!nmsgs) {
2561 2562 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2562 2563 "<== nxge_map_txdma_channel_buf_ring: channel %d "
2563 2564 "no msg blocks",
2564 2565 channel));
2565 2566 status = NXGE_ERROR;
2566 2567 goto nxge_map_txdma_channel_buf_ring_exit;
2567 2568 }
2568 2569
2569 2570 tx_ring_p = (p_tx_ring_t)
2570 2571 KMEM_ZALLOC(sizeof (tx_ring_t), KM_SLEEP);
2571 2572 MUTEX_INIT(&tx_ring_p->lock, NULL, MUTEX_DRIVER,
2572 2573 (void *)nxgep->interrupt_cookie);
2573 2574
2574 2575 (void) atomic_swap_32(&tx_ring_p->tx_ring_offline, NXGE_TX_RING_ONLINE);
2575 2576 tx_ring_p->tx_ring_busy = B_FALSE;
2576 2577 tx_ring_p->nxgep = nxgep;
2577 2578 tx_ring_p->tx_ring_handle = (mac_ring_handle_t)NULL;
2578 2579 (void) snprintf(qname, TASKQ_NAMELEN, "tx_%d_%d",
2579 2580 nxgep->instance, channel);
2580 2581 tx_ring_p->taskq = ddi_taskq_create(nxgep->dip, qname, 1,
2581 2582 TASKQ_DEFAULTPRI, 0);
2582 2583 if (tx_ring_p->taskq == NULL) {
2583 2584 goto nxge_map_txdma_channel_buf_ring_fail1;
2584 2585 }
2585 2586
2586 2587 /*
2587 2588 * Allocate transmit message rings and handles for packets
2588 2589 * not to be copied to premapped buffers.
2589 2590 */
2590 2591 size = nmsgs * sizeof (tx_msg_t);
2591 2592 tx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP);
2592 2593 for (i = 0; i < nmsgs; i++) {
2593 2594 ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr,
2594 2595 DDI_DMA_DONTWAIT, 0,
2595 2596 &tx_msg_ring[i].dma_handle);
2596 2597 if (ddi_status != DDI_SUCCESS) {
2597 2598 status |= NXGE_DDI_FAILED;
2598 2599 break;
2599 2600 }
2600 2601 }
2601 2602 if (i < nmsgs) {
2602 2603 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2603 2604 "Allocate handles failed."));
2604 2605 goto nxge_map_txdma_channel_buf_ring_fail1;
2605 2606 }
2606 2607
2607 2608 tx_ring_p->tdc = channel;
2608 2609 tx_ring_p->tx_msg_ring = tx_msg_ring;
2609 2610 tx_ring_p->tx_ring_size = nmsgs;
2610 2611 tx_ring_p->num_chunks = num_chunks;
2611 2612 if (!nxge_tx_intr_thres) {
2612 2613 nxge_tx_intr_thres = tx_ring_p->tx_ring_size/4;
2613 2614 }
2614 2615 tx_ring_p->tx_wrap_mask = tx_ring_p->tx_ring_size - 1;
2615 2616 tx_ring_p->rd_index = 0;
2616 2617 tx_ring_p->wr_index = 0;
2617 2618 tx_ring_p->ring_head.value = 0;
2618 2619 tx_ring_p->ring_kick_tail.value = 0;
2619 2620 tx_ring_p->descs_pending = 0;
2620 2621
2621 2622 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2622 2623 "==> nxge_map_txdma_channel_buf_ring: channel %d "
2623 2624 "actual tx desc max %d nmsgs %d "
2624 2625 "(config nxge_tx_ring_size %d)",
2625 2626 channel, tx_ring_p->tx_ring_size, nmsgs,
2626 2627 nxge_tx_ring_size));
2627 2628
2628 2629 /*
2629 2630 * Map in buffers from the buffer pool.
2630 2631 */
2631 2632 index = 0;
2632 2633 bsize = dma_bufp->block_size;
2633 2634
2634 2635 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel_buf_ring: "
2635 2636 "dma_bufp $%p tx_rng_p $%p "
2636 2637 "tx_msg_rng_p $%p bsize %d",
2637 2638 dma_bufp, tx_ring_p, tx_msg_ring, bsize));
2638 2639
2639 2640 tx_buf_dma_handle = dma_bufp->dma_handle;
2640 2641 for (i = 0; i < num_chunks; i++, dma_bufp++) {
2641 2642 bsize = dma_bufp->block_size;
2642 2643 nblocks = dma_bufp->nblocks;
2643 2644 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2644 2645 "==> nxge_map_txdma_channel_buf_ring: dma chunk %d "
2645 2646 "size %d dma_bufp $%p",
2646 2647 i, sizeof (nxge_dma_common_t), dma_bufp));
2647 2648
2648 2649 for (j = 0; j < nblocks; j++) {
2649 2650 tx_msg_ring[index].buf_dma_handle = tx_buf_dma_handle;
2650 2651 dmap = &tx_msg_ring[index++].buf_dma;
2651 2652 #ifdef TX_MEM_DEBUG
2652 2653 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2653 2654 "==> nxge_map_txdma_channel_buf_ring: j %d"
2654 2655 "dmap $%p", i, dmap));
2655 2656 #endif
2656 2657 nxge_setup_dma_common(dmap, dma_bufp, 1,
2657 2658 bsize);
2658 2659 }
2659 2660 }
2660 2661
2661 2662 if (i < num_chunks) {
2662 2663 status = NXGE_ERROR;
2663 2664 goto nxge_map_txdma_channel_buf_ring_fail1;
2664 2665 }
2665 2666
2666 2667 *tx_desc_p = tx_ring_p;
2667 2668
2668 2669 goto nxge_map_txdma_channel_buf_ring_exit;
2669 2670
2670 2671 nxge_map_txdma_channel_buf_ring_fail1:
2671 2672 if (tx_ring_p->taskq) {
2672 2673 ddi_taskq_destroy(tx_ring_p->taskq);
2673 2674 tx_ring_p->taskq = NULL;
2674 2675 }
2675 2676
2676 2677 index--;
2677 2678 for (; index >= 0; index--) {
2678 2679 if (tx_msg_ring[index].dma_handle != NULL) {
2679 2680 ddi_dma_free_handle(&tx_msg_ring[index].dma_handle);
2680 2681 }
2681 2682 }
2682 2683 MUTEX_DESTROY(&tx_ring_p->lock);
2683 2684 KMEM_FREE(tx_msg_ring, size);
2684 2685 KMEM_FREE(tx_ring_p, sizeof (tx_ring_t));
2685 2686
2686 2687 status = NXGE_ERROR;
2687 2688
2688 2689 nxge_map_txdma_channel_buf_ring_exit:
2689 2690 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2690 2691 "<== nxge_map_txdma_channel_buf_ring status 0x%x", status));
2691 2692
2692 2693 return (status);
2693 2694 }
2694 2695
/*
 * Free a transmit ring's message ring and everything hanging off it:
 * reclaim transmitted buffers (under the ring lock), free any pending
 * mblks, free every DMA handle, then destroy the lock, taskq, and the
 * ring structure itself.
 */
/*ARGSUSED*/
static void
nxge_unmap_txdma_channel_buf_ring(p_nxge_t nxgep, p_tx_ring_t tx_ring_p)
{
	p_tx_msg_t 		tx_msg_ring;
	p_tx_msg_t 		tx_msg_p;
	int			i;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_unmap_txdma_channel_buf_ring"));
	if (tx_ring_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_unmap_txdma_channel_buf_ring: NULL ringp"));
		return;
	}
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_unmap_txdma_channel_buf_ring: channel %d",
	    tx_ring_p->tdc));

	tx_msg_ring = tx_ring_p->tx_msg_ring;

	/*
	 * Since the serialization thread, timer thread and
	 * interrupt thread can all call the transmit reclaim,
	 * the unmapping function needs to acquire the lock
	 * to free those buffers which were transmitted
	 * by the hardware already.
	 */
	MUTEX_ENTER(&tx_ring_p->lock);
	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "==> nxge_unmap_txdma_channel_buf_ring (reclaim): "
	    "channel %d",
	    tx_ring_p->tdc));
	(void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);

	/* Free any mblks still attached to ring entries. */
	for (i = 0; i < tx_ring_p->tx_ring_size; i++) {
		tx_msg_p = &tx_msg_ring[i];
		if (tx_msg_p->tx_message != NULL) {
			freemsg(tx_msg_p->tx_message);
			tx_msg_p->tx_message = NULL;
		}
	}

	/* Free the per-entry DMA handles. */
	for (i = 0; i < tx_ring_p->tx_ring_size; i++) {
		if (tx_msg_ring[i].dma_handle != NULL) {
			ddi_dma_free_handle(&tx_msg_ring[i].dma_handle);
		}
		tx_msg_ring[i].dma_handle = NULL;
	}

	MUTEX_EXIT(&tx_ring_p->lock);

	if (tx_ring_p->taskq) {
		ddi_taskq_destroy(tx_ring_p->taskq);
		tx_ring_p->taskq = NULL;
	}

	MUTEX_DESTROY(&tx_ring_p->lock);
	KMEM_FREE(tx_msg_ring, sizeof (tx_msg_t) * tx_ring_p->tx_ring_size);
	KMEM_FREE(tx_ring_p, sizeof (tx_ring_t));

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "<== nxge_unmap_txdma_channel_buf_ring"));
}
2759 2760
/*
 * Validate the ring/mailbox pointers for <channel> and start the
 * channel's hardware via nxge_txdma_start_channel().  Returns
 * NXGE_ERROR if the ring structures have not been mapped yet,
 * otherwise the start-channel status.
 */
static nxge_status_t
nxge_txdma_hw_start(p_nxge_t nxgep, int channel)
{
	p_tx_rings_t 		tx_rings;
	p_tx_ring_t 		*tx_desc_rings;
	p_tx_mbox_areas_t 	tx_mbox_areas_p;
	p_tx_mbox_t		*tx_mbox_p;
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start"));

	tx_rings = nxgep->tx_rings;
	if (tx_rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_hw_start: NULL ring pointer"));
		return (NXGE_ERROR);
	}
	tx_desc_rings = tx_rings->rings;
	if (tx_desc_rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_hw_start: NULL ring pointers"));
		return (NXGE_ERROR);
	}

	NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
	    "tx_rings $%p tx_desc_rings $%p", tx_rings, tx_desc_rings));

	tx_mbox_areas_p = nxgep->tx_mbox_areas_p;
	tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p;

	/* Bring up the channel's DMA engine. */
	status = nxge_txdma_start_channel(nxgep, channel,
	    (p_tx_ring_t)tx_desc_rings[channel],
	    (p_tx_mbox_t)tx_mbox_p[channel]);
	if (status != NXGE_OK) {
		goto nxge_txdma_hw_start_fail1;
	}

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
	    "tx_rings $%p rings $%p",
	    nxgep->tx_rings, nxgep->tx_rings->rings));
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
	    "tx_rings $%p tx_desc_rings $%p",
	    nxgep->tx_rings, tx_desc_rings));

	goto nxge_txdma_hw_start_exit;

nxge_txdma_hw_start_fail1:
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_txdma_hw_start: disable "
	    "(status 0x%x channel %d)", status, channel));

nxge_txdma_hw_start_exit:
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_txdma_hw_start: (status 0x%x)", status));

	return (status);
}
2817 2818
2818 2819 /*
2819 2820 * nxge_txdma_start_channel
2820 2821 *
2821 2822 * Start a TDC.
2822 2823 *
2823 2824 * Arguments:
2824 2825 * nxgep
2825 2826 * channel The channel to start.
2826 2827 * tx_ring_p channel's transmit descriptor ring.
2827 2828 * tx_mbox_p channel' smailbox.
2828 2829 *
2829 2830 * Notes:
2830 2831 *
2831 2832 * NPI/NXGE function calls:
2832 2833 * nxge_reset_txdma_channel()
2833 2834 * nxge_init_txdma_channel_event_mask()
2834 2835 * nxge_enable_txdma_channel()
2835 2836 *
2836 2837 * Registers accessed:
2837 2838 * none directly (see functions above).
2838 2839 *
2839 2840 * Context:
2840 2841 * Any domain
2841 2842 */
static nxge_status_t
nxge_txdma_start_channel(p_nxge_t nxgep, uint16_t channel,
    p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)

{
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_txdma_start_channel (channel %d)", channel));
	/*
	 * TXDMA/TXC must be in stopped state.
	 */
	(void) nxge_txdma_stop_inj_err(nxgep, channel);

	/*
	 * Reset TXDMA channel: set the RST bit in the channel's
	 * control/status image and write it to hardware.
	 */
	tx_ring_p->tx_cs.value = 0;
	tx_ring_p->tx_cs.bits.ldw.rst = 1;
	status = nxge_reset_txdma_channel(nxgep, channel,
	    tx_ring_p->tx_cs.value);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_txdma_start_channel (channel %d)"
		    " reset channel failed 0x%x", channel, status));
		goto nxge_txdma_start_channel_exit;
	}

	/*
	 * Initialize the TXDMA channel specific FZC control
	 * configurations. These FZC registers are pertaining
	 * to each TX channel (i.e. logical pages).
	 * Skipped in an LDOMs guest, where FZC registers are not dumped
	 * or programmed directly.
	 */
	if (!isLDOMguest(nxgep)) {
		status = nxge_init_fzc_txdma_channel(nxgep, channel,
		    tx_ring_p, tx_mbox_p);
		if (status != NXGE_OK) {
			goto nxge_txdma_start_channel_exit;
		}
	}

	/*
	 * Initialize the event masks (all events cleared/unmasked per
	 * the zeroed image).
	 */
	tx_ring_p->tx_evmask.value = 0;
	status = nxge_init_txdma_channel_event_mask(nxgep,
	    channel, &tx_ring_p->tx_evmask);
	if (status != NXGE_OK) {
		goto nxge_txdma_start_channel_exit;
	}

	/*
	 * Load TXDMA descriptors, buffers, mailbox,
	 * initialise the DMA channels and
	 * enable each DMA channel.
	 */
	status = nxge_enable_txdma_channel(nxgep, channel,
	    tx_ring_p, tx_mbox_p);
	if (status != NXGE_OK) {
		goto nxge_txdma_start_channel_exit;
	}

nxge_txdma_start_channel_exit:
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_start_channel"));

	return (status);
}
2909 2910
2910 2911 /*
2911 2912 * nxge_txdma_stop_channel
2912 2913 *
2913 2914 * Stop a TDC.
2914 2915 *
2915 2916 * Arguments:
2916 2917 * nxgep
2917 2918 * channel The channel to stop.
2918 2919 * tx_ring_p channel's transmit descriptor ring.
 * 	tx_mbox_p	channel's mailbox.
2920 2921 *
2921 2922 * Notes:
2922 2923 *
2923 2924 * NPI/NXGE function calls:
2924 2925 * nxge_txdma_stop_inj_err()
2925 2926 * nxge_reset_txdma_channel()
2926 2927 * nxge_init_txdma_channel_event_mask()
2927 2928 * nxge_init_txdma_channel_cntl_stat()
2928 2929 * nxge_disable_txdma_channel()
2929 2930 *
2930 2931 * Registers accessed:
2931 2932 * none directly (see functions above).
2932 2933 *
2933 2934 * Context:
2934 2935 * Any domain
2935 2936 */
2936 2937 /*ARGSUSED*/
2937 2938 static nxge_status_t
2938 2939 nxge_txdma_stop_channel(p_nxge_t nxgep, uint16_t channel)
2939 2940 {
2940 2941 p_tx_ring_t tx_ring_p;
2941 2942 int status = NXGE_OK;
2942 2943
2943 2944 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2944 2945 "==> nxge_txdma_stop_channel: channel %d", channel));
2945 2946
2946 2947 /*
2947 2948 * Stop (disable) TXDMA and TXC (if stop bit is set
2948 2949 * and STOP_N_GO bit not set, the TXDMA reset state will
2949 2950 * not be set if reset TXDMA.
2950 2951 */
2951 2952 (void) nxge_txdma_stop_inj_err(nxgep, channel);
2952 2953
2953 2954 if (nxgep->tx_rings == NULL) {
2954 2955 status = NXGE_ERROR;
2955 2956 goto nxge_txdma_stop_channel_exit;
2956 2957 }
2957 2958
2958 2959 tx_ring_p = nxgep->tx_rings->rings[channel];
2959 2960 if (tx_ring_p == NULL) {
2960 2961 status = NXGE_ERROR;
2961 2962 goto nxge_txdma_stop_channel_exit;
2962 2963 }
2963 2964
2964 2965 /*
2965 2966 * Reset TXDMA channel
2966 2967 */
2967 2968 tx_ring_p->tx_cs.value = 0;
2968 2969 tx_ring_p->tx_cs.bits.ldw.rst = 1;
2969 2970 status = nxge_reset_txdma_channel(nxgep, channel,
2970 2971 tx_ring_p->tx_cs.value);
2971 2972 if (status != NXGE_OK) {
2972 2973 goto nxge_txdma_stop_channel_exit;
2973 2974 }
2974 2975
2975 2976 #ifdef HARDWARE_REQUIRED
2976 2977 /* Set up the interrupt event masks. */
2977 2978 tx_ring_p->tx_evmask.value = 0;
2978 2979 status = nxge_init_txdma_channel_event_mask(nxgep,
2979 2980 channel, &tx_ring_p->tx_evmask);
2980 2981 if (status != NXGE_OK) {
2981 2982 goto nxge_txdma_stop_channel_exit;
2982 2983 }
2983 2984
2984 2985 /* Initialize the DMA control and status register */
2985 2986 tx_ring_p->tx_cs.value = TX_ENT_MSK_MK_ALL;
2986 2987 status = nxge_init_txdma_channel_cntl_stat(nxgep, channel,
2987 2988 tx_ring_p->tx_cs.value);
2988 2989 if (status != NXGE_OK) {
2989 2990 goto nxge_txdma_stop_channel_exit;
2990 2991 }
2991 2992
2992 2993 tx_mbox_p = nxgep->tx_mbox_areas_p->txmbox_areas_p[channel];
2993 2994
2994 2995 /* Disable channel */
2995 2996 status = nxge_disable_txdma_channel(nxgep, channel,
2996 2997 tx_ring_p, tx_mbox_p);
2997 2998 if (status != NXGE_OK) {
2998 2999 goto nxge_txdma_start_channel_exit;
2999 3000 }
3000 3001
3001 3002 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
3002 3003 "==> nxge_txdma_stop_channel: event done"));
3003 3004
3004 3005 #endif
3005 3006
3006 3007 nxge_txdma_stop_channel_exit:
3007 3008 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_stop_channel"));
3008 3009 return (status);
3009 3010 }
3010 3011
3011 3012 /*
3012 3013 * nxge_txdma_get_ring
3013 3014 *
3014 3015 * Get the ring for a TDC.
3015 3016 *
3016 3017 * Arguments:
3017 3018 * nxgep
3018 3019 * channel
3019 3020 *
3020 3021 * Notes:
3021 3022 *
3022 3023 * NPI/NXGE function calls:
3023 3024 *
3024 3025 * Registers accessed:
3025 3026 *
3026 3027 * Context:
3027 3028 * Any domain
3028 3029 */
3029 3030 static p_tx_ring_t
3030 3031 nxge_txdma_get_ring(p_nxge_t nxgep, uint16_t channel)
3031 3032 {
3032 3033 nxge_grp_set_t *set = &nxgep->tx_set;
3033 3034 int tdc;
3034 3035
3035 3036 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_ring"));
3036 3037
3037 3038 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
3038 3039 NXGE_DEBUG_MSG((nxgep, TX_CTL,
3039 3040 "<== nxge_txdma_get_ring: NULL ring pointer(s)"));
3040 3041 goto return_null;
3041 3042 }
3042 3043
3043 3044 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3044 3045 if ((1 << tdc) & set->owned.map) {
3045 3046 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3046 3047 if (ring) {
3047 3048 if (channel == ring->tdc) {
3048 3049 NXGE_DEBUG_MSG((nxgep, TX_CTL,
3049 3050 "<== nxge_txdma_get_ring: "
3050 3051 "tdc %d ring $%p", tdc, ring));
3051 3052 return (ring);
3052 3053 }
3053 3054 }
3054 3055 }
3055 3056 }
3056 3057
3057 3058 return_null:
3058 3059 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_ring: "
3059 3060 "ring not found"));
3060 3061
3061 3062 return (NULL);
3062 3063 }
3063 3064
3064 3065 /*
3065 3066 * nxge_txdma_get_mbox
3066 3067 *
3067 3068 * Get the mailbox for a TDC.
3068 3069 *
3069 3070 * Arguments:
3070 3071 * nxgep
3071 3072 * channel
3072 3073 *
3073 3074 * Notes:
3074 3075 *
3075 3076 * NPI/NXGE function calls:
3076 3077 *
3077 3078 * Registers accessed:
3078 3079 *
3079 3080 * Context:
3080 3081 * Any domain
3081 3082 */
3082 3083 static p_tx_mbox_t
3083 3084 nxge_txdma_get_mbox(p_nxge_t nxgep, uint16_t channel)
3084 3085 {
3085 3086 nxge_grp_set_t *set = &nxgep->tx_set;
3086 3087 int tdc;
3087 3088
3088 3089 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_mbox"));
3089 3090
3090 3091 if (nxgep->tx_mbox_areas_p == 0 ||
3091 3092 nxgep->tx_mbox_areas_p->txmbox_areas_p == 0) {
3092 3093 NXGE_DEBUG_MSG((nxgep, TX_CTL,
3093 3094 "<== nxge_txdma_get_mbox: NULL mailbox pointer(s)"));
3094 3095 goto return_null;
3095 3096 }
3096 3097
3097 3098 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
3098 3099 NXGE_DEBUG_MSG((nxgep, TX_CTL,
3099 3100 "<== nxge_txdma_get_mbox: NULL ring pointer(s)"));
3100 3101 goto return_null;
3101 3102 }
3102 3103
3103 3104 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3104 3105 if ((1 << tdc) & set->owned.map) {
3105 3106 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3106 3107 if (ring) {
3107 3108 if (channel == ring->tdc) {
3108 3109 tx_mbox_t *mailbox = nxgep->
3109 3110 tx_mbox_areas_p->
3110 3111 txmbox_areas_p[tdc];
3111 3112 NXGE_DEBUG_MSG((nxgep, TX_CTL,
3112 3113 "<== nxge_txdma_get_mbox: tdc %d "
3113 3114 "ring $%p", tdc, mailbox));
3114 3115 return (mailbox);
3115 3116 }
3116 3117 }
3117 3118 }
3118 3119 }
3119 3120
3120 3121 return_null:
3121 3122 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_mbox: "
3122 3123 "mailbox not found"));
3123 3124
3124 3125 return (NULL);
3125 3126 }
3126 3127
3127 3128 /*
3128 3129 * nxge_tx_err_evnts
3129 3130 *
3130 3131 * Recover a TDC.
3131 3132 *
3132 3133 * Arguments:
3133 3134 * nxgep
3134 3135 * index The index to the TDC ring.
3135 3136 * ldvp Used to get the channel number ONLY.
3136 3137 * cs A copy of the bits from TX_CS.
3137 3138 *
3138 3139 * Notes:
3139 3140 * Calling tree:
3140 3141 * nxge_tx_intr()
3141 3142 *
3142 3143 * NPI/NXGE function calls:
3143 3144 * npi_txdma_ring_error_get()
3144 3145 * npi_txdma_inj_par_error_get()
3145 3146 * nxge_txdma_fatal_err_recover()
3146 3147 *
3147 3148 * Registers accessed:
3148 3149 * TX_RNG_ERR_LOGH DMC+0x40048 Transmit Ring Error Log High
3149 3150 * TX_RNG_ERR_LOGL DMC+0x40050 Transmit Ring Error Log Low
3150 3151 * TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error
3151 3152 *
3152 3153 * Context:
3153 3154 * Any domain XXX Remove code which accesses TDMC_INJ_PAR_ERR.
3154 3155 */
/*ARGSUSED*/
static nxge_status_t
nxge_tx_err_evnts(p_nxge_t nxgep, uint_t index, p_nxge_ldv_t ldvp, tx_cs_t cs)
{
	npi_handle_t		handle;
	npi_status_t		rs;
	uint8_t			channel;
	p_tx_ring_t		*tx_rings;
	p_tx_ring_t		tx_ring_p;
	p_nxge_tx_ring_stats_t	tdc_stats;
	boolean_t		txchan_fatal = B_FALSE;
	nxge_status_t		status = NXGE_OK;
	tdmc_inj_par_err_t	par_err;
	uint32_t		value;

	NXGE_DEBUG_MSG((nxgep, TX2_CTL, "==> nxge_tx_err_evnts"));
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	/* The channel number comes from the logical device, not <index>. */
	channel = ldvp->channel;

	tx_rings = nxgep->tx_rings->rings;
	tx_ring_p = tx_rings[index];
	tdc_stats = tx_ring_p->tdc_stats;
	/*
	 * If any ring-level error bit is set, snapshot the hardware
	 * error log registers into the channel stats before examining
	 * the individual bits below.
	 */
	if ((cs.bits.ldw.pkt_size_err) || (cs.bits.ldw.pref_buf_par_err) ||
	    (cs.bits.ldw.nack_pref) || (cs.bits.ldw.nack_pkt_rd) ||
	    (cs.bits.ldw.conf_part_err) || (cs.bits.ldw.pkt_prt_err)) {
		if ((rs = npi_txdma_ring_error_get(handle, channel,
		    &tdc_stats->errlog)) != NPI_SUCCESS)
			return (NXGE_ERROR | rs);
	}

	/*
	 * For each asserted error bit: bump the per-channel statistic,
	 * post an FMA ereport, log, and (for fatal classes) flag the
	 * channel for recovery below.
	 */
	if (cs.bits.ldw.mbox_err) {
		tdc_stats->mbox_err++;
		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
		    NXGE_FM_EREPORT_TDMC_MBOX_ERR);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_tx_err_evnts(channel %d): "
		    "fatal error: mailbox", channel));
		txchan_fatal = B_TRUE;
	}
	if (cs.bits.ldw.pkt_size_err) {
		tdc_stats->pkt_size_err++;
		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
		    NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_tx_err_evnts(channel %d): "
		    "fatal error: pkt_size_err", channel));
		txchan_fatal = B_TRUE;
	}
	if (cs.bits.ldw.tx_ring_oflow) {
		tdc_stats->tx_ring_oflow++;
		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
		    NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_tx_err_evnts(channel %d): "
		    "fatal error: tx_ring_oflow", channel));
		txchan_fatal = B_TRUE;
	}
	if (cs.bits.ldw.pref_buf_par_err) {
		tdc_stats->pre_buf_par_err++;
		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
		    NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_tx_err_evnts(channel %d): "
		    "fatal error: pre_buf_par_err", channel));
		/* Clear error injection source for parity error */
		(void) npi_txdma_inj_par_error_get(handle, &value);
		par_err.value = value;
		par_err.bits.ldw.inject_parity_error &= ~(1 << channel);
		(void) npi_txdma_inj_par_error_set(handle, par_err.value);
		txchan_fatal = B_TRUE;
	}
	if (cs.bits.ldw.nack_pref) {
		tdc_stats->nack_pref++;
		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
		    NXGE_FM_EREPORT_TDMC_NACK_PREF);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_tx_err_evnts(channel %d): "
		    "fatal error: nack_pref", channel));
		txchan_fatal = B_TRUE;
	}
	if (cs.bits.ldw.nack_pkt_rd) {
		tdc_stats->nack_pkt_rd++;
		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
		    NXGE_FM_EREPORT_TDMC_NACK_PKT_RD);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_tx_err_evnts(channel %d): "
		    "fatal error: nack_pkt_rd", channel));
		txchan_fatal = B_TRUE;
	}
	if (cs.bits.ldw.conf_part_err) {
		tdc_stats->conf_part_err++;
		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
		    NXGE_FM_EREPORT_TDMC_CONF_PART_ERR);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_tx_err_evnts(channel %d): "
		    "fatal error: config_partition_err", channel));
		txchan_fatal = B_TRUE;
	}
	if (cs.bits.ldw.pkt_prt_err) {
		tdc_stats->pkt_part_err++;
		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
		    NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_tx_err_evnts(channel %d): "
		    "fatal error: pkt_prt_err", channel));
		txchan_fatal = B_TRUE;
	}

	/* Clear error injection source in case this is an injected error */
	TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG, channel, 0);

	/*
	 * Any fatal condition triggers a full channel recovery; if it
	 * succeeds, tell FMA the service has been restored.
	 */
	if (txchan_fatal) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_tx_err_evnts: "
		    " fatal error on channel %d cs 0x%llx\n",
		    channel, cs.value));
		status = nxge_txdma_fatal_err_recover(nxgep, channel,
		    tx_ring_p);
		if (status == NXGE_OK) {
			FM_SERVICE_RESTORED(nxgep);
		}
	}

	NXGE_DEBUG_MSG((nxgep, TX2_CTL, "<== nxge_tx_err_evnts"));

	return (status);
}
3282 3283
3283 3284 static nxge_status_t
3284 3285 nxge_txdma_fatal_err_recover(
3285 3286 p_nxge_t nxgep,
3286 3287 uint16_t channel,
3287 3288 p_tx_ring_t tx_ring_p)
3288 3289 {
3289 3290 npi_handle_t handle;
3290 3291 npi_status_t rs = NPI_SUCCESS;
3291 3292 p_tx_mbox_t tx_mbox_p;
3292 3293 nxge_status_t status = NXGE_OK;
3293 3294
3294 3295 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fatal_err_recover"));
3295 3296 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3296 3297 "Recovering from TxDMAChannel#%d error...", channel));
3297 3298
3298 3299 /*
3299 3300 * Stop the dma channel waits for the stop done.
3300 3301 * If the stop done bit is not set, then create
3301 3302 * an error.
3302 3303 */
3303 3304
3304 3305 handle = NXGE_DEV_NPI_HANDLE(nxgep);
3305 3306 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel stop..."));
3306 3307 MUTEX_ENTER(&tx_ring_p->lock);
3307 3308 rs = npi_txdma_channel_control(handle, TXDMA_STOP, channel);
3308 3309 if (rs != NPI_SUCCESS) {
3309 3310 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3310 3311 "==> nxge_txdma_fatal_err_recover (channel %d): "
3311 3312 "stop failed ", channel));
3312 3313 goto fail;
3313 3314 }
3314 3315
3315 3316 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reclaim..."));
3316 3317 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);
3317 3318
3318 3319 /*
3319 3320 * Reset TXDMA channel
3320 3321 */
3321 3322 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reset..."));
3322 3323 if ((rs = npi_txdma_channel_control(handle, TXDMA_RESET, channel)) !=
3323 3324 NPI_SUCCESS) {
3324 3325 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3325 3326 "==> nxge_txdma_fatal_err_recover (channel %d)"
3326 3327 " reset channel failed 0x%x", channel, rs));
3327 3328 goto fail;
3328 3329 }
3329 3330
3330 3331 /*
3331 3332 * Reset the tail (kick) register to 0.
3332 3333 * (Hardware will not reset it. Tx overflow fatal
3333 3334 * error if tail is not set to 0 after reset!
3334 3335 */
3335 3336 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0);
3336 3337
3337 3338 /* Restart TXDMA channel */
3338 3339
3339 3340 if (!isLDOMguest(nxgep)) {
3340 3341 tx_mbox_p = nxge_txdma_get_mbox(nxgep, channel);
3341 3342
3342 3343 // XXX This is a problem in HIO!
3343 3344 /*
3344 3345 * Initialize the TXDMA channel specific FZC control
3345 3346 * configurations. These FZC registers are pertaining
3346 3347 * to each TX channel (i.e. logical pages).
3347 3348 */
3348 3349 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel restart..."));
3349 3350 status = nxge_init_fzc_txdma_channel(nxgep, channel,
3350 3351 tx_ring_p, tx_mbox_p);
3351 3352 if (status != NXGE_OK)
3352 3353 goto fail;
3353 3354 }
3354 3355
3355 3356 /*
3356 3357 * Initialize the event masks.
3357 3358 */
3358 3359 tx_ring_p->tx_evmask.value = 0;
3359 3360 status = nxge_init_txdma_channel_event_mask(nxgep, channel,
3360 3361 &tx_ring_p->tx_evmask);
3361 3362 if (status != NXGE_OK)
3362 3363 goto fail;
3363 3364
3364 3365 tx_ring_p->wr_index_wrap = B_FALSE;
3365 3366 tx_ring_p->wr_index = 0;
3366 3367 tx_ring_p->rd_index = 0;
3367 3368
3368 3369 /*
3369 3370 * Load TXDMA descriptors, buffers, mailbox,
3370 3371 * initialise the DMA channels and
3371 3372 * enable each DMA channel.
3372 3373 */
3373 3374 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel enable..."));
3374 3375 status = nxge_enable_txdma_channel(nxgep, channel,
3375 3376 tx_ring_p, tx_mbox_p);
3376 3377 MUTEX_EXIT(&tx_ring_p->lock);
3377 3378 if (status != NXGE_OK)
3378 3379 goto fail;
3379 3380
3380 3381 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3381 3382 "Recovery Successful, TxDMAChannel#%d Restored",
3382 3383 channel));
3383 3384 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fatal_err_recover"));
3384 3385
3385 3386 return (NXGE_OK);
3386 3387
3387 3388 fail:
3388 3389 MUTEX_EXIT(&tx_ring_p->lock);
3389 3390
3390 3391 NXGE_DEBUG_MSG((nxgep, TX_CTL,
3391 3392 "nxge_txdma_fatal_err_recover (channel %d): "
3392 3393 "failed to recover this txdma channel", channel));
3393 3394 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
3394 3395
3395 3396 return (status);
3396 3397 }
3397 3398
3398 3399 /*
3399 3400 * nxge_tx_port_fatal_err_recover
3400 3401 *
3401 3402 * Attempt to recover from a fatal port error.
3402 3403 *
3403 3404 * Arguments:
3404 3405 * nxgep
3405 3406 *
3406 3407 * Notes:
3407 3408 * How would a guest do this?
3408 3409 *
3409 3410 * NPI/NXGE function calls:
3410 3411 *
3411 3412 * Registers accessed:
3412 3413 *
3413 3414 * Context:
3414 3415 * Service domain
3415 3416 */
3416 3417 nxge_status_t
3417 3418 nxge_tx_port_fatal_err_recover(p_nxge_t nxgep)
3418 3419 {
3419 3420 nxge_grp_set_t *set = &nxgep->tx_set;
3420 3421 nxge_channel_t tdc;
3421 3422
3422 3423 tx_ring_t *ring;
3423 3424 tx_mbox_t *mailbox;
3424 3425
3425 3426 npi_handle_t handle;
3426 3427 nxge_status_t status;
3427 3428 npi_status_t rs;
3428 3429
3429 3430 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_tx_port_fatal_err_recover"));
3430 3431 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3431 3432 "Recovering from TxPort error..."));
3432 3433
3433 3434 if (isLDOMguest(nxgep)) {
3434 3435 return (NXGE_OK);
3435 3436 }
3436 3437
3437 3438 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
3438 3439 NXGE_DEBUG_MSG((nxgep, TX_CTL,
3439 3440 "<== nxge_tx_port_fatal_err_recover: not initialized"));
3440 3441 return (NXGE_ERROR);
3441 3442 }
3442 3443
3443 3444 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
3444 3445 NXGE_DEBUG_MSG((nxgep, TX_CTL,
3445 3446 "<== nxge_tx_port_fatal_err_recover: "
3446 3447 "NULL ring pointer(s)"));
3447 3448 return (NXGE_ERROR);
3448 3449 }
3449 3450
3450 3451 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3451 3452 if ((1 << tdc) & set->owned.map) {
3452 3453 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3453 3454 if (ring)
3454 3455 MUTEX_ENTER(&ring->lock);
3455 3456 }
3456 3457 }
3457 3458
3458 3459 handle = NXGE_DEV_NPI_HANDLE(nxgep);
3459 3460
3460 3461 /*
3461 3462 * Stop all the TDCs owned by us.
3462 3463 * (The shared TDCs will have been stopped by their owners.)
3463 3464 */
3464 3465 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3465 3466 if ((1 << tdc) & set->owned.map) {
3466 3467 ring = nxgep->tx_rings->rings[tdc];
3467 3468 if (ring) {
3468 3469 rs = npi_txdma_channel_control
3469 3470 (handle, TXDMA_STOP, tdc);
3470 3471 if (rs != NPI_SUCCESS) {
3471 3472 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3472 3473 "nxge_tx_port_fatal_err_recover "
3473 3474 "(channel %d): stop failed ", tdc));
3474 3475 goto fail;
3475 3476 }
3476 3477 }
3477 3478 }
3478 3479 }
3479 3480
3480 3481 NXGE_DEBUG_MSG((nxgep, TX_CTL, "Reclaiming all TDCs..."));
3481 3482
3482 3483 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3483 3484 if ((1 << tdc) & set->owned.map) {
3484 3485 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3485 3486 if (ring) {
3486 3487 (void) nxge_txdma_reclaim(nxgep, ring, 0);
3487 3488 }
3488 3489 }
3489 3490 }
3490 3491
3491 3492 /*
3492 3493 * Reset all the TDCs.
3493 3494 */
3494 3495 NXGE_DEBUG_MSG((nxgep, TX_CTL, "Resetting all TDCs..."));
3495 3496
3496 3497 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3497 3498 if ((1 << tdc) & set->owned.map) {
3498 3499 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3499 3500 if (ring) {
3500 3501 if ((rs = npi_txdma_channel_control
3501 3502 (handle, TXDMA_RESET, tdc))
3502 3503 != NPI_SUCCESS) {
3503 3504 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3504 3505 "nxge_tx_port_fatal_err_recover "
3505 3506 "(channel %d) reset channel "
3506 3507 "failed 0x%x", tdc, rs));
3507 3508 goto fail;
3508 3509 }
3509 3510 }
3510 3511 /*
3511 3512 * Reset the tail (kick) register to 0.
3512 3513 * (Hardware will not reset it. Tx overflow fatal
3513 3514 * error if tail is not set to 0 after reset!
3514 3515 */
3515 3516 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, tdc, 0);
3516 3517 }
3517 3518 }
3518 3519
3519 3520 NXGE_DEBUG_MSG((nxgep, TX_CTL, "Restarting all TDCs..."));
3520 3521
3521 3522 /* Restart all the TDCs */
3522 3523 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3523 3524 if ((1 << tdc) & set->owned.map) {
3524 3525 ring = nxgep->tx_rings->rings[tdc];
3525 3526 if (ring) {
3526 3527 mailbox = nxge_txdma_get_mbox(nxgep, tdc);
3527 3528 status = nxge_init_fzc_txdma_channel(nxgep, tdc,
3528 3529 ring, mailbox);
3529 3530 ring->tx_evmask.value = 0;
3530 3531 /*
3531 3532 * Initialize the event masks.
3532 3533 */
3533 3534 status = nxge_init_txdma_channel_event_mask
3534 3535 (nxgep, tdc, &ring->tx_evmask);
3535 3536
3536 3537 ring->wr_index_wrap = B_FALSE;
3537 3538 ring->wr_index = 0;
3538 3539 ring->rd_index = 0;
3539 3540
3540 3541 if (status != NXGE_OK)
3541 3542 goto fail;
3542 3543 if (status != NXGE_OK)
3543 3544 goto fail;
3544 3545 }
3545 3546 }
3546 3547 }
3547 3548
3548 3549 NXGE_DEBUG_MSG((nxgep, TX_CTL, "Re-enabling all TDCs..."));
3549 3550
3550 3551 /* Re-enable all the TDCs */
3551 3552 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3552 3553 if ((1 << tdc) & set->owned.map) {
3553 3554 ring = nxgep->tx_rings->rings[tdc];
3554 3555 if (ring) {
3555 3556 mailbox = nxge_txdma_get_mbox(nxgep, tdc);
3556 3557 status = nxge_enable_txdma_channel(nxgep, tdc,
3557 3558 ring, mailbox);
3558 3559 if (status != NXGE_OK)
3559 3560 goto fail;
3560 3561 }
3561 3562 }
3562 3563 }
3563 3564
3564 3565 /*
3565 3566 * Unlock all the TDCs.
3566 3567 */
3567 3568 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3568 3569 if ((1 << tdc) & set->owned.map) {
3569 3570 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3570 3571 if (ring)
3571 3572 MUTEX_EXIT(&ring->lock);
3572 3573 }
3573 3574 }
3574 3575
3575 3576 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Tx port recovery succeeded"));
3576 3577 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_tx_port_fatal_err_recover"));
3577 3578
3578 3579 return (NXGE_OK);
3579 3580
3580 3581 fail:
3581 3582 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3582 3583 if ((1 << tdc) & set->owned.map) {
3583 3584 ring = nxgep->tx_rings->rings[tdc];
3584 3585 if (ring)
3585 3586 MUTEX_EXIT(&ring->lock);
3586 3587 }
3587 3588 }
3588 3589
3589 3590 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Tx port recovery failed"));
3590 3591 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_tx_port_fatal_err_recover"));
3591 3592
3592 3593 return (status);
3593 3594 }
3594 3595
3595 3596 /*
3596 3597 * nxge_txdma_inject_err
3597 3598 *
3598 3599 * Inject an error into a TDC.
3599 3600 *
3600 3601 * Arguments:
3601 3602 * nxgep
3602 3603 * err_id The error to inject.
3603 3604 * chan The channel to inject into.
3604 3605 *
3605 3606 * Notes:
3606 3607 * This is called from nxge_main.c:nxge_err_inject()
3607 3608 * Has this ioctl ever been used?
3608 3609 *
3609 3610 * NPI/NXGE function calls:
3610 3611 * npi_txdma_inj_par_error_get()
3611 3612 * npi_txdma_inj_par_error_set()
3612 3613 *
3613 3614 * Registers accessed:
3614 3615 * TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error
3615 3616 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug
3616 3617 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug
3617 3618 *
3618 3619 * Context:
3619 3620 * Service domain
3620 3621 */
3621 3622 void
3622 3623 nxge_txdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan)
3623 3624 {
3624 3625 tdmc_intr_dbg_t tdi;
3625 3626 tdmc_inj_par_err_t par_err;
3626 3627 uint32_t value;
3627 3628 npi_handle_t handle;
3628 3629
3629 3630 switch (err_id) {
3630 3631
3631 3632 case NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR:
3632 3633 handle = NXGE_DEV_NPI_HANDLE(nxgep);
3633 3634 /* Clear error injection source for parity error */
3634 3635 (void) npi_txdma_inj_par_error_get(handle, &value);
3635 3636 par_err.value = value;
3636 3637 par_err.bits.ldw.inject_parity_error &= ~(1 << chan);
3637 3638 (void) npi_txdma_inj_par_error_set(handle, par_err.value);
3638 3639
3639 3640 par_err.bits.ldw.inject_parity_error = (1 << chan);
3640 3641 (void) npi_txdma_inj_par_error_get(handle, &value);
3641 3642 par_err.value = value;
3642 3643 par_err.bits.ldw.inject_parity_error |= (1 << chan);
3643 3644 cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INJ_PAR_ERR_REG\n",
3644 3645 (unsigned long long)par_err.value);
3645 3646 (void) npi_txdma_inj_par_error_set(handle, par_err.value);
3646 3647 break;
3647 3648
3648 3649 case NXGE_FM_EREPORT_TDMC_MBOX_ERR:
3649 3650 case NXGE_FM_EREPORT_TDMC_NACK_PREF:
3650 3651 case NXGE_FM_EREPORT_TDMC_NACK_PKT_RD:
3651 3652 case NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR:
3652 3653 case NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW:
3653 3654 case NXGE_FM_EREPORT_TDMC_CONF_PART_ERR:
3654 3655 case NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR:
3655 3656 TXDMA_REG_READ64(nxgep->npi_handle, TDMC_INTR_DBG_REG,
3656 3657 chan, &tdi.value);
3657 3658 if (err_id == NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR)
3658 3659 tdi.bits.ldw.pref_buf_par_err = 1;
3659 3660 else if (err_id == NXGE_FM_EREPORT_TDMC_MBOX_ERR)
3660 3661 tdi.bits.ldw.mbox_err = 1;
3661 3662 else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PREF)
3662 3663 tdi.bits.ldw.nack_pref = 1;
3663 3664 else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PKT_RD)
3664 3665 tdi.bits.ldw.nack_pkt_rd = 1;
3665 3666 else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR)
3666 3667 tdi.bits.ldw.pkt_size_err = 1;
3667 3668 else if (err_id == NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW)
3668 3669 tdi.bits.ldw.tx_ring_oflow = 1;
3669 3670 else if (err_id == NXGE_FM_EREPORT_TDMC_CONF_PART_ERR)
3670 3671 tdi.bits.ldw.conf_part_err = 1;
3671 3672 else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR)
3672 3673 tdi.bits.ldw.pkt_part_err = 1;
3673 3674 #if defined(__i386)
3674 3675 cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INTR_DBG_REG\n",
3675 3676 tdi.value);
3676 3677 #else
3677 3678 cmn_err(CE_NOTE, "!Write 0x%lx to TDMC_INTR_DBG_REG\n",
3678 3679 tdi.value);
3679 3680 #endif
3680 3681 TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG,
3681 3682 chan, tdi.value);
3682 3683
3683 3684 break;
3684 3685 }
3685 3686 }
↓ open down ↓ |
2589 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX