1 /*
2 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
3 */
4
5 /*
6 * This driver was derived from the FreeBSD if_msk.c driver, which
7 * bears the following copyright attributions and licenses.
8 */
9
10 /*
11 *
12 * LICENSE:
13 * Copyright (C) Marvell International Ltd. and/or its affiliates
14 *
15 * The computer program files contained in this folder ("Files")
16 * are provided to you under the BSD-type license terms provided
17 * below, and any use of such Files and any derivative works
18 * thereof created by you shall be governed by the following terms
19 * and conditions:
20 *
21 * - Redistributions of source code must retain the above copyright
22 * notice, this list of conditions and the following disclaimer.
23 * - Redistributions in binary form must reproduce the above
24 * copyright notice, this list of conditions and the following
25 * disclaimer in the documentation and/or other materials provided
26 * with the distribution.
27 * - Neither the name of Marvell nor the names of its contributors
28 * may be used to endorse or promote products derived from this
29 * software without specific prior written permission.
30 *
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
34 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
35 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
36 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
37 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
38 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
39 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
40 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
41 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
42 * OF THE POSSIBILITY OF SUCH DAMAGE.
43 * /LICENSE
44 *
45 */
46 /*
47 * Copyright (c) 1997, 1998, 1999, 2000
48 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
49 *
50 * Redistribution and use in source and binary forms, with or without
51 * modification, are permitted provided that the following conditions
52 * are met:
53 * 1. Redistributions of source code must retain the above copyright
54 * notice, this list of conditions and the following disclaimer.
55 * 2. Redistributions in binary form must reproduce the above copyright
56 * notice, this list of conditions and the following disclaimer in the
57 * documentation and/or other materials provided with the distribution.
58 * 3. All advertising materials mentioning features or use of this software
59 * must display the following acknowledgement:
60 * This product includes software developed by Bill Paul.
61 * 4. Neither the name of the author nor the names of any co-contributors
62 * may be used to endorse or promote products derived from this software
63 * without specific prior written permission.
64 *
65 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
66 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
67 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
68 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
69 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
70 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
71 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
72 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
73 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
74 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
75 * THE POSSIBILITY OF SUCH DAMAGE.
76 */
77 /*
78 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
79 *
80 * Permission to use, copy, modify, and distribute this software for any
81 * purpose with or without fee is hereby granted, provided that the above
82 * copyright notice and this permission notice appear in all copies.
83 *
84 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
85 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
86 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
87 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
88 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
89 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
90 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
91 */
92
93 #include <sys/varargs.h>
94 #include <sys/types.h>
95 #include <sys/modctl.h>
96 #include <sys/conf.h>
97 #include <sys/devops.h>
98 #include <sys/stream.h>
99 #include <sys/strsun.h>
100 #include <sys/cmn_err.h>
101 #include <sys/ethernet.h>
102 #include <sys/kmem.h>
103 #include <sys/time.h>
104 #include <sys/pci.h>
105 #include <sys/mii.h>
106 #include <sys/miiregs.h>
107 #include <sys/mac.h>
108 #include <sys/mac_ether.h>
109 #include <sys/mac_provider.h>
110 #include <sys/debug.h>
111 #include <sys/note.h>
112 #include <sys/ddi.h>
113 #include <sys/sunddi.h>
114 #include <sys/vlan.h>
115
116 #include "yge.h"
117
/*
 * Access attributes for the memory-mapped device registers:
 * little-endian structural access with strict ordering so register
 * I/O is never merged or reordered by the framework.
 */
static struct ddi_device_acc_attr yge_regs_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};
123
/*
 * Access attributes for descriptor ring memory.  Descriptors are
 * little-endian structures shared with the chip, accessed with
 * strict ordering.
 */
static struct ddi_device_acc_attr yge_ring_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};
129
/*
 * Access attributes for packet buffer memory.  Buffers hold raw
 * frame bytes, so no endian swapping is ever wanted.
 */
static struct ddi_device_acc_attr yge_buf_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};
135
/* Descriptor rings must be 4 KB aligned (chip requirement). */
#define	DESC_ALIGN	0x1000

/*
 * DMA attributes for descriptor ring allocations: 32-bit addressable,
 * 4 KB aligned, and physically contiguous (single cookie).
 */
static ddi_dma_attr_t yge_ring_dma_attr = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* dma_attr_addr_lo */
	0x00000000ffffffffull,	/* dma_attr_addr_hi */
	0x00000000ffffffffull,	/* dma_attr_count_max */
	DESC_ALIGN,		/* dma_attr_align */
	0x000007fc,		/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	0x00000000ffffffffull,	/* dma_attr_maxxfer */
	0x00000000ffffffffull,	/* dma_attr_seg */
	1,			/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	0			/* dma_attr_flags */
};
152
/*
 * DMA attributes for packet buffer allocations: 32-bit addressable,
 * byte aligned, up to 8 scatter/gather segments per transfer.
 */
static ddi_dma_attr_t yge_buf_dma_attr = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* dma_attr_addr_lo */
	0x00000000ffffffffull,	/* dma_attr_addr_hi */
	0x00000000ffffffffull,	/* dma_attr_count_max */
	1,			/* dma_attr_align */
	0x0000fffc,		/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	0x000000000000ffffull,	/* dma_attr_maxxfer */
	0x00000000ffffffffull,	/* dma_attr_seg */
	8,			/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	0			/* dma_attr_flags */
};
167
168
/* Device-level attach/detach and power management. */
static int yge_attach(yge_dev_t *);
static void yge_detach(yge_dev_t *);
static int yge_suspend(yge_dev_t *);
static int yge_resume(yge_dev_t *);

/* Hardware initialization. */
static void yge_reset(yge_dev_t *);
static void yge_setup_rambuffer(yge_dev_t *);

/* Per-port setup and teardown. */
static int yge_init_port(yge_port_t *);
static void yge_uninit_port(yge_port_t *);
static int yge_register_port(yge_port_t *);

/* Interrupt handling and the Tx/Rx data path. */
static void yge_tick(void *);
static uint_t yge_intr(caddr_t, caddr_t);
static int yge_intr_gmac(yge_port_t *);
static void yge_intr_enable(yge_dev_t *);
static void yge_intr_disable(yge_dev_t *);
static boolean_t yge_handle_events(yge_dev_t *, mblk_t **, mblk_t **, int *);
static void yge_handle_hwerr(yge_port_t *, uint32_t);
static void yge_intr_hwerr(yge_dev_t *);
static mblk_t *yge_rxeof(yge_port_t *, uint32_t, int);
static void yge_txeof(yge_port_t *, int);
static boolean_t yge_send(yge_port_t *, mblk_t *);
static void yge_set_prefetch(yge_dev_t *, int, yge_ring_t *);
static void yge_set_rambuffer(yge_port_t *);
static void yge_start_port(yge_port_t *);
static void yge_stop_port(yge_port_t *);
static void yge_phy_power(yge_dev_t *, boolean_t);
static int yge_alloc_ring(yge_port_t *, yge_dev_t *, yge_ring_t *, uint32_t);
static void yge_free_ring(yge_ring_t *);
static uint8_t yge_find_capability(yge_dev_t *, uint8_t);

/* DMA resource management for the Tx/Rx rings. */
static int yge_txrx_dma_alloc(yge_port_t *);
static void yge_txrx_dma_free(yge_port_t *);
static void yge_init_rx_ring(yge_port_t *);
static void yge_init_tx_ring(yge_port_t *);

/* Low-level PHY (MII) register access. */
static uint16_t yge_mii_readreg(yge_port_t *, uint8_t, uint8_t);
static void yge_mii_writereg(yge_port_t *, uint8_t, uint8_t, uint16_t);

/* Entry points used by the common MII module (see yge_mii_ops). */
static uint16_t yge_mii_read(void *, uint8_t, uint8_t);
static void yge_mii_write(void *, uint8_t, uint8_t, uint16_t);
static void yge_mii_notify(void *, link_state_t);

/* Receive filtering and deferred task processing. */
static void yge_setrxfilt(yge_port_t *);
static void yge_restart_task(yge_dev_t *);
static void yge_task(void *);
static void yge_dispatch(yge_dev_t *, int);

/* Statistics. */
static void yge_stats_clear(yge_port_t *);
static void yge_stats_update(yge_port_t *);
static uint32_t yge_hashbit(const uint8_t *);

/* GLDv3 (mac(9E)) entry points. */
static int yge_m_unicst(void *, const uint8_t *);
static int yge_m_multicst(void *, boolean_t, const uint8_t *);
static int yge_m_promisc(void *, boolean_t);
static mblk_t *yge_m_tx(void *, mblk_t *);
static int yge_m_stat(void *, uint_t, uint64_t *);
static int yge_m_start(void *);
static void yge_m_stop(void *);
static int yge_m_getprop(void *, const char *, mac_prop_id_t, uint_t, void *);
static void yge_m_propinfo(void *, const char *, mac_prop_id_t,
    mac_prop_info_handle_t);
static int yge_m_setprop(void *, const char *, mac_prop_id_t, uint_t,
    const void *);
static void yge_m_ioctl(void *, queue_t *, mblk_t *);

/* Error reporting and externally-implemented PHY support routines. */
void yge_error(yge_dev_t *, yge_port_t *, char *, ...);
extern void yge_phys_update(yge_port_t *);
extern int yge_phys_restart(yge_port_t *, boolean_t);
extern int yge_phys_init(yge_port_t *, phy_readreg_t, phy_writereg_t);
240
/*
 * GLDv3 callback vector.  The MC_* flags advertise which of the
 * optional entry points (ioctl and property handlers) are provided.
 */
static mac_callbacks_t yge_m_callbacks = {
	MC_IOCTL | MC_SETPROP | MC_GETPROP | MC_PROPINFO,
	yge_m_stat,
	yge_m_start,
	yge_m_stop,
	yge_m_promisc,
	yge_m_multicst,
	yge_m_unicst,
	yge_m_tx,
	NULL,		/* reserved */
	yge_m_ioctl,
	NULL,		/* mc_getcapab */
	NULL,		/* mc_open */
	NULL,		/* mc_close */
	yge_m_setprop,
	yge_m_getprop,
	yge_m_propinfo
};
259
/*
 * Operations vector handed to the common MII module; it calls back
 * into this driver for PHY register access and link notifications.
 */
static mii_ops_t yge_mii_ops = {
	MII_OPS_VERSION,
	yge_mii_read,
	yge_mii_write,
	yge_mii_notify,
	NULL		/* reset */
};
267
/*
 * This is the low level interface routine to read from the PHY
 * MII registers. There are multiple steps to these accesses. First
 * the register number is written to an address register. Then after
 * a specified delay the status is checked until the data is present.
 */
274 static uint16_t
275 yge_mii_readreg(yge_port_t *port, uint8_t phy, uint8_t reg)
276 {
277 yge_dev_t *dev = port->p_dev;
278 int pnum = port->p_port;
279 uint16_t val;
280
281 GMAC_WRITE_2(dev, pnum, GM_SMI_CTRL,
282 GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
283
284 for (int i = 0; i < YGE_TIMEOUT; i += 10) {
285 drv_usecwait(10);
286 val = GMAC_READ_2(dev, pnum, GM_SMI_CTRL);
287 if ((val & GM_SMI_CT_RD_VAL) != 0) {
288 val = GMAC_READ_2(dev, pnum, GM_SMI_DATA);
289 return (val);
290 }
291 }
292
293 return (0xffff);
294 }
295
/*
 * This is the low level interface routine to write to the PHY
 * MII registers. There are multiple steps to these accesses. The
 * data and the target register's address are written to the PHY.
 * Then the PHY is polled until it is done with the write. Note
 * that the delays are specified and required!
 */
303 static void
304 yge_mii_writereg(yge_port_t *port, uint8_t phy, uint8_t reg, uint16_t val)
305 {
306 yge_dev_t *dev = port->p_dev;
307 int pnum = port->p_port;
308
309 GMAC_WRITE_2(dev, pnum, GM_SMI_DATA, val);
310 GMAC_WRITE_2(dev, pnum, GM_SMI_CTRL,
311 GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg));
312
313 for (int i = 0; i < YGE_TIMEOUT; i += 10) {
314 drv_usecwait(10);
315 if ((GMAC_READ_2(dev, pnum, GM_SMI_CTRL) & GM_SMI_CT_BUSY) == 0)
316 return;
317 }
318
319 yge_error(NULL, port, "phy write timeout");
320 }
321
322 static uint16_t
323 yge_mii_read(void *arg, uint8_t phy, uint8_t reg)
324 {
325 yge_port_t *port = arg;
326 uint16_t rv;
327
328 PHY_LOCK(port->p_dev);
329 rv = yge_mii_readreg(port, phy, reg);
330 PHY_UNLOCK(port->p_dev);
331 return (rv);
332 }
333
334 static void
335 yge_mii_write(void *arg, uint8_t phy, uint8_t reg, uint16_t val)
336 {
337 yge_port_t *port = arg;
338
339 PHY_LOCK(port->p_dev);
340 yge_mii_writereg(port, phy, reg, val);
341 PHY_UNLOCK(port->p_dev);
342 }
343
344 /*
345 * The MII common code calls this function to let the MAC driver
346 * know when there has been a change in status.
347 */
void
yge_mii_notify(void *arg, link_state_t link)
{
	yge_port_t *port = arg;
	yge_dev_t *dev = port->p_dev;
	uint32_t gmac;
	uint32_t gpcr;
	link_flowctrl_t fc;
	link_duplex_t duplex;
	int speed;

	/* Snapshot negotiated parameters from the common MII layer. */
	fc = mii_get_flowctrl(port->p_mii);
	duplex = mii_get_duplex(port->p_mii);
	speed = mii_get_speed(port->p_mii);

	DEV_LOCK(dev);

	if (link == LINK_STATE_UP) {

		/* Enable Tx FIFO Underrun. */
		CSR_WRITE_1(dev, MR_ADDR(port->p_port, GMAC_IRQ_MSK),
		    GM_IS_TX_FF_UR |	/* TX FIFO underflow */
		    GM_IS_RX_FF_OR);	/* RX FIFO overflow */

		/*
		 * Start from "all auto-update disabled"; the MAC is
		 * programmed manually below from the MII results.
		 */
		gpcr = GM_GPCR_AU_ALL_DIS;

		/*
		 * Map the negotiated flow control mode onto the GMAC
		 * pause setting and the Rx/Tx flow control disables
		 * (GM_GPCR_FC_*_DIS bits are disables, hence inverted).
		 */
		switch (fc) {
		case LINK_FLOWCTRL_BI:
			gmac = GMC_PAUSE_ON;
			gpcr &= ~(GM_GPCR_FC_RX_DIS | GM_GPCR_FC_TX_DIS);
			break;
		case LINK_FLOWCTRL_TX:
			gmac = GMC_PAUSE_ON;
			gpcr |= GM_GPCR_FC_RX_DIS;
			break;
		case LINK_FLOWCTRL_RX:
			gmac = GMC_PAUSE_ON;
			gpcr |= GM_GPCR_FC_TX_DIS;
			break;
		case LINK_FLOWCTRL_NONE:
		default:
			gmac = GMC_PAUSE_OFF;
			gpcr |= GM_GPCR_FC_RX_DIS;
			gpcr |= GM_GPCR_FC_TX_DIS;
			break;
		}

		/* Program the negotiated speed (10 Mbps is the default). */
		gpcr &= ~((GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100));
		switch (speed) {
		case 1000:
			gpcr |= GM_GPCR_SPEED_1000;
			break;
		case 100:
			gpcr |= GM_GPCR_SPEED_100;
			break;
		case 10:
		default:
			break;
		}

		if (duplex == LINK_DUPLEX_FULL) {
			gpcr |= GM_GPCR_DUP_FULL;
		} else {
			/* Half duplex: force flow control off entirely. */
			gpcr &= ~(GM_GPCR_DUP_FULL);
			gmac = GMC_PAUSE_OFF;
			gpcr |= GM_GPCR_FC_RX_DIS;
			gpcr |= GM_GPCR_FC_TX_DIS;
		}

		/* Enable the MAC with the settings computed above. */
		gpcr |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
		GMAC_WRITE_2(dev, port->p_port, GM_GP_CTRL, gpcr);

		/* Read again to ensure writing. */
		(void) GMAC_READ_2(dev, port->p_port, GM_GP_CTRL);

		/* write out the flow control gmac setting */
		CSR_WRITE_4(dev, MR_ADDR(port->p_port, GMAC_CTRL), gmac);

	} else {
		/* Disable Rx/Tx MAC. */
		gpcr = GMAC_READ_2(dev, port->p_port, GM_GP_CTRL);
		gpcr &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
		GMAC_WRITE_2(dev, port->p_port, GM_GP_CTRL, gpcr);

		/* Read again to ensure writing. */
		(void) GMAC_READ_2(dev, port->p_port, GM_GP_CTRL);
	}

	DEV_UNLOCK(dev);

	/* Report the new link state to the MAC layer (outside the lock). */
	mac_link_update(port->p_mh, link);

	/* Restart any transmit traffic that was blocked while link was down. */
	if (port->p_running && (link == LINK_STATE_UP)) {
		mac_tx_update(port->p_mh);
	}
}
444
/*
 * Program the port's receive filtering: station address, multicast
 * hash filter, and unicast/multicast filter enables (cleared when the
 * port is in promiscuous mode).  No-op while the device is suspended.
 */
static void
yge_setrxfilt(yge_port_t *port)
{
	yge_dev_t *dev;
	uint16_t mode;
	uint8_t *ea;
	uint32_t *mchash;
	int pnum;

	dev = port->p_dev;
	pnum = port->p_port;
	ea = port->p_curraddr;
	mchash = port->p_mchash;

	if (dev->d_suspended)
		return;

	/*
	 * Set station address, 16 bits at a time, low byte first.
	 * The same address is written to both the first and second
	 * source address register banks.
	 */
	for (int i = 0; i < (ETHERADDRL / 2); i++) {
		GMAC_WRITE_2(dev, pnum, GM_SRC_ADDR_1L + i * 4,
		    ((uint16_t)ea[i * 2] | ((uint16_t)ea[(i * 2) + 1] << 8)));
	}
	for (int i = 0; i < (ETHERADDRL / 2); i++) {
		GMAC_WRITE_2(dev, pnum, GM_SRC_ADDR_2L + i * 4,
		    ((uint16_t)ea[i * 2] | ((uint16_t)ea[(i * 2) + 1] << 8)));
	}

	/* Figure out receive filtering mode. */
	mode = GMAC_READ_2(dev, pnum, GM_RX_CTRL);
	if (port->p_promisc) {
		/* Promiscuous: disable both unicast and multicast filters. */
		mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
	} else {
		mode |= (GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
	}
	/* Write the 64-bit multicast hash filter, 16 bits per register. */
	GMAC_WRITE_2(dev, pnum, GM_MC_ADDR_H1, mchash[0] & 0xffff);
	GMAC_WRITE_2(dev, pnum, GM_MC_ADDR_H2, (mchash[0] >> 16) & 0xffff);
	GMAC_WRITE_2(dev, pnum, GM_MC_ADDR_H3, mchash[1] & 0xffff);
	GMAC_WRITE_2(dev, pnum, GM_MC_ADDR_H4, (mchash[1] >> 16) & 0xffff);
	/* Write the receive filtering mode. */
	GMAC_WRITE_2(dev, pnum, GM_RX_CTRL, mode);
}
487
/*
 * Initialize the receive descriptor ring: clear it, hand every
 * descriptor (with its preallocated buffer) to the hardware, and
 * program the chip's prefetch unit for this ring.
 */
static void
yge_init_rx_ring(yge_port_t *port)
{
	yge_buf_t *rxb;
	yge_ring_t *ring;
	int prod;

	port->p_rx_cons = 0;
	port->p_rx_putwm = YGE_PUT_WM;
	ring = &port->p_rx_ring;

	/* ala bzero, but uses safer acch access */
	CLEARRING(ring);

	for (prod = 0; prod < YGE_RX_RING_CNT; prod++) {
		/* Hang out receive buffers. */
		rxb = &port->p_rx_buf[prod];

		/* Descriptor gets the buffer's DMA address and ownership. */
		PUTADDR(ring, prod, rxb->b_paddr);
		PUTCTRL(ring, prod, port->p_framesize | OP_PACKET | HW_OWNER);
	}

	/* Flush descriptor writes to the device before enabling prefetch. */
	SYNCRING(ring, DDI_DMA_SYNC_FORDEV);

	yge_set_prefetch(port->p_dev, port->p_rxq, ring);

	/* Update prefetch unit. */
	CSR_WRITE_2(port->p_dev,
	    Y2_PREF_Q_ADDR(port->p_rxq, PREF_UNIT_PUT_IDX_REG),
	    YGE_RX_RING_CNT - 1);
}
519
520 static void
521 yge_init_tx_ring(yge_port_t *port)
522 {
523 yge_ring_t *ring = &port->p_tx_ring;
524
525 port->p_tx_prod = 0;
526 port->p_tx_cons = 0;
527 port->p_tx_cnt = 0;
528
529 CLEARRING(ring);
530 SYNCRING(ring, DDI_DMA_SYNC_FORDEV);
531
532 yge_set_prefetch(port->p_dev, port->p_txq, ring);
533 }
534
535 static void
536 yge_setup_rambuffer(yge_dev_t *dev)
537 {
538 int next;
539 int i;
540
541 /* Get adapter SRAM size. */
542 dev->d_ramsize = CSR_READ_1(dev, B2_E_0) * 4;
543 if (dev->d_ramsize == 0)
544 return;
545
546 dev->d_pflags |= PORT_FLAG_RAMBUF;
547 /*
548 * Give receiver 2/3 of memory and round down to the multiple
549 * of 1024. Tx/Rx RAM buffer size of Yukon 2 should be multiple
550 * of 1024.
551 */
552 dev->d_rxqsize = (((dev->d_ramsize * 1024 * 2) / 3) & ~(1024 - 1));
553 dev->d_txqsize = (dev->d_ramsize * 1024) - dev->d_rxqsize;
554
555 for (i = 0, next = 0; i < dev->d_num_port; i++) {
556 dev->d_rxqstart[i] = next;
557 dev->d_rxqend[i] = next + dev->d_rxqsize - 1;
558 next = dev->d_rxqend[i] + 1;
559 dev->d_txqstart[i] = next;
560 dev->d_txqend[i] = next + dev->d_txqsize - 1;
561 next = dev->d_txqend[i] + 1;
562 }
563 }
564
/*
 * Power the PHY(s) and associated clocks up or down.  The sequence,
 * including the chip-revision special cases, follows the FreeBSD
 * msk(4) origin of this driver; the ordering of the register writes
 * is significant.
 */
static void
yge_phy_power(yge_dev_t *dev, boolean_t powerup)
{
	uint32_t val;
	int i;

	if (powerup) {
		/* Switch power to VCC (WA for VAUX problem). */
		CSR_WRITE_1(dev, B0_POWER_CTRL,
		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
		/* Disable Core Clock Division, set Clock Select to 0. */
		CSR_WRITE_4(dev, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);

		val = 0;
		if (dev->d_hw_id == CHIP_ID_YUKON_XL &&
		    dev->d_hw_rev > CHIP_REV_YU_XL_A1) {
			/* Enable bits are inverted. */
			val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
			    Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
			    Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
		}
		/*
		 * Enable PCI & Core Clock, enable clock gating for both Links.
		 */
		CSR_WRITE_1(dev, B2_Y2_CLK_GATE, val);

		val = pci_config_get32(dev->d_pcih, PCI_OUR_REG_1);
		val &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
		if (dev->d_hw_id == CHIP_ID_YUKON_XL &&
		    dev->d_hw_rev > CHIP_REV_YU_XL_A1) {
			/* Deassert Low Power for 1st PHY. */
			val |= PCI_Y2_PHY1_COMA;
			if (dev->d_num_port > 1)
				val |= PCI_Y2_PHY2_COMA;
		}

		/* Release PHY from PowerDown/COMA mode. */
		pci_config_put32(dev->d_pcih, PCI_OUR_REG_1, val);

		/* Additional clock/ASPM setup for newer chip families. */
		switch (dev->d_hw_id) {
		case CHIP_ID_YUKON_EC_U:
		case CHIP_ID_YUKON_EX:
		case CHIP_ID_YUKON_FE_P: {
			uint32_t our;

			CSR_WRITE_2(dev, B0_CTST, Y2_HW_WOL_OFF);

			/* Enable all clocks. */
			pci_config_put32(dev->d_pcih, PCI_OUR_REG_3, 0);

			our = pci_config_get32(dev->d_pcih, PCI_OUR_REG_4);
			our &= (PCI_FORCE_ASPM_REQUEST|PCI_ASPM_GPHY_LINK_DOWN|
			    PCI_ASPM_INT_FIFO_EMPTY|PCI_ASPM_CLKRUN_REQUEST);
			/* Set all bits to 0 except bits 15..12. */
			pci_config_put32(dev->d_pcih, PCI_OUR_REG_4, our);

			/* Set to default value. */
			our = pci_config_get32(dev->d_pcih, PCI_OUR_REG_5);
			our &= P_CTL_TIM_VMAIN_AV_MSK;
			pci_config_put32(dev->d_pcih, PCI_OUR_REG_5, our);

			pci_config_put32(dev->d_pcih, PCI_OUR_REG_1, 0);

			/*
			 * Enable workaround for dev 4.107 on Yukon-Ultra
			 * and Extreme
			 */
			our = CSR_READ_4(dev, B2_GP_IO);
			our |= GLB_GPIO_STAT_RACE_DIS;
			CSR_WRITE_4(dev, B2_GP_IO, our);

			/* Flush the posted write. */
			(void) CSR_READ_4(dev, B2_GP_IO);
			break;
		}
		default:
			break;
		}

		/* Pulse the GMAC link reset on every port. */
		for (i = 0; i < dev->d_num_port; i++) {
			CSR_WRITE_2(dev, MR_ADDR(i, GMAC_LINK_CTRL),
			    GMLC_RST_SET);
			CSR_WRITE_2(dev, MR_ADDR(i, GMAC_LINK_CTRL),
			    GMLC_RST_CLR);
		}
	} else {
		/* Powering down: put the PHY(s) into PowerDown/COMA mode. */
		val = pci_config_get32(dev->d_pcih, PCI_OUR_REG_1);
		if (dev->d_hw_id == CHIP_ID_YUKON_XL &&
		    dev->d_hw_rev > CHIP_REV_YU_XL_A1) {
			val &= ~PCI_Y2_PHY1_COMA;
			if (dev->d_num_port > 1)
				val &= ~PCI_Y2_PHY2_COMA;
			val &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
		} else {
			val |= (PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
		}
		pci_config_put32(dev->d_pcih, PCI_OUR_REG_1, val);

		val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
		    Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
		    Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
		if (dev->d_hw_id == CHIP_ID_YUKON_XL &&
		    dev->d_hw_rev > CHIP_REV_YU_XL_A1) {
			/* Enable bits are inverted. */
			val = 0;
		}
		/*
		 * Disable PCI & Core Clock, disable clock gating for
		 * both Links.
		 */
		CSR_WRITE_1(dev, B2_Y2_CLK_GATE, val);
		CSR_WRITE_1(dev, B0_POWER_CTRL,
		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);
	}
}
679
/*
 * Perform a full software reset of the adapter: disable ASF, reset the
 * chip, clear PCI/PCIe error state, power up the PHYs, reset the
 * per-port GMACs, program RAM interface timeouts, apply bus-specific
 * workarounds, and (re)initialize the status descriptor unit.
 * The ordering of these steps is significant.
 */
static void
yge_reset(yge_dev_t *dev)
{
	uint64_t addr;
	uint16_t status;
	uint32_t val;
	int i;
	ddi_acc_handle_t pcih = dev->d_pcih;

	/* Turn off ASF */
	if (dev->d_hw_id == CHIP_ID_YUKON_EX) {
		status = CSR_READ_2(dev, B28_Y2_ASF_STAT_CMD);
		/* Clear AHB bridge & microcontroller reset */
		status &= ~Y2_ASF_CPU_MODE;
		status &= ~Y2_ASF_AHB_RST;
		/* Clear ASF microcontroller state */
		status &= ~Y2_ASF_STAT_MSK;
		CSR_WRITE_2(dev, B28_Y2_ASF_STAT_CMD, status);
	} else {
		CSR_WRITE_1(dev, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
	}
	CSR_WRITE_2(dev, B0_CTST, Y2_ASF_DISABLE);

	/*
	 * Since we disabled ASF, S/W reset is required for Power Management.
	 */
	CSR_WRITE_1(dev, B0_CTST, CS_RST_SET);
	CSR_WRITE_1(dev, B0_CTST, CS_RST_CLR);

	/* Allow writes to PCI config space */
	CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_ON);

	/* Clear all error bits in the PCI status register. */
	status = pci_config_get16(pcih, PCI_CONF_STAT);
	/*
	 * NOTE(review): TST_CFG_WRITE_ON was already set a few lines
	 * above; this second write appears redundant (but harmless) --
	 * confirm against the FreeBSD msk(4) origin before removing.
	 */
	CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_ON);

	/* Status-register error bits are write-one-to-clear. */
	status |= (PCI_STAT_S_PERROR | PCI_STAT_S_SYSERR | PCI_STAT_R_MAST_AB |
	    PCI_STAT_R_TARG_AB | PCI_STAT_PERROR);
	pci_config_put16(pcih, PCI_CONF_STAT, status);

	CSR_WRITE_1(dev, B0_CTST, CS_MRST_CLR);

	/* Bus-type specific error cleanup and tuning. */
	switch (dev->d_bustype) {
	case PEX_BUS:
		/* Clear all PEX errors. */
		CSR_PCI_WRITE_4(dev, Y2_CFG_AER + AER_UNCOR_ERR, 0xffffffff);

		/* is error bit status stuck? */
		val = CSR_PCI_READ_4(dev, PEX_UNC_ERR_STAT);
		if ((val & PEX_RX_OV) != 0) {
			/* Mask off unrecoverable PCIe error interrupts. */
			dev->d_intrmask &= ~Y2_IS_HW_ERR;
			dev->d_intrhwemask &= ~Y2_IS_PCI_EXP;
		}
		break;
	case PCI_BUS:
		/* Set Cache Line Size to 2 (8 bytes) if configured to 0. */
		if (pci_config_get8(pcih, PCI_CONF_CACHE_LINESZ) == 0)
			pci_config_put16(pcih, PCI_CONF_CACHE_LINESZ, 2);
		break;
	case PCIX_BUS:
		/* Set Cache Line Size to 2 (8 bytes) if configured to 0. */
		if (pci_config_get8(pcih, PCI_CONF_CACHE_LINESZ) == 0)
			pci_config_put16(pcih, PCI_CONF_CACHE_LINESZ, 2);

		/* Set Cache Line Size opt. */
		val = pci_config_get32(pcih, PCI_OUR_REG_1);
		val |= PCI_CLS_OPT;
		pci_config_put32(pcih, PCI_OUR_REG_1, val);
		break;
	}

	/* Set PHY power state. */
	yge_phy_power(dev, B_TRUE);

	/* Reset GPHY/GMAC Control */
	for (i = 0; i < dev->d_num_port; i++) {
		/* GPHY Control reset. */
		CSR_WRITE_4(dev, MR_ADDR(i, GPHY_CTRL), GPC_RST_SET);
		CSR_WRITE_4(dev, MR_ADDR(i, GPHY_CTRL), GPC_RST_CLR);
		/* GMAC Control reset. */
		CSR_WRITE_4(dev, MR_ADDR(i, GMAC_CTRL), GMC_RST_SET);
		CSR_WRITE_4(dev, MR_ADDR(i, GMAC_CTRL), GMC_RST_CLR);
		if (dev->d_hw_id == CHIP_ID_YUKON_EX ||
		    dev->d_hw_id == CHIP_ID_YUKON_SUPR) {
			/* Bypass MACsec/retry units on EX/SUPR. */
			CSR_WRITE_2(dev, MR_ADDR(i, GMAC_CTRL),
			    (GMC_BYP_RETR_ON | GMC_BYP_MACSECRX_ON |
			    GMC_BYP_MACSECTX_ON));
		}
		CSR_WRITE_2(dev, MR_ADDR(i, GMAC_CTRL), GMC_F_LOOPB_OFF);

	}
	CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

	/* LED On. */
	CSR_WRITE_2(dev, B0_CTST, Y2_LED_STAT_ON);

	/* Clear TWSI IRQ. */
	CSR_WRITE_4(dev, B2_I2C_IRQ, I2C_CLR_IRQ);

	/* Turn off hardware timer. */
	CSR_WRITE_1(dev, B2_TI_CTRL, TIM_STOP);
	CSR_WRITE_1(dev, B2_TI_CTRL, TIM_CLR_IRQ);

	/* Turn off descriptor polling. */
	CSR_WRITE_1(dev, B28_DPT_CTRL, DPT_STOP);

	/* Turn off time stamps. */
	CSR_WRITE_1(dev, GMAC_TI_ST_CTRL, GMT_ST_STOP);
	CSR_WRITE_1(dev, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);

	/* Don't permit config space writing */
	CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

	/* enable TX Arbiters */
	for (i = 0; i < dev->d_num_port; i++)
		CSR_WRITE_1(dev, MR_ADDR(i, TXA_CTRL), TXA_ENA_ARB);

	/* Configure timeout values. */
	for (i = 0; i < dev->d_num_port; i++) {
		CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);

		CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_WTO_R1), RI_TO_53);
		CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA1), RI_TO_53);
		CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS1), RI_TO_53);
		CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_RTO_R1), RI_TO_53);
		CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA1), RI_TO_53);
		CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS1), RI_TO_53);
		CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_WTO_R2), RI_TO_53);
		CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA2), RI_TO_53);
		CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS2), RI_TO_53);
		CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_RTO_R2), RI_TO_53);
		CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA2), RI_TO_53);
		CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS2), RI_TO_53);
	}

	/* Disable all interrupts. */
	CSR_WRITE_4(dev, B0_HWE_IMSK, 0);
	(void) CSR_READ_4(dev, B0_HWE_IMSK);
	CSR_WRITE_4(dev, B0_IMSK, 0);
	(void) CSR_READ_4(dev, B0_IMSK);

	/*
	 * On dual port PCI-X card, there is a problem where status
	 * can be received out of order due to split transactions.
	 */
	if (dev->d_bustype == PCIX_BUS && dev->d_num_port > 1) {
		int pcix;
		uint16_t pcix_cmd;

		if ((pcix = yge_find_capability(dev, PCI_CAP_ID_PCIX)) != 0) {
			pcix_cmd = pci_config_get16(pcih, pcix + 2);
			/* Clear Max Outstanding Split Transactions. */
			pcix_cmd &= ~0x70;
			CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_ON);
			pci_config_put16(pcih, pcix + 2, pcix_cmd);
			CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
		}
	}
	if (dev->d_bustype == PEX_BUS) {
		uint16_t v, width;

		v = pci_config_get16(pcih, PEX_DEV_CTRL);
		/* Change Max. Read Request Size to 4096 bytes. */
		v &= ~PEX_DC_MAX_RRS_MSK;
		v |= PEX_DC_MAX_RD_RQ_SIZE(5);
		pci_config_put16(pcih, PEX_DEV_CTRL, v);
		/* Warn if the link trained at less than its maximum width. */
		width = pci_config_get16(pcih, PEX_LNK_STAT);
		width = (width & PEX_LS_LINK_WI_MSK) >> 4;
		v = pci_config_get16(pcih, PEX_LNK_CAP);
		v = (v & PEX_LS_LINK_WI_MSK) >> 4;
		if (v != width)
			yge_error(dev, NULL,
			    "Negotiated width of PCIe link(x%d) != "
			    "max. width of link(x%d)\n", width, v);
	}

	/* Clear status list. */
	CLEARRING(&dev->d_status_ring);
	SYNCRING(&dev->d_status_ring, DDI_DMA_SYNC_FORDEV);

	dev->d_stat_cons = 0;

	CSR_WRITE_4(dev, STAT_CTRL, SC_STAT_RST_SET);
	CSR_WRITE_4(dev, STAT_CTRL, SC_STAT_RST_CLR);

	/* Set the status list base address. */
	addr = dev->d_status_ring.r_paddr;
	CSR_WRITE_4(dev, STAT_LIST_ADDR_LO, YGE_ADDR_LO(addr));
	CSR_WRITE_4(dev, STAT_LIST_ADDR_HI, YGE_ADDR_HI(addr));

	/* Set the status list last index. */
	CSR_WRITE_2(dev, STAT_LAST_IDX, YGE_STAT_RING_CNT - 1);
	CSR_WRITE_2(dev, STAT_PUT_IDX, 0);

	/* Chip-revision specific status FIFO watermarks. */
	if (dev->d_hw_id == CHIP_ID_YUKON_EC &&
	    dev->d_hw_rev == CHIP_REV_YU_EC_A1) {
		/* WA for dev. #4.3 */
		CSR_WRITE_2(dev, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK);
		/* WA for dev #4.18 */
		CSR_WRITE_1(dev, STAT_FIFO_WM, 0x21);
		CSR_WRITE_1(dev, STAT_FIFO_ISR_WM, 7);
	} else {
		CSR_WRITE_2(dev, STAT_TX_IDX_TH, 10);
		CSR_WRITE_1(dev, STAT_FIFO_WM, 16);

		/* ISR status FIFO watermark */
		if (dev->d_hw_id == CHIP_ID_YUKON_XL &&
		    dev->d_hw_rev == CHIP_REV_YU_XL_A0)
			CSR_WRITE_1(dev, STAT_FIFO_ISR_WM, 4);
		else
			CSR_WRITE_1(dev, STAT_FIFO_ISR_WM, 16);

		CSR_WRITE_4(dev, STAT_ISR_TIMER_INI, 0x0190);
	}

	/*
	 * Use default value for STAT_ISR_TIMER_INI, STAT_LEV_TIMER_INI.
	 */
	CSR_WRITE_4(dev, STAT_TX_TIMER_INI, YGE_USECS(dev, 1000));

	/* Enable status unit. */
	CSR_WRITE_4(dev, STAT_CTRL, SC_STAT_OP_ON);

	/* Start the status-unit coalescing timers. */
	CSR_WRITE_1(dev, STAT_TX_TIMER_CTRL, TIM_START);
	CSR_WRITE_1(dev, STAT_LEV_TIMER_CTRL, TIM_START);
	CSR_WRITE_1(dev, STAT_ISR_TIMER_CTRL, TIM_START);
}
907
/*
 * One-time initialization for a port: allocate buffer descriptor
 * arrays, select the port's queue register offsets, allocate the MII
 * handle, read the factory station address, and prepare (but do not
 * yet register) the mac(9E) registration structure.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.  NOTE(review): on the failure
 * paths the buffers/MII handle allocated so far are not freed here --
 * presumably yge_uninit_port() handles that; confirm with the caller.
 */
static int
yge_init_port(yge_port_t *port)
{
	yge_dev_t *dev = port->p_dev;
	int i;
	mac_register_t *macp;

	port->p_flags = dev->d_pflags;
	/*
	 * Synthesize a unique PPA per port: instance number plus
	 * port index * 100.
	 */
	port->p_ppa = ddi_get_instance(dev->d_dip) + (port->p_port * 100);

	port->p_tx_buf = kmem_zalloc(sizeof (yge_buf_t) * YGE_TX_RING_CNT,
	    KM_SLEEP);
	port->p_rx_buf = kmem_zalloc(sizeof (yge_buf_t) * YGE_RX_RING_CNT,
	    KM_SLEEP);

	/* Setup Tx/Rx queue register offsets. */
	if (port->p_port == YGE_PORT_A) {
		port->p_txq = Q_XA1;
		port->p_txsq = Q_XS1;
		port->p_rxq = Q_R1;
	} else {
		port->p_txq = Q_XA2;
		port->p_txsq = Q_XS2;
		port->p_rxq = Q_R2;
	}

	/* Disable jumbo frame for Yukon FE. */
	if (dev->d_hw_id == CHIP_ID_YUKON_FE)
		port->p_flags |= PORT_FLAG_NOJUMBO;

	/*
	 * Start out assuming a regular MTU. Users can change this
	 * with dladm. The dladm daemon is supposed to issue commands
	 * to change the default MTU using m_setprop during early boot
	 * (before the interface is plumbed) if the user has so
	 * requested.
	 */
	port->p_mtu = ETHERMTU;

	port->p_mii = mii_alloc(port, dev->d_dip, &yge_mii_ops);
	if (port->p_mii == NULL) {
		yge_error(NULL, port, "MII handle allocation failed");
		return (DDI_FAILURE);
	}
	/* We assume all parts support asymmetric pause */
	mii_set_pauseable(port->p_mii, B_TRUE, B_TRUE);

	/*
	 * Get station address for this interface. Note that
	 * dual port cards actually come with three station
	 * addresses: one for each port, plus an extra. The
	 * extra one is used by the SysKonnect driver software
	 * as a 'virtual' station address for when both ports
	 * are operating in failover mode. Currently we don't
	 * use this extra address.
	 */
	for (i = 0; i < ETHERADDRL; i++) {
		port->p_curraddr[i] =
		    CSR_READ_1(dev, B2_MAC_1 + (port->p_port * 8) + i);
	}

	/* Register with Nemo. */
	if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
		yge_error(NULL, port, "MAC handle allocation failed");
		return (DDI_FAILURE);
	}
	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = port;
	macp->m_dip = dev->d_dip;
	macp->m_src_addr = port->p_curraddr;
	macp->m_callbacks = &yge_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = port->p_mtu;
	macp->m_instance = port->p_ppa;
	macp->m_margin = VLAN_TAGSZ;

	/* Actual mac_register() happens later, in yge_register_port(). */
	port->p_mreg = macp;

	return (DDI_SUCCESS);
}
988
/*
 * Allocate and install a single interrupt of the given type (MSI-X,
 * MSI, or fixed) for the device, then initialize the driver's locks at
 * the resulting interrupt priority.
 *
 * Returns DDI_SUCCESS once the handler is registered and the locks are
 * initialized; DDI_FAILURE otherwise, with all partially allocated
 * interrupt resources released.
 */
static int
yge_add_intr(yge_dev_t *dev, int intr_type)
{
	dev_info_t *dip;
	int count;
	int actual;
	int rv;
	int i, j;

	dip = dev->d_dip;

	/* Ask how many interrupts of this type the device supports. */
	rv = ddi_intr_get_nintrs(dip, intr_type, &count);
	if ((rv != DDI_SUCCESS) || (count == 0)) {
		yge_error(dev, NULL,
		    "ddi_intr_get_nintrs failed, rv %d, count %d", rv, count);
		return (DDI_FAILURE);
	}

	/*
	 * Allocate the interrupt. Note that we only bother with a single
	 * interrupt. One could argue that for MSI devices with dual ports,
	 * it would be nice to have a separate interrupt per port. But right
	 * now I don't know how to configure that, so we'll just settle for
	 * a single interrupt.
	 */
	dev->d_intrcnt = 1;

	/* NB: table is sized for "count" handles though only one is used */
	dev->d_intrsize = count * sizeof (ddi_intr_handle_t);
	dev->d_intrh = kmem_zalloc(dev->d_intrsize, KM_SLEEP);

	rv = ddi_intr_alloc(dip, dev->d_intrh, intr_type, 0, dev->d_intrcnt,
	    &actual, DDI_INTR_ALLOC_STRICT);
	if ((rv != DDI_SUCCESS) || (actual == 0)) {
		yge_error(dev, NULL,
		    "Unable to allocate interrupt, %d, count %d",
		    rv, actual);
		kmem_free(dev->d_intrh, dev->d_intrsize);
		return (DDI_FAILURE);
	}

	/* The priority is needed below to initialize the mutexes. */
	if ((rv = ddi_intr_get_pri(dev->d_intrh[0], &dev->d_intrpri)) !=
	    DDI_SUCCESS) {
		for (i = 0; i < dev->d_intrcnt; i++)
			(void) ddi_intr_free(dev->d_intrh[i]);
		yge_error(dev, NULL,
		    "Unable to get interrupt priority, %d", rv);
		kmem_free(dev->d_intrh, dev->d_intrsize);
		return (DDI_FAILURE);
	}

	/* Capabilities decide block vs. individual enable later on. */
	if ((rv = ddi_intr_get_cap(dev->d_intrh[0], &dev->d_intrcap)) !=
	    DDI_SUCCESS) {
		yge_error(dev, NULL,
		    "Unable to get interrupt capabilities, %d", rv);
		for (i = 0; i < dev->d_intrcnt; i++)
			(void) ddi_intr_free(dev->d_intrh[i]);
		kmem_free(dev->d_intrh, dev->d_intrsize);
		return (DDI_FAILURE);
	}

	/* register interrupt handler to kernel */
	for (i = 0; i < dev->d_intrcnt; i++) {
		if ((rv = ddi_intr_add_handler(dev->d_intrh[i], yge_intr,
		    dev, NULL)) != DDI_SUCCESS) {
			yge_error(dev, NULL,
			    "Unable to add interrupt handler, %d", rv);
			/* Unwind handlers added so far, then free all. */
			for (j = 0; j < i; j++)
				(void) ddi_intr_remove_handler(dev->d_intrh[j]);
			for (i = 0; i < dev->d_intrcnt; i++)
				(void) ddi_intr_free(dev->d_intrh[i]);
			kmem_free(dev->d_intrh, dev->d_intrsize);
			return (DDI_FAILURE);
		}
	}

	/* All driver locks are held at this interrupt's priority. */
	mutex_init(&dev->d_rxlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(dev->d_intrpri));
	mutex_init(&dev->d_txlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(dev->d_intrpri));
	mutex_init(&dev->d_phylock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(dev->d_intrpri));
	mutex_init(&dev->d_task_mtx, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(dev->d_intrpri));

	return (DDI_SUCCESS);
}
1075
1076 static int
1077 yge_attach_intr(yge_dev_t *dev)
1078 {
1079 dev_info_t *dip = dev->d_dip;
1080 int intr_types;
1081 int rv;
1082
1083 /* Allocate IRQ resources. */
1084 rv = ddi_intr_get_supported_types(dip, &intr_types);
1085 if (rv != DDI_SUCCESS) {
1086 yge_error(dev, NULL,
1087 "Unable to determine supported interrupt types, %d", rv);
1088 return (DDI_FAILURE);
1089 }
1090
1091 /*
1092 * We default to not supporting MSI. We've found some device
1093 * and motherboard combinations don't always work well with
1094 * MSI interrupts. Users may override this if they choose.
1095 */
1096 if (ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "msi_enable", 0) == 0) {
1097 /* If msi disable property present, disable both msix/msi. */
1098 if (intr_types & DDI_INTR_TYPE_FIXED) {
1099 intr_types &= ~(DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_MSIX);
1100 }
1101 }
1102
1103 if (intr_types & DDI_INTR_TYPE_MSIX) {
1104 if ((rv = yge_add_intr(dev, DDI_INTR_TYPE_MSIX)) ==
1105 DDI_SUCCESS)
1106 return (DDI_SUCCESS);
1107 }
1108
1109 if (intr_types & DDI_INTR_TYPE_MSI) {
1110 if ((rv = yge_add_intr(dev, DDI_INTR_TYPE_MSI)) ==
1111 DDI_SUCCESS)
1112 return (DDI_SUCCESS);
1113 }
1114
1115 if (intr_types & DDI_INTR_TYPE_FIXED) {
1116 if ((rv = yge_add_intr(dev, DDI_INTR_TYPE_FIXED)) ==
1117 DDI_SUCCESS)
1118 return (DDI_SUCCESS);
1119 }
1120
1121 yge_error(dev, NULL, "Unable to configure any interrupts");
1122 return (DDI_FAILURE);
1123 }
1124
1125 static void
1126 yge_intr_enable(yge_dev_t *dev)
1127 {
1128 int i;
1129 if (dev->d_intrcap & DDI_INTR_FLAG_BLOCK) {
1130 /* Call ddi_intr_block_enable() for MSI interrupts */
1131 (void) ddi_intr_block_enable(dev->d_intrh, dev->d_intrcnt);
1132 } else {
1133 /* Call ddi_intr_enable for FIXED interrupts */
1134 for (i = 0; i < dev->d_intrcnt; i++)
1135 (void) ddi_intr_enable(dev->d_intrh[i]);
1136 }
1137 }
1138
1139 void
1140 yge_intr_disable(yge_dev_t *dev)
1141 {
1142 int i;
1143
1144 if (dev->d_intrcap & DDI_INTR_FLAG_BLOCK) {
1145 (void) ddi_intr_block_disable(dev->d_intrh, dev->d_intrcnt);
1146 } else {
1147 for (i = 0; i < dev->d_intrcnt; i++)
1148 (void) ddi_intr_disable(dev->d_intrh[i]);
1149 }
1150 }
1151
1152 static uint8_t
1153 yge_find_capability(yge_dev_t *dev, uint8_t cap)
1154 {
1155 uint8_t ptr;
1156 uint16_t capit;
1157 ddi_acc_handle_t pcih = dev->d_pcih;
1158
1159 if ((pci_config_get16(pcih, PCI_CONF_STAT) & PCI_STAT_CAP) == 0) {
1160 return (0);
1161 }
1162 /* This assumes PCI, and not CardBus. */
1163 ptr = pci_config_get8(pcih, PCI_CONF_CAP_PTR);
1164 while (ptr != 0) {
1165 capit = pci_config_get8(pcih, ptr + PCI_CAP_ID);
1166 if (capit == cap) {
1167 return (ptr);
1168 }
1169 ptr = pci_config_get8(pcih, ptr + PCI_CAP_NEXT_PTR);
1170 }
1171 return (0);
1172 }
1173
/*
 * Device-level attach: map PCI config and CSR space, restore D0 power
 * state, bring the chip out of reset, identify it and count its MACs,
 * then allocate the status ring, taskq, and interrupts before
 * registering each port with the MAC layer.  On any failure,
 * yge_detach() is invoked to release whatever portion was set up.
 */
static int
yge_attach(yge_dev_t *dev)
{
	dev_info_t *dip = dev->d_dip;
	int rv;
	int nattached;
	uint8_t pm_cap;

	if (pci_config_setup(dip, &dev->d_pcih) != DDI_SUCCESS) {
		yge_error(dev, NULL, "Unable to map PCI configuration space");
		goto fail;
	}

	/*
	 * Map control/status registers.
	 */

	/* ensure the pmcsr status is D0 state */
	pm_cap = yge_find_capability(dev, PCI_CAP_ID_PM);
	if (pm_cap != 0) {
		uint16_t pmcsr;
		pmcsr = pci_config_get16(dev->d_pcih, pm_cap + PCI_PMCSR);
		pmcsr &= ~PCI_PMCSR_STATE_MASK;
		pci_config_put16(dev->d_pcih, pm_cap + PCI_PMCSR,
		    pmcsr | PCI_PMCSR_D0);
	}

	/* Enable PCI access and bus master. */
	pci_config_put16(dev->d_pcih, PCI_CONF_COMM,
	    pci_config_get16(dev->d_pcih, PCI_CONF_COMM) |
	    PCI_COMM_IO | PCI_COMM_MAE | PCI_COMM_ME);


	/* Allocate I/O resource */
	rv = ddi_regs_map_setup(dip, 1, &dev->d_regs, 0, 0, &yge_regs_attr,
	    &dev->d_regsh);
	if (rv != DDI_SUCCESS) {
		yge_error(dev, NULL, "Unable to map device registers");
		goto fail;
	}


	/*
	 * Enable all clocks.  The config write must be bracketed by
	 * TST_CFG_WRITE_ON/OFF to unlock the shadowed PCI register.
	 */
	CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_ON);
	pci_config_put32(dev->d_pcih, PCI_OUR_REG_3, 0);
	CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

	/* Release reset so chip id/revision registers can be read. */
	CSR_WRITE_2(dev, B0_CTST, CS_RST_CLR);
	dev->d_hw_id = CSR_READ_1(dev, B2_CHIP_ID);
	dev->d_hw_rev = (CSR_READ_1(dev, B2_MAC_CFG) >> 4) & 0x0f;


	/*
	 * Bail out if chip is not recognized. Note that we only enforce
	 * this in production builds. The Ultra-2 (88e8057) has a problem
	 * right now where TX works fine, but RX seems not to. So we've
	 * disabled that for now.
	 */
	if (dev->d_hw_id < CHIP_ID_YUKON_XL ||
	    dev->d_hw_id >= CHIP_ID_YUKON_UL_2) {
		yge_error(dev, NULL, "Unknown device: id=0x%02x, rev=0x%02x",
		    dev->d_hw_id, dev->d_hw_rev);
#ifndef	DEBUG
		goto fail;
#endif
	}

	/* Soft reset. */
	CSR_WRITE_2(dev, B0_CTST, CS_RST_SET);
	CSR_WRITE_2(dev, B0_CTST, CS_RST_CLR);
	/* PMD types 'L', 'S' and 'P' are treated as non-copper media. */
	dev->d_pmd = CSR_READ_1(dev, B2_PMD_TYP);
	if (dev->d_pmd == 'L' || dev->d_pmd == 'S' || dev->d_pmd == 'P')
		dev->d_coppertype = 0;
	else
		dev->d_coppertype = 1;
	/* Check number of MACs. */
	dev->d_num_port = 1;
	if ((CSR_READ_1(dev, B2_Y2_HW_RES) & CFG_DUAL_MAC_MSK) ==
	    CFG_DUAL_MAC_MSK) {
		/* Second MAC only counts if its link is not inactive. */
		if (!(CSR_READ_1(dev, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
			dev->d_num_port++;
	}

	/* Check bus type. */
	if (yge_find_capability(dev, PCI_CAP_ID_PCI_E) != 0) {
		dev->d_bustype = PEX_BUS;
	} else if (yge_find_capability(dev, PCI_CAP_ID_PCIX) != 0) {
		dev->d_bustype = PCIX_BUS;
	} else {
		dev->d_bustype = PCI_BUS;
	}

	/* Core clock in MHz, by chip generation. */
	switch (dev->d_hw_id) {
	case CHIP_ID_YUKON_EC:
		dev->d_clock = 125;	/* 125 Mhz */
		break;
	case CHIP_ID_YUKON_UL_2:
		dev->d_clock = 125;	/* 125 Mhz */
		break;
	case CHIP_ID_YUKON_SUPR:
		dev->d_clock = 125;	/* 125 Mhz */
		break;
	case CHIP_ID_YUKON_EC_U:
		dev->d_clock = 125;	/* 125 Mhz */
		break;
	case CHIP_ID_YUKON_EX:
		dev->d_clock = 125;	/* 125 Mhz */
		break;
	case CHIP_ID_YUKON_FE:
		dev->d_clock = 100;	/* 100 Mhz */
		break;
	case CHIP_ID_YUKON_FE_P:
		dev->d_clock = 50;	/* 50 Mhz */
		break;
	case CHIP_ID_YUKON_XL:
		dev->d_clock = 156;	/* 156 Mhz */
		break;
	default:
		dev->d_clock = 156;	/* 156 Mhz */
		break;
	}

	/* Max frames handled per interrupt pass (half the Rx ring). */
	dev->d_process_limit = YGE_RX_RING_CNT/2;

	rv = yge_alloc_ring(NULL, dev, &dev->d_status_ring, YGE_STAT_RING_CNT);
	if (rv != DDI_SUCCESS)
		goto fail;

	/* Setup event taskq. */
	dev->d_task_q = ddi_taskq_create(dip, "tq", 1, TASKQ_DEFAULTPRI, 0);
	if (dev->d_task_q == NULL) {
		yge_error(dev, NULL, "failed to create taskq");
		goto fail;
	}

	/* Init the condition variable */
	cv_init(&dev->d_task_cv, NULL, CV_DRIVER, NULL);

	/* Allocate IRQ resources. */
	if ((rv = yge_attach_intr(dev)) != DDI_SUCCESS) {
		goto fail;
	}

	/* Set base interrupt mask. */
	dev->d_intrmask = Y2_IS_HW_ERR | Y2_IS_STAT_BMU;
	dev->d_intrhwemask = Y2_IS_TIST_OV | Y2_IS_MST_ERR |
	    Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP;

	/* Reset the adapter. */
	yge_reset(dev);

	yge_setup_rambuffer(dev);

	nattached = 0;
	for (int i = 0; i < dev->d_num_port; i++) {
		yge_port_t *port = dev->d_port[i];
		if (yge_init_port(port) != DDI_SUCCESS) {
			goto fail;
		}
	}

	yge_intr_enable(dev);

	/* set up the periodic to run once per second */
	dev->d_periodic = ddi_periodic_add(yge_tick, dev, 1000000000, 0);

	/* Attach succeeds if at least one port registers with MAC. */
	for (int i = 0; i < dev->d_num_port; i++) {
		yge_port_t *port = dev->d_port[i];
		if (yge_register_port(port) == DDI_SUCCESS) {
			nattached++;
		}
	}

	if (nattached == 0) {
		goto fail;
	}

	/* Dispatch the taskq */
	if (ddi_taskq_dispatch(dev->d_task_q, yge_task, dev, DDI_SLEEP) !=
	    DDI_SUCCESS) {
		yge_error(dev, NULL, "failed to start taskq");
		goto fail;
	}

	ddi_report_dev(dip);

	return (DDI_SUCCESS);

fail:
	/* yge_detach() tolerates partially initialized state. */
	yge_detach(dev);
	return (DDI_FAILURE);
}
1366
1367 static int
1368 yge_register_port(yge_port_t *port)
1369 {
1370 if (mac_register(port->p_mreg, &port->p_mh) != DDI_SUCCESS) {
1371 yge_error(NULL, port, "MAC registration failed");
1372 return (DDI_FAILURE);
1373 }
1374
1375 return (DDI_SUCCESS);
1376 }
1377
1378 /*
1379 * Free up port specific resources. This is called only when the
1380 * port is not registered (and hence not running).
1381 */
1382 static void
1383 yge_uninit_port(yge_port_t *port)
1384 {
1385 ASSERT(!port->p_running);
1386
1387 if (port->p_mreg)
1388 mac_free(port->p_mreg);
1389
1390 if (port->p_mii)
1391 mii_free(port->p_mii);
1392
1393 yge_txrx_dma_free(port);
1394
1395 if (port->p_tx_buf)
1396 kmem_free(port->p_tx_buf,
1397 sizeof (yge_buf_t) * YGE_TX_RING_CNT);
1398 if (port->p_rx_buf)
1399 kmem_free(port->p_rx_buf,
1400 sizeof (yge_buf_t) * YGE_RX_RING_CNT);
1401 }
1402
/*
 * Release all device resources; the inverse of yge_attach().  This is
 * also the unwind path for a partial attach, so every step must
 * tolerate resources that were never set up.
 */
static void
yge_detach(yge_dev_t *dev)
{
	/*
	 * Turn off the periodic.
	 */
	if (dev->d_periodic)
		ddi_periodic_delete(dev->d_periodic);

	for (int i = 0; i < dev->d_num_port; i++) {
		yge_uninit_port(dev->d_port[i]);
	}

	/*
	 * Make sure all interrupts are disabled.
	 */
	CSR_WRITE_4(dev, B0_IMSK, 0);
	(void) CSR_READ_4(dev, B0_IMSK);
	CSR_WRITE_4(dev, B0_HWE_IMSK, 0);
	(void) CSR_READ_4(dev, B0_HWE_IMSK);

	/* LED Off. */
	CSR_WRITE_2(dev, B0_CTST, Y2_LED_STAT_OFF);

	/* Put hardware reset. */
	CSR_WRITE_2(dev, B0_CTST, CS_RST_SET);

	yge_free_ring(&dev->d_status_ring);

	/* Ask the worker thread to exit before destroying its taskq. */
	if (dev->d_task_q != NULL) {
		yge_dispatch(dev, YGE_TASK_EXIT);
		ddi_taskq_destroy(dev->d_task_q);
		dev->d_task_q = NULL;
	}

	cv_destroy(&dev->d_task_cv);

	yge_intr_disable(dev);

	/*
	 * The mutexes were created together with the interrupts (their
	 * priority comes from the interrupt), so they are torn down
	 * together here as well.
	 */
	if (dev->d_intrh != NULL) {
		for (int i = 0; i < dev->d_intrcnt; i++) {
			(void) ddi_intr_remove_handler(dev->d_intrh[i]);
			(void) ddi_intr_free(dev->d_intrh[i]);
		}
		kmem_free(dev->d_intrh, dev->d_intrsize);
		mutex_destroy(&dev->d_phylock);
		mutex_destroy(&dev->d_txlock);
		mutex_destroy(&dev->d_rxlock);
		mutex_destroy(&dev->d_task_mtx);
	}
	if (dev->d_regsh != NULL)
		ddi_regs_map_free(&dev->d_regsh);

	if (dev->d_pcih != NULL)
		pci_config_teardown(&dev->d_pcih);
}
1459
1460 static int
1461 yge_alloc_ring(yge_port_t *port, yge_dev_t *dev, yge_ring_t *ring, uint32_t num)
1462 {
1463 dev_info_t *dip;
1464 caddr_t kaddr;
1465 size_t len;
1466 int rv;
1467 ddi_dma_cookie_t dmac;
1468 unsigned ndmac;
1469
1470 if (port && !dev)
1471 dev = port->p_dev;
1472 dip = dev->d_dip;
1473
1474 ring->r_num = num;
1475
1476 rv = ddi_dma_alloc_handle(dip, &yge_ring_dma_attr, DDI_DMA_DONTWAIT,
1477 NULL, &ring->r_dmah);
1478 if (rv != DDI_SUCCESS) {
1479 yge_error(dev, port, "Unable to allocate ring DMA handle");
1480 return (DDI_FAILURE);
1481 }
1482
1483 rv = ddi_dma_mem_alloc(ring->r_dmah, num * sizeof (yge_desc_t),
1484 &yge_ring_attr, DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
1485 &kaddr, &len, &ring->r_acch);
1486 if (rv != DDI_SUCCESS) {
1487 yge_error(dev, port, "Unable to allocate ring DMA memory");
1488 return (DDI_FAILURE);
1489 }
1490 ring->r_size = len;
1491 ring->r_kaddr = (void *)kaddr;
1492
1493 bzero(kaddr, len);
1494
1495 rv = ddi_dma_addr_bind_handle(ring->r_dmah, NULL, kaddr,
1496 len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
1497 &dmac, &ndmac);
1498 if (rv != DDI_DMA_MAPPED) {
1499 yge_error(dev, port, "Unable to bind ring DMA handle");
1500 return (DDI_FAILURE);
1501 }
1502 ASSERT(ndmac == 1);
1503 ring->r_paddr = dmac.dmac_address;
1504
1505 return (DDI_SUCCESS);
1506 }
1507
1508 static void
1509 yge_free_ring(yge_ring_t *ring)
1510 {
1511 if (ring->r_paddr)
1512 (void) ddi_dma_unbind_handle(ring->r_dmah);
1513 ring->r_paddr = 0;
1514 if (ring->r_acch)
1515 ddi_dma_mem_free(&ring->r_acch);
1516 ring->r_kaddr = NULL;
1517 ring->r_acch = NULL;
1518 if (ring->r_dmah)
1519 ddi_dma_free_handle(&ring->r_dmah);
1520 ring->r_dmah = NULL;
1521 }
1522
1523 static int
1524 yge_alloc_buf(yge_port_t *port, yge_buf_t *b, size_t bufsz, int flag)
1525 {
1526 yge_dev_t *dev = port->p_dev;
1527 size_t l;
1528 int sflag;
1529 int rv;
1530 ddi_dma_cookie_t dmac;
1531 unsigned ndmac;
1532
1533 sflag = flag & (DDI_DMA_STREAMING | DDI_DMA_CONSISTENT);
1534
1535 /* Now allocate Tx buffers. */
1536 rv = ddi_dma_alloc_handle(dev->d_dip, &yge_buf_dma_attr,
1537 DDI_DMA_DONTWAIT, NULL, &b->b_dmah);
1538 if (rv != DDI_SUCCESS) {
1539 yge_error(NULL, port, "Unable to alloc DMA handle for buffer");
1540 return (DDI_FAILURE);
1541 }
1542
1543 rv = ddi_dma_mem_alloc(b->b_dmah, bufsz, &yge_buf_attr,
1544 sflag, DDI_DMA_DONTWAIT, NULL, &b->b_buf, &l, &b->b_acch);
1545 if (rv != DDI_SUCCESS) {
1546 yge_error(NULL, port, "Unable to alloc DMA memory for buffer");
1547 return (DDI_FAILURE);
1548 }
1549
1550 rv = ddi_dma_addr_bind_handle(b->b_dmah, NULL, b->b_buf, l, flag,
1551 DDI_DMA_DONTWAIT, NULL, &dmac, &ndmac);
1552 if (rv != DDI_DMA_MAPPED) {
1553 yge_error(NULL, port, "Unable to bind DMA handle for buffer");
1554 return (DDI_FAILURE);
1555 }
1556 ASSERT(ndmac == 1);
1557 b->b_paddr = dmac.dmac_address;
1558 return (DDI_SUCCESS);
1559 }
1560
1561 static void
1562 yge_free_buf(yge_buf_t *b)
1563 {
1564 if (b->b_paddr)
1565 (void) ddi_dma_unbind_handle(b->b_dmah);
1566 b->b_paddr = 0;
1567 if (b->b_acch)
1568 ddi_dma_mem_free(&b->b_acch);
1569 b->b_buf = NULL;
1570 b->b_acch = NULL;
1571 if (b->b_dmah)
1572 ddi_dma_free_handle(&b->b_dmah);
1573 b->b_dmah = NULL;
1574 }
1575
1576 static int
1577 yge_txrx_dma_alloc(yge_port_t *port)
1578 {
1579 uint32_t bufsz;
1580 int rv;
1581 int i;
1582 yge_buf_t *b;
1583
1584 /*
1585 * It seems that Yukon II supports full 64 bit DMA operations.
1586 * But we limit it to 32 bits only for now. The 64 bit
1587 * operation would require substantially more complex
1588 * descriptor handling, since in such a case we would need two
1589 * LEs to represent a single physical address.
1590 *
1591 * If we find that this is limiting us, then we should go back
1592 * and re-examine it.
1593 */
1594
1595 /* Note our preferred buffer size. */
1596 bufsz = port->p_mtu;
1597
1598 /* Allocate Tx ring. */
1599 rv = yge_alloc_ring(port, NULL, &port->p_tx_ring, YGE_TX_RING_CNT);
1600 if (rv != DDI_SUCCESS) {
1601 return (DDI_FAILURE);
1602 }
1603
1604 /* Now allocate Tx buffers. */
1605 b = port->p_tx_buf;
1606 for (i = 0; i < YGE_TX_RING_CNT; i++) {
1607 rv = yge_alloc_buf(port, b, bufsz,
1608 DDI_DMA_STREAMING | DDI_DMA_WRITE);
1609 if (rv != DDI_SUCCESS) {
1610 return (DDI_FAILURE);
1611 }
1612 b++;
1613 }
1614
1615 /* Allocate Rx ring. */
1616 rv = yge_alloc_ring(port, NULL, &port->p_rx_ring, YGE_RX_RING_CNT);
1617 if (rv != DDI_SUCCESS) {
1618 return (DDI_FAILURE);
1619 }
1620
1621 /* Now allocate Rx buffers. */
1622 b = port->p_rx_buf;
1623 for (i = 0; i < YGE_RX_RING_CNT; i++) {
1624 rv = yge_alloc_buf(port, b, bufsz,
1625 DDI_DMA_STREAMING | DDI_DMA_READ);
1626 if (rv != DDI_SUCCESS) {
1627 return (DDI_FAILURE);
1628 }
1629 b++;
1630 }
1631
1632 return (DDI_SUCCESS);
1633 }
1634
1635 static void
1636 yge_txrx_dma_free(yge_port_t *port)
1637 {
1638 yge_buf_t *b;
1639
1640 /* Tx ring. */
1641 yge_free_ring(&port->p_tx_ring);
1642
1643 /* Rx ring. */
1644 yge_free_ring(&port->p_rx_ring);
1645
1646 /* Tx buffers. */
1647 b = port->p_tx_buf;
1648 for (int i = 0; i < YGE_TX_RING_CNT; i++, b++) {
1649 yge_free_buf(b);
1650 }
1651 /* Rx buffers. */
1652 b = port->p_rx_buf;
1653 for (int i = 0; i < YGE_RX_RING_CNT; i++, b++) {
1654 yge_free_buf(b);
1655 }
1656 }
1657
1658 boolean_t
1659 yge_send(yge_port_t *port, mblk_t *mp)
1660 {
1661 yge_ring_t *ring = &port->p_tx_ring;
1662 yge_buf_t *txb;
1663 int16_t prod;
1664 size_t len;
1665
1666 /*
1667 * For now we're not going to support checksum offload or LSO.
1668 */
1669
1670 len = msgsize(mp);
1671 if (len > port->p_framesize) {
1672 /* too big! */
1673 freemsg(mp);
1674 return (B_TRUE);
1675 }
1676
1677 /* Check number of available descriptors. */
1678 if (port->p_tx_cnt + 1 >=
1679 (YGE_TX_RING_CNT - YGE_RESERVED_TX_DESC_CNT)) {
1680 port->p_wantw = B_TRUE;
1681 return (B_FALSE);
1682 }
1683
1684 prod = port->p_tx_prod;
1685
1686 txb = &port->p_tx_buf[prod];
1687 mcopymsg(mp, txb->b_buf);
1688 SYNCBUF(txb, DDI_DMA_SYNC_FORDEV);
1689
1690 PUTADDR(ring, prod, txb->b_paddr);
1691 PUTCTRL(ring, prod, len | OP_PACKET | HW_OWNER | EOP);
1692 SYNCENTRY(ring, prod, DDI_DMA_SYNC_FORDEV);
1693 port->p_tx_cnt++;
1694
1695 YGE_INC(prod, YGE_TX_RING_CNT);
1696
1697 /* Update producer index. */
1698 port->p_tx_prod = prod;
1699
1700 return (B_TRUE);
1701 }
1702
1703 static int
1704 yge_suspend(yge_dev_t *dev)
1705 {
1706 for (int i = 0; i < dev->d_num_port; i++) {
1707 yge_port_t *port = dev->d_port[i];
1708 mii_suspend(port->p_mii);
1709 }
1710
1711
1712 DEV_LOCK(dev);
1713
1714 for (int i = 0; i < dev->d_num_port; i++) {
1715 yge_port_t *port = dev->d_port[i];
1716
1717 if (port->p_running) {
1718 yge_stop_port(port);
1719 }
1720 }
1721
1722 /* Disable all interrupts. */
1723 CSR_WRITE_4(dev, B0_IMSK, 0);
1724 (void) CSR_READ_4(dev, B0_IMSK);
1725 CSR_WRITE_4(dev, B0_HWE_IMSK, 0);
1726 (void) CSR_READ_4(dev, B0_HWE_IMSK);
1727
1728 yge_phy_power(dev, B_FALSE);
1729
1730 /* Put hardware reset. */
1731 CSR_WRITE_2(dev, B0_CTST, CS_RST_SET);
1732 dev->d_suspended = B_TRUE;
1733
1734 DEV_UNLOCK(dev);
1735
1736 return (DDI_SUCCESS);
1737 }
1738
/*
 * Resume the device after suspend: restore D0 power state, re-enable
 * bus access and clocks, reset the chip, restore interrupt masks, and
 * restart any ports that were running when we suspended.  MII state
 * is restored last, outside the device lock.
 */
static int
yge_resume(yge_dev_t *dev)
{
	uint8_t pm_cap;

	DEV_LOCK(dev);

	/* ensure the pmcsr status is D0 state */
	CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_ON);

	if ((pm_cap = yge_find_capability(dev, PCI_CAP_ID_PM)) != 0) {
		uint16_t pmcsr;
		pmcsr = pci_config_get16(dev->d_pcih, pm_cap + PCI_PMCSR);
		pmcsr &= ~PCI_PMCSR_STATE_MASK;
		pci_config_put16(dev->d_pcih, pm_cap + PCI_PMCSR,
		    pmcsr | PCI_PMCSR_D0);
	}

	/* Enable PCI access and bus master. */
	pci_config_put16(dev->d_pcih, PCI_CONF_COMM,
	    pci_config_get16(dev->d_pcih, PCI_CONF_COMM) |
	    PCI_COMM_IO | PCI_COMM_MAE | PCI_COMM_ME);

	/* Enable all clocks. */
	switch (dev->d_hw_id) {
	case CHIP_ID_YUKON_EX:
	case CHIP_ID_YUKON_EC_U:
	case CHIP_ID_YUKON_FE_P:
		/* Only these chips get the clock register cleared here. */
		pci_config_put32(dev->d_pcih, PCI_OUR_REG_3, 0);
		break;
	}

	CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

	yge_reset(dev);

	/* Make sure interrupts are reenabled */
	CSR_WRITE_4(dev, B0_IMSK, 0);
	CSR_WRITE_4(dev, B0_IMSK, Y2_IS_HW_ERR | Y2_IS_STAT_BMU);
	CSR_WRITE_4(dev, B0_HWE_IMSK,
	    Y2_IS_TIST_OV | Y2_IS_MST_ERR |
	    Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP);

	/* Restart ports that were running at suspend time. */
	for (int i = 0; i < dev->d_num_port; i++) {
		yge_port_t *port = dev->d_port[i];

		if (port != NULL && port->p_running) {
			yge_start_port(port);
		}
	}
	dev->d_suspended = B_FALSE;

	DEV_UNLOCK(dev);

	/* Reset MII layer */
	for (int i = 0; i < dev->d_num_port; i++) {
		yge_port_t *port = dev->d_port[i];

		if (port->p_running) {
			mii_resume(port->p_mii);
			mac_tx_update(port->p_mh);
		}
	}

	return (DDI_SUCCESS);
}
1805
/*
 * Process one received-frame status LE for a port.  Returns an mblk
 * containing a copy of the frame, or NULL when the frame was bad or
 * no mblk could be allocated.  In every case the Rx descriptor is
 * recycled back to the hardware and the consumer index advances.
 * Caller must hold the device Rx lock.
 */
static mblk_t *
yge_rxeof(yge_port_t *port, uint32_t status, int len)
{
	yge_dev_t *dev = port->p_dev;
	mblk_t *mp;
	int cons, rxlen;
	yge_buf_t *rxb;
	yge_ring_t *ring;

	ASSERT(mutex_owned(&dev->d_rxlock));

	if (!port->p_running)
		return (NULL);

	ring = &port->p_rx_ring;
	cons = port->p_rx_cons;
	/* Hardware reports the frame length in the status high bits. */
	rxlen = status >> 16;
	rxb = &port->p_rx_buf[cons];
	mp = NULL;


	if ((dev->d_hw_id == CHIP_ID_YUKON_FE_P) &&
	    (dev->d_hw_rev == CHIP_REV_YU_FE2_A0)) {
		/*
		 * Apparently the status for this chip is not reliable.
		 * Only perform minimal consistency checking; the MAC
		 * and upper protocols will have to filter any garbage.
		 */
		if ((len > port->p_framesize) || (rxlen != len)) {
			goto bad;
		}
	} else {
		/* Normal chips: also require the hardware OK/error bits. */
		if ((len > port->p_framesize) || (rxlen != len) ||
		    ((status & GMR_FS_ANY_ERR) != 0) ||
		    ((status & GMR_FS_RX_OK) == 0)) {
			goto bad;
		}
	}

	if ((mp = allocb(len + YGE_HEADROOM, BPRI_HI)) != NULL) {

		/*
		 * good packet - yay; the frame is copied out so the
		 * DMA buffer can be recycled immediately below.
		 * (YGE_HEADROOM presumably leaves room for upstream
		 * header adjustment -- TODO confirm.)
		 */
		mp->b_rptr += YGE_HEADROOM;
		SYNCBUF(rxb, DDI_DMA_SYNC_FORKERNEL);
		bcopy(rxb->b_buf, mp->b_rptr, len);
		mp->b_wptr = mp->b_rptr + len;
	} else {
		port->p_stats.rx_nobuf++;
	}

bad:

	/* Recycle the descriptor: hand it back to the hardware. */
	PUTCTRL(ring, cons, port->p_framesize | OP_PACKET | HW_OWNER);
	SYNCENTRY(ring, cons, DDI_DMA_SYNC_FORDEV);

	/* Tell the prefetch unit about the new put index. */
	CSR_WRITE_2(dev,
	    Y2_PREF_Q_ADDR(port->p_rxq, PREF_UNIT_PUT_IDX_REG),
	    cons);

	YGE_INC(port->p_rx_cons, YGE_RX_RING_CNT);

	return (mp);
}
1869
1870 static boolean_t
1871 yge_txeof_locked(yge_port_t *port, int idx)
1872 {
1873 int prog;
1874 int16_t cons;
1875 boolean_t resched;
1876
1877 if (!port->p_running) {
1878 return (B_FALSE);
1879 }
1880
1881 cons = port->p_tx_cons;
1882 prog = 0;
1883 for (; cons != idx; YGE_INC(cons, YGE_TX_RING_CNT)) {
1884 if (port->p_tx_cnt <= 0)
1885 break;
1886 prog++;
1887 port->p_tx_cnt--;
1888 /* No need to sync LEs as we didn't update LEs. */
1889 }
1890
1891 port->p_tx_cons = cons;
1892
1893 if (prog > 0) {
1894 resched = port->p_wantw;
1895 port->p_tx_wdog = 0;
1896 port->p_wantw = B_FALSE;
1897 return (resched);
1898 } else {
1899 return (B_FALSE);
1900 }
1901 }
1902
1903 static void
1904 yge_txeof(yge_port_t *port, int idx)
1905 {
1906 boolean_t resched;
1907
1908 TX_LOCK(port->p_dev);
1909
1910 resched = yge_txeof_locked(port, idx);
1911
1912 TX_UNLOCK(port->p_dev);
1913
1914 if (resched && port->p_running) {
1915 mac_tx_update(port->p_mh);
1916 }
1917 }
1918
1919 static void
1920 yge_restart_task(yge_dev_t *dev)
1921 {
1922 yge_port_t *port;
1923
1924 DEV_LOCK(dev);
1925
1926 /* Cancel pending I/O and free all Rx/Tx buffers. */
1927 for (int i = 0; i < dev->d_num_port; i++) {
1928 port = dev->d_port[i];
1929 if (port->p_running)
1930 yge_stop_port(dev->d_port[i]);
1931 }
1932 yge_reset(dev);
1933 for (int i = 0; i < dev->d_num_port; i++) {
1934 port = dev->d_port[i];
1935
1936 if (port->p_running)
1937 yge_start_port(port);
1938 }
1939
1940 DEV_UNLOCK(dev);
1941
1942 for (int i = 0; i < dev->d_num_port; i++) {
1943 port = dev->d_port[i];
1944
1945 mii_reset(port->p_mii);
1946 if (port->p_running)
1947 mac_tx_update(port->p_mh);
1948 }
1949 }
1950
/*
 * Once-per-second periodic callback.  Reclaims Tx descriptors whose
 * completion interrupt may have been lost, and runs a Tx watchdog:
 * if a port has outstanding transmits but the hardware consumer index
 * has not moved for YGE_TX_TIMEOUT ticks, a full restart is requested
 * via the taskq (this routine must not block).
 */
static void
yge_tick(void *arg)
{
	yge_dev_t *dev = arg;
	yge_port_t *port;
	boolean_t restart = B_FALSE;
	boolean_t resched = B_FALSE;
	int idx;

	DEV_LOCK(dev);

	if (dev->d_suspended) {
		DEV_UNLOCK(dev);
		return;
	}

	for (int i = 0; i < dev->d_num_port; i++) {
		port = dev->d_port[i];

		if (!port->p_running)
			continue;

		if (port->p_tx_cnt) {
			uint32_t ridx;

			/*
			 * Reclaim first as there is a possibility of losing
			 * Tx completion interrupts.
			 */
			ridx = port->p_port == YGE_PORT_A ?
			    STAT_TXA1_RIDX : STAT_TXA2_RIDX;
			idx = CSR_READ_2(dev, ridx);
			if (port->p_tx_cons != idx) {
				resched = yge_txeof_locked(port, idx);

			} else {

				/* detect TX hang */
				port->p_tx_wdog++;
				if (port->p_tx_wdog > YGE_TX_TIMEOUT) {
					port->p_tx_wdog = 0;
					yge_error(NULL, port,
					    "TX hang detected!");
					restart = B_TRUE;
				}
			}
		}
	}

	DEV_UNLOCK(dev);
	/* Heavy work (restart) is deferred to the taskq. */
	if (restart) {
		yge_dispatch(dev, YGE_TASK_RESTART);
	} else {
		if (resched) {
			for (int i = 0; i < dev->d_num_port; i++) {
				port = dev->d_port[i];

				if (port->p_running)
					mac_tx_update(port->p_mh);
			}
		}
	}
}
2014
2015 static int
2016 yge_intr_gmac(yge_port_t *port)
2017 {
2018 yge_dev_t *dev = port->p_dev;
2019 int pnum = port->p_port;
2020 uint8_t status;
2021 int dispatch_wrk = 0;
2022
2023 status = CSR_READ_1(dev, MR_ADDR(pnum, GMAC_IRQ_SRC));
2024
2025 /* GMAC Rx FIFO overrun. */
2026 if ((status & GM_IS_RX_FF_OR) != 0) {
2027 CSR_WRITE_4(dev, MR_ADDR(pnum, RX_GMF_CTRL_T), GMF_CLI_RX_FO);
2028 yge_error(NULL, port, "Rx FIFO overrun!");
2029 dispatch_wrk |= YGE_TASK_RESTART;
2030 }
2031 /* GMAC Tx FIFO underrun. */
2032 if ((status & GM_IS_TX_FF_UR) != 0) {
2033 CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_CTRL_T), GMF_CLI_TX_FU);
2034 yge_error(NULL, port, "Tx FIFO underrun!");
2035 /*
2036 * In case of Tx underrun, we may need to flush/reset
2037 * Tx MAC but that would also require
2038 * resynchronization with status LEs. Reinitializing
2039 * status LEs would affect the other port in dual MAC
2040 * configuration so it should be avoided if we can.
2041 * Due to lack of documentation it's all vague guess
2042 * but it needs more investigation.
2043 */
2044 }
2045 return (dispatch_wrk);
2046 }
2047
2048 static void
2049 yge_handle_hwerr(yge_port_t *port, uint32_t status)
2050 {
2051 yge_dev_t *dev = port->p_dev;
2052
2053 if ((status & Y2_IS_PAR_RD1) != 0) {
2054 yge_error(NULL, port, "RAM buffer read parity error");
2055 /* Clear IRQ. */
2056 CSR_WRITE_2(dev, SELECT_RAM_BUFFER(port->p_port, B3_RI_CTRL),
2057 RI_CLR_RD_PERR);
2058 }
2059 if ((status & Y2_IS_PAR_WR1) != 0) {
2060 yge_error(NULL, port, "RAM buffer write parity error");
2061 /* Clear IRQ. */
2062 CSR_WRITE_2(dev, SELECT_RAM_BUFFER(port->p_port, B3_RI_CTRL),
2063 RI_CLR_WR_PERR);
2064 }
2065 if ((status & Y2_IS_PAR_MAC1) != 0) {
2066 yge_error(NULL, port, "Tx MAC parity error");
2067 /* Clear IRQ. */
2068 CSR_WRITE_4(dev, MR_ADDR(port->p_port, TX_GMF_CTRL_T),
2069 GMF_CLI_TX_PE);
2070 }
2071 if ((status & Y2_IS_PAR_RX1) != 0) {
2072 yge_error(NULL, port, "Rx parity error");
2073 /* Clear IRQ. */
2074 CSR_WRITE_4(dev, Q_ADDR(port->p_rxq, Q_CSR), BMU_CLR_IRQ_PAR);
2075 }
2076 if ((status & (Y2_IS_TCP_TXS1 | Y2_IS_TCP_TXA1)) != 0) {
2077 yge_error(NULL, port, "TCP segmentation error");
2078 /* Clear IRQ. */
2079 CSR_WRITE_4(dev, Q_ADDR(port->p_txq, Q_CSR), BMU_CLR_IRQ_TCP);
2080 }
2081 }
2082
/*
 * Decode and clear device-level hardware error interrupts: timestamp
 * overflow, PCI/PCI Express errors, and per-port error bits (the
 * latter are delegated to yge_handle_hwerr() for each port).
 */
static void
yge_intr_hwerr(yge_dev_t *dev)
{
	uint32_t status;
	uint32_t tlphead[4];

	status = CSR_READ_4(dev, B0_HWE_ISRC);
	/* Time Stamp timer overflow. */
	if ((status & Y2_IS_TIST_OV) != 0)
		CSR_WRITE_1(dev, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
	if ((status & Y2_IS_PCI_NEXP) != 0) {
		/*
		 * PCI Express Error occurred which is not described in PEX
		 * spec.
		 * This error is also mapped either to Master Abort(
		 * Y2_IS_MST_ERR) or Target Abort (Y2_IS_IRQ_STAT) bit and
		 * can only be cleared there.
		 */
		yge_error(dev, NULL, "PCI Express protocol violation error");
	}

	if ((status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) != 0) {
		uint16_t v16;

		if ((status & Y2_IS_IRQ_STAT) != 0)
			yge_error(dev, NULL, "Unexpected IRQ Status error");
		if ((status & Y2_IS_MST_ERR) != 0)
			yge_error(dev, NULL, "Unexpected IRQ Master error");
		/* Reset all bits in the PCI status register. */
		v16 = pci_config_get16(dev->d_pcih, PCI_CONF_STAT);
		/* Status is write-1-to-clear; unlock config writes first. */
		CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		pci_config_put16(dev->d_pcih, PCI_CONF_STAT, v16 |
		    PCI_STAT_S_PERROR | PCI_STAT_S_SYSERR | PCI_STAT_R_MAST_AB |
		    PCI_STAT_R_TARG_AB | PCI_STAT_PERROR);
		CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	}

	/* Check for PCI Express Uncorrectable Error. */
	if ((status & Y2_IS_PCI_EXP) != 0) {
		uint32_t v32;

		/*
		 * On PCI Express bus bridges are called root complexes (RC).
		 * PCI Express errors are recognized by the root complex too,
		 * which requests the system to handle the problem. After
		 * error occurrence it may be that no access to the adapter
		 * may be performed any longer.
		 */

		v32 = CSR_PCI_READ_4(dev, PEX_UNC_ERR_STAT);
		if ((v32 & PEX_UNSUP_REQ) != 0) {
			/* Ignore unsupported request error. */
			yge_error(dev, NULL,
			    "Uncorrectable PCI Express error");
		}
		if ((v32 & (PEX_FATAL_ERRORS | PEX_POIS_TLP)) != 0) {
			int i;

			/* Get TLP header form Log Registers. */
			for (i = 0; i < 4; i++)
				tlphead[i] = CSR_PCI_READ_4(dev,
				    PEX_HEADER_LOG + i * 4);
			/* Check for vendor defined broadcast message. */
			if (!(tlphead[0] == 0x73004001 && tlphead[1] == 0x7f)) {
				/* Fatal: stop listening for PEX errors. */
				dev->d_intrhwemask &= ~Y2_IS_PCI_EXP;
				CSR_WRITE_4(dev, B0_HWE_IMSK,
				    dev->d_intrhwemask);
				(void) CSR_READ_4(dev, B0_HWE_IMSK);
			}
		}
		/* Clear the interrupt. */
		CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		CSR_PCI_WRITE_4(dev, PEX_UNC_ERR_STAT, 0xffffffff);
		CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	}

	/* Port B's error bits live 8 positions above port A's. */
	if ((status & Y2_HWE_L1_MASK) != 0 && dev->d_port[YGE_PORT_A] != NULL)
		yge_handle_hwerr(dev->d_port[YGE_PORT_A], status);
	if ((status & Y2_HWE_L2_MASK) != 0 && dev->d_port[YGE_PORT_B] != NULL)
		yge_handle_hwerr(dev->d_port[YGE_PORT_B], status >> 8);
}
2164
2165 /*
2166 * Returns B_TRUE if there is potentially more work to do.
2167 */
2168 static boolean_t
2169 yge_handle_events(yge_dev_t *dev, mblk_t **heads, mblk_t **tails, int *txindex)
2170 {
2171 yge_port_t *port;
2172 yge_ring_t *ring;
2173 uint32_t control, status;
2174 int cons, idx, len, pnum;
2175 mblk_t *mp;
2176 uint32_t rxprogs[2];
2177
2178 rxprogs[0] = rxprogs[1] = 0;
2179
2180 idx = CSR_READ_2(dev, STAT_PUT_IDX);
2181 if (idx == dev->d_stat_cons) {
2182 return (B_FALSE);
2183 }
2184
2185 ring = &dev->d_status_ring;
2186
2187 for (cons = dev->d_stat_cons; cons != idx; ) {
2188 /* Sync status LE. */
2189 SYNCENTRY(ring, cons, DDI_DMA_SYNC_FORKERNEL);
2190 control = GETCTRL(ring, cons);
2191 if ((control & HW_OWNER) == 0) {
2192 yge_error(dev, NULL, "Status descriptor error: "
2193 "index %d, control %x", cons, control);
2194 break;
2195 }
2196
2197 status = GETSTAT(ring, cons);
2198
2199 control &= ~HW_OWNER;
2200 len = control & STLE_LEN_MASK;
2201 pnum = ((control >> 16) & 0x01);
2202 port = dev->d_port[pnum];
2203 if (port == NULL) {
2204 yge_error(dev, NULL, "Invalid port opcode: 0x%08x",
2205 control & STLE_OP_MASK);
2206 goto finish;
2207 }
2208
2209 switch (control & STLE_OP_MASK) {
2210 case OP_RXSTAT:
2211 mp = yge_rxeof(port, status, len);
2212 if (mp != NULL) {
2213 if (heads[pnum] == NULL)
2214 heads[pnum] = mp;
2215 else
2216 tails[pnum]->b_next = mp;
2217 tails[pnum] = mp;
2218 }
2219
2220 rxprogs[pnum]++;
2221 break;
2222
2223 case OP_TXINDEXLE:
2224 txindex[0] = status & STLE_TXA1_MSKL;
2225 txindex[1] =
2226 ((status & STLE_TXA2_MSKL) >> STLE_TXA2_SHIFTL) |
2227 ((len & STLE_TXA2_MSKH) << STLE_TXA2_SHIFTH);
2228 break;
2229 default:
2230 yge_error(dev, NULL, "Unhandled opcode: 0x%08x",
2231 control & STLE_OP_MASK);
2232 break;
2233 }
2234 finish:
2235
2236 /* Give it back to HW. */
2237 PUTCTRL(ring, cons, control);
2238 SYNCENTRY(ring, cons, DDI_DMA_SYNC_FORDEV);
2239
2240 YGE_INC(cons, YGE_STAT_RING_CNT);
2241 if (rxprogs[pnum] > dev->d_process_limit) {
2242 break;
2243 }
2244 }
2245
2246 dev->d_stat_cons = cons;
2247 if (dev->d_stat_cons != CSR_READ_2(dev, STAT_PUT_IDX))
2248 return (B_TRUE);
2249 else
2250 return (B_FALSE);
2251 }
2252
/*
 * Interrupt service routine.  arg1 is the device soft state; arg2 (the
 * second DDI interrupt argument) is unused.  Collects rx chains and tx
 * completion indices under RX_LOCK, then delivers them to the MAC layer
 * after dropping the lock.
 */
/*ARGSUSED1*/
static uint_t
yge_intr(caddr_t arg1, caddr_t arg2)
{
	yge_dev_t *dev;
	yge_port_t *port1;
	yge_port_t *port2;
	uint32_t status;
	mblk_t *heads[2], *tails[2];	/* per-port received mblk chains */
	int txindex[2];			/* per-port tx completion indices */
	int dispatch_wrk;		/* deferred work from GMAC interrupts */

	dev = (void *)arg1;

	heads[0] = heads[1] = NULL;
	tails[0] = tails[1] = NULL;
	txindex[0] = txindex[1] = -1;	/* -1 == no tx completion seen */
	dispatch_wrk = 0;

	port1 = dev->d_port[YGE_PORT_A];
	port2 = dev->d_port[YGE_PORT_B];

	RX_LOCK(dev);

	if (dev->d_suspended) {
		RX_UNLOCK(dev);
		return (DDI_INTR_UNCLAIMED);
	}

	/* Get interrupt source. */
	status = CSR_READ_4(dev, B0_Y2_SP_ISRC2);
	if (status == 0 || status == 0xffffffff ||
	    (status & dev->d_intrmask) == 0) { /* Stray interrupt ? */
		/* Reenable interrupts. */
		CSR_WRITE_4(dev, B0_Y2_SP_ICR, 2);
		RX_UNLOCK(dev);
		return (DDI_INTR_UNCLAIMED);
	}

	if ((status & Y2_IS_HW_ERR) != 0) {
		yge_intr_hwerr(dev);
	}

	if (status & Y2_IS_IRQ_MAC1) {
		dispatch_wrk |= yge_intr_gmac(port1);
	}
	if (status & Y2_IS_IRQ_MAC2) {
		dispatch_wrk |= yge_intr_gmac(port2);
	}

	/* Descriptor errors: mask them off so we do not interrupt-storm. */
	if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) {
		yge_error(NULL, status & Y2_IS_CHK_RX1 ? port1 : port2,
		    "Rx descriptor error");
		dev->d_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2);
		CSR_WRITE_4(dev, B0_IMSK, dev->d_intrmask);
		(void) CSR_READ_4(dev, B0_IMSK);
	}
	if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) {
		yge_error(NULL, status & Y2_IS_CHK_TXA1 ? port1 : port2,
		    "Tx descriptor error");
		dev->d_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2);
		CSR_WRITE_4(dev, B0_IMSK, dev->d_intrmask);
		(void) CSR_READ_4(dev, B0_IMSK);
	}

	/* handle events until it returns false */
	while (yge_handle_events(dev, heads, tails, txindex))
		/* NOP */;

	/* Do receive/transmit events */
	if ((status & Y2_IS_STAT_BMU)) {
		CSR_WRITE_4(dev, STAT_CTRL, SC_STAT_CLR_IRQ);
	}

	/* Reenable interrupts. */
	CSR_WRITE_4(dev, B0_Y2_SP_ICR, 2);

	RX_UNLOCK(dev);

	if (dispatch_wrk) {
		yge_dispatch(dev, dispatch_wrk);
	}

	/* Deliver collected work outside the lock. */
	if (port1->p_running) {
		if (txindex[0] >= 0) {
			yge_txeof(port1, txindex[0]);
		}
		if (heads[0])
			mac_rx(port1->p_mh, NULL, heads[0]);
	} else {
		/* Port is down: discard anything collected for it. */
		if (heads[0]) {
			mblk_t *mp;
			while ((mp = heads[0]) != NULL) {
				heads[0] = mp->b_next;
				freemsg(mp);
			}
		}
	}

	if (port2->p_running) {
		if (txindex[1] >= 0) {
			yge_txeof(port2, txindex[1]);
		}
		if (heads[1])
			mac_rx(port2->p_mh, NULL, heads[1]);
	} else {
		if (heads[1]) {
			mblk_t *mp;
			while ((mp = heads[1]) != NULL) {
				heads[1] = mp->b_next;
				freemsg(mp);
			}
		}
	}

	return (DDI_INTR_CLAIMED);
}
2370
/*
 * Configure store-and-forward and jumbo-frame handling in the port's
 * Tx GMAC FIFO, based on chip revision and the current MTU.
 */
static void
yge_set_tx_stfwd(yge_port_t *port)
{
	yge_dev_t *dev = port->p_dev;
	int pnum = port->p_port;

	switch (dev->d_hw_id) {
	case CHIP_ID_YUKON_EX:
		/* Rev A0 needs the same workaround as the default chips. */
		if (dev->d_hw_rev == CHIP_REV_YU_EX_A0)
			goto yukon_ex_workaround;

		if (port->p_mtu > ETHERMTU)
			CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_CTRL_T),
			    TX_JUMBO_ENA | TX_STFW_ENA);
		else
			CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_CTRL_T),
			    TX_JUMBO_DIS | TX_STFW_ENA);
		break;
	default:
yukon_ex_workaround:
		if (port->p_mtu > ETHERMTU) {
			/* Set Tx GMAC FIFO Almost Empty Threshold. */
			CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_AE_THR),
			    MSK_ECU_JUMBO_WM << 16 | MSK_ECU_AE_THR);
			/* Disable Store & Forward mode for Tx. */
			CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_CTRL_T),
			    TX_JUMBO_ENA | TX_STFW_DIS);
		} else {
			/* Enable Store & Forward mode for Tx. */
			CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_CTRL_T),
			    TX_JUMBO_DIS | TX_STFW_ENA);
		}
		break;
	}
}
2406
/*
 * Bring a port's data path up: size the receive frame buffer, reset
 * and program the GMAC, the Rx/Tx MAC FIFOs, the RAM buffer and the
 * Rx/Tx queue BMUs, initialize the descriptor rings, and finally
 * unmask the port's interrupts and enable the receiver/transmitter.
 */
static void
yge_start_port(yge_port_t *port)
{
	yge_dev_t *dev = port->p_dev;
	uint16_t gmac;
	int32_t pnum;
	int32_t rxq;
	int32_t txq;
	uint32_t reg;

	pnum = port->p_port;
	txq = port->p_txq;
	rxq = port->p_rxq;

	/* Frame size: at least a standard Ethernet MTU, plus VLAN header. */
	if (port->p_mtu < ETHERMTU)
		port->p_framesize = ETHERMTU;
	else
		port->p_framesize = port->p_mtu;
	port->p_framesize += sizeof (struct ether_vlan_header);

	/*
	 * Note for the future, if we enable offloads:
	 * In Yukon EC Ultra, TSO & checksum offload is not
	 * supported for jumbo frame.
	 */

	/* GMAC Control reset */
	CSR_WRITE_4(dev, MR_ADDR(pnum, GMAC_CTRL), GMC_RST_SET);
	CSR_WRITE_4(dev, MR_ADDR(pnum, GMAC_CTRL), GMC_RST_CLR);
	CSR_WRITE_4(dev, MR_ADDR(pnum, GMAC_CTRL), GMC_F_LOOPB_OFF);
	if (dev->d_hw_id == CHIP_ID_YUKON_EX)
		CSR_WRITE_4(dev, MR_ADDR(pnum, GMAC_CTRL),
		    GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON |
		    GMC_BYP_RETR_ON);
	/*
	 * Initialize GMAC first such that speed/duplex/flow-control
	 * parameters are renegotiated when the interface is brought up.
	 */
	GMAC_WRITE_2(dev, pnum, GM_GP_CTRL, 0);

	/* Dummy read the Interrupt Source Register. */
	(void) CSR_READ_1(dev, MR_ADDR(pnum, GMAC_IRQ_SRC));

	/* Clear MIB stats. */
	yge_stats_clear(port);

	/* Disable FCS. */
	GMAC_WRITE_2(dev, pnum, GM_RX_CTRL, GM_RXCR_CRC_DIS);

	/* Setup Transmit Control Register. */
	GMAC_WRITE_2(dev, pnum, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));

	/* Setup Transmit Flow Control Register. */
	GMAC_WRITE_2(dev, pnum, GM_TX_FLOW_CTRL, 0xffff);

	/* Setup Transmit Parameter Register. */
	GMAC_WRITE_2(dev, pnum, GM_TX_PARAM,
	    TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
	    TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));

	gmac = DATA_BLIND_VAL(DATA_BLIND_DEF) |
	    GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);

	if (port->p_mtu > ETHERMTU)
		gmac |= GM_SMOD_JUMBO_ENA;
	GMAC_WRITE_2(dev, pnum, GM_SERIAL_MODE, gmac);

	/* Disable interrupts for counter overflows. */
	GMAC_WRITE_2(dev, pnum, GM_TX_IRQ_MSK, 0);
	GMAC_WRITE_2(dev, pnum, GM_RX_IRQ_MSK, 0);
	GMAC_WRITE_2(dev, pnum, GM_TR_IRQ_MSK, 0);

	/* Configure Rx MAC FIFO. */
	CSR_WRITE_4(dev, MR_ADDR(pnum, RX_GMF_CTRL_T), GMF_RST_SET);
	CSR_WRITE_4(dev, MR_ADDR(pnum, RX_GMF_CTRL_T), GMF_RST_CLR);
	reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
	if (dev->d_hw_id == CHIP_ID_YUKON_FE_P ||
	    dev->d_hw_id == CHIP_ID_YUKON_EX)
		reg |= GMF_RX_OVER_ON;
	CSR_WRITE_4(dev, MR_ADDR(pnum, RX_GMF_CTRL_T), reg);

	/* Set receive filter. */
	yge_setrxfilt(port);

	/* Flush Rx MAC FIFO on any flow control or error. */
	CSR_WRITE_4(dev, MR_ADDR(pnum, RX_GMF_FL_MSK), GMR_FS_ANY_ERR);

	/*
	 * Set Rx FIFO flush threshold to 64 bytes + 1 FIFO word
	 * due to hardware hang on receipt of pause frames.
	 */
	reg = RX_GMF_FL_THR_DEF + 1;
	/* FE+ magic */
	if ((dev->d_hw_id == CHIP_ID_YUKON_FE_P) &&
	    (dev->d_hw_rev == CHIP_REV_YU_FE2_A0))
		reg = 0x178;

	CSR_WRITE_4(dev, MR_ADDR(pnum, RX_GMF_FL_THR), reg);

	/* Configure Tx MAC FIFO. */
	CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_CTRL_T), GMF_RST_SET);
	CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_CTRL_T), GMF_RST_CLR);
	CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_CTRL_T), GMF_OPER_ON);

	/* Disable hardware VLAN tag insertion/stripping. */
	CSR_WRITE_4(dev, MR_ADDR(pnum, RX_GMF_CTRL_T), RX_VLAN_STRIP_OFF);
	CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_CTRL_T), TX_VLAN_TAG_OFF);

	if ((port->p_flags & PORT_FLAG_RAMBUF) == 0) {
		/* Set Rx Pause threshold. */
		if ((dev->d_hw_id == CHIP_ID_YUKON_FE_P) &&
		    (dev->d_hw_rev == CHIP_REV_YU_FE2_A0)) {
			CSR_WRITE_1(dev, MR_ADDR(pnum, RX_GMF_LP_THR),
			    MSK_ECU_LLPP);
			CSR_WRITE_1(dev, MR_ADDR(pnum, RX_GMF_UP_THR),
			    MSK_FEP_ULPP);
		} else {
			CSR_WRITE_1(dev, MR_ADDR(pnum, RX_GMF_LP_THR),
			    MSK_ECU_LLPP);
			CSR_WRITE_1(dev, MR_ADDR(pnum, RX_GMF_UP_THR),
			    MSK_ECU_ULPP);
		}
		/* Configure store-and-forward for TX */
		yge_set_tx_stfwd(port);
	}

	if ((dev->d_hw_id == CHIP_ID_YUKON_FE_P) &&
	    (dev->d_hw_rev == CHIP_REV_YU_FE2_A0)) {
		/* Disable dynamic watermark */
		reg = CSR_READ_4(dev, MR_ADDR(pnum, TX_GMF_EA));
		reg &= ~TX_DYN_WM_ENA;
		CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_EA), reg);
	}

	/*
	 * Disable Force Sync bit and Alloc bit in Tx RAM interface
	 * arbiter as we don't use Sync Tx queue.
	 */
	CSR_WRITE_1(dev, MR_ADDR(pnum, TXA_CTRL),
	    TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
	/* Enable the RAM Interface Arbiter. */
	CSR_WRITE_1(dev, MR_ADDR(pnum, TXA_CTRL), TXA_ENA_ARB);

	/* Setup RAM buffer. */
	yge_set_rambuffer(port);

	/* Disable Tx sync Queue. */
	CSR_WRITE_1(dev, RB_ADDR(port->p_txsq, RB_CTRL), RB_RST_SET);

	/* Setup Tx Queue Bus Memory Interface. */
	CSR_WRITE_4(dev, Q_ADDR(txq, Q_CSR), BMU_CLR_RESET);
	CSR_WRITE_4(dev, Q_ADDR(txq, Q_CSR), BMU_OPER_INIT);
	CSR_WRITE_4(dev, Q_ADDR(txq, Q_CSR), BMU_FIFO_OP_ON);
	CSR_WRITE_2(dev, Q_ADDR(txq, Q_WM), MSK_BMU_TX_WM);

	switch (dev->d_hw_id) {
	case CHIP_ID_YUKON_EC_U:
		if (dev->d_hw_rev == CHIP_REV_YU_EC_U_A0) {
			/* Fix for Yukon-EC Ultra: set BMU FIFO level */
			CSR_WRITE_2(dev, Q_ADDR(txq, Q_AL), MSK_ECU_TXFF_LEV);
		}
		break;
	case CHIP_ID_YUKON_EX:
		/*
		 * Yukon Extreme seems to have silicon bug for
		 * automatic Tx checksum calculation capability.
		 */
		if (dev->d_hw_rev == CHIP_REV_YU_EX_B0)
			CSR_WRITE_4(dev, Q_ADDR(txq, Q_F), F_TX_CHK_AUTO_OFF);
		break;
	}

	/* Setup Rx Queue Bus Memory Interface. */
	CSR_WRITE_4(dev, Q_ADDR(rxq, Q_CSR), BMU_CLR_RESET);
	CSR_WRITE_4(dev, Q_ADDR(rxq, Q_CSR), BMU_OPER_INIT);
	CSR_WRITE_4(dev, Q_ADDR(rxq, Q_CSR), BMU_FIFO_OP_ON);
	if (dev->d_bustype == PEX_BUS) {
		CSR_WRITE_2(dev, Q_ADDR(rxq, Q_WM), 0x80);
	} else {
		CSR_WRITE_2(dev, Q_ADDR(rxq, Q_WM), MSK_BMU_RX_WM);
	}
	if (dev->d_hw_id == CHIP_ID_YUKON_EC_U &&
	    dev->d_hw_rev >= CHIP_REV_YU_EC_U_A1) {
		/* MAC Rx RAM Read is controlled by hardware. */
		CSR_WRITE_4(dev, Q_ADDR(rxq, Q_F), F_M_RX_RAM_DIS);
	}

	yge_init_tx_ring(port);

	/* Disable Rx checksum offload and RSS hash. */
	CSR_WRITE_4(dev, Q_ADDR(rxq, Q_CSR),
	    BMU_DIS_RX_CHKSUM | BMU_DIS_RX_RSS_HASH);

	yge_init_rx_ring(port);

	/* Configure interrupt handling. */
	if (port == dev->d_port[YGE_PORT_A]) {
		dev->d_intrmask |= Y2_IS_PORT_A;
		dev->d_intrhwemask |= Y2_HWE_L1_MASK;
	} else if (port == dev->d_port[YGE_PORT_B]) {
		dev->d_intrmask |= Y2_IS_PORT_B;
		dev->d_intrhwemask |= Y2_HWE_L2_MASK;
	}
	CSR_WRITE_4(dev, B0_HWE_IMSK, dev->d_intrhwemask);
	(void) CSR_READ_4(dev, B0_HWE_IMSK);
	CSR_WRITE_4(dev, B0_IMSK, dev->d_intrmask);
	(void) CSR_READ_4(dev, B0_IMSK);

	/* Enable RX/TX GMAC */
	gmac = GMAC_READ_2(dev, pnum, GM_GP_CTRL);
	gmac |= (GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
	GMAC_WRITE_2(port->p_dev, port->p_port, GM_GP_CTRL, gmac);
	/* Read again to ensure writing. */
	(void) GMAC_READ_2(dev, pnum, GM_GP_CTRL);

	/* Reset TX timer */
	port->p_tx_wdog = 0;
}
2625
/*
 * Program the RAM buffer queue boundaries and thresholds for this
 * port.  A no-op on chips without a RAM buffer (PORT_FLAG_RAMBUF
 * clear).  Addresses and thresholds are written in units of 8-byte
 * words, hence the "/ 8" conversions.
 */
static void
yge_set_rambuffer(yge_port_t *port)
{
	yge_dev_t *dev;
	int ltpp, utpp;		/* lower/upper thresholds, 8-byte units */
	int pnum;
	uint32_t rxq;
	uint32_t txq;

	dev = port->p_dev;
	pnum = port->p_port;
	rxq = port->p_rxq;
	txq = port->p_txq;

	if ((port->p_flags & PORT_FLAG_RAMBUF) == 0)
		return;

	/* Setup Rx Queue. */
	CSR_WRITE_1(dev, RB_ADDR(rxq, RB_CTRL), RB_RST_CLR);
	CSR_WRITE_4(dev, RB_ADDR(rxq, RB_START), dev->d_rxqstart[pnum] / 8);
	CSR_WRITE_4(dev, RB_ADDR(rxq, RB_END), dev->d_rxqend[pnum] / 8);
	CSR_WRITE_4(dev, RB_ADDR(rxq, RB_WP), dev->d_rxqstart[pnum] / 8);
	CSR_WRITE_4(dev, RB_ADDR(rxq, RB_RP), dev->d_rxqstart[pnum] / 8);

	utpp =
	    (dev->d_rxqend[pnum] + 1 - dev->d_rxqstart[pnum] - RB_ULPP) / 8;
	ltpp =
	    (dev->d_rxqend[pnum] + 1 - dev->d_rxqstart[pnum] - RB_LLPP_B) / 8;

	/* Small Rx queues use the smaller lower-threshold offset. */
	if (dev->d_rxqsize < MSK_MIN_RXQ_SIZE)
		ltpp += (RB_LLPP_B - RB_LLPP_S) / 8;

	CSR_WRITE_4(dev, RB_ADDR(rxq, RB_RX_UTPP), utpp);
	CSR_WRITE_4(dev, RB_ADDR(rxq, RB_RX_LTPP), ltpp);
	/* Set Rx priority(RB_RX_UTHP/RB_RX_LTHP) thresholds? */

	CSR_WRITE_1(dev, RB_ADDR(rxq, RB_CTRL), RB_ENA_OP_MD);
	(void) CSR_READ_1(dev, RB_ADDR(rxq, RB_CTRL));

	/* Setup Tx Queue. */
	CSR_WRITE_1(dev, RB_ADDR(txq, RB_CTRL), RB_RST_CLR);
	CSR_WRITE_4(dev, RB_ADDR(txq, RB_START), dev->d_txqstart[pnum] / 8);
	CSR_WRITE_4(dev, RB_ADDR(txq, RB_END), dev->d_txqend[pnum] / 8);
	CSR_WRITE_4(dev, RB_ADDR(txq, RB_WP), dev->d_txqstart[pnum] / 8);
	CSR_WRITE_4(dev, RB_ADDR(txq, RB_RP), dev->d_txqstart[pnum] / 8);
	/* Enable Store & Forward for Tx side. */
	CSR_WRITE_1(dev, RB_ADDR(txq, RB_CTRL), RB_ENA_STFWD);
	CSR_WRITE_1(dev, RB_ADDR(txq, RB_CTRL), RB_ENA_OP_MD);
	(void) CSR_READ_1(dev, RB_ADDR(txq, RB_CTRL));
}
2676
/*
 * Reset and program a queue's descriptor prefetch unit: point it at
 * the ring's DMA address, give it the last valid list index, then
 * turn the unit on.
 */
static void
yge_set_prefetch(yge_dev_t *dev, int qaddr, yge_ring_t *ring)
{
	/* Reset the prefetch unit. */
	CSR_WRITE_4(dev, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_SET);
	CSR_WRITE_4(dev, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_CLR);
	/* Set LE base address. */
	CSR_WRITE_4(dev, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_LOW_REG),
	    YGE_ADDR_LO(ring->r_paddr));
	CSR_WRITE_4(dev, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_HI_REG),
	    YGE_ADDR_HI(ring->r_paddr));
	/* Set the list last index. */
	CSR_WRITE_2(dev, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_LAST_IDX_REG),
	    ring->r_num - 1);
	/* Turn on prefetch unit. */
	CSR_WRITE_4(dev, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_OP_ON);
	/* Dummy read to ensure write. */
	(void) CSR_READ_4(dev, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG));
}
2699
2700 static void
2701 yge_stop_port(yge_port_t *port)
2702 {
2703 yge_dev_t *dev = port->p_dev;
2704 int pnum = port->p_port;
2705 uint32_t txq = port->p_txq;
2706 uint32_t rxq = port->p_rxq;
2707 uint32_t val;
2708 int i;
2709
2710 dev = port->p_dev;
2711
2712 /*
2713 * shutdown timeout
2714 */
2715 port->p_tx_wdog = 0;
2716
2717 /* Disable interrupts. */
2718 if (pnum == YGE_PORT_A) {
2719 dev->d_intrmask &= ~Y2_IS_PORT_A;
2720 dev->d_intrhwemask &= ~Y2_HWE_L1_MASK;
2721 } else {
2722 dev->d_intrmask &= ~Y2_IS_PORT_B;
2723 dev->d_intrhwemask &= ~Y2_HWE_L2_MASK;
2724 }
2725 CSR_WRITE_4(dev, B0_HWE_IMSK, dev->d_intrhwemask);
2726 (void) CSR_READ_4(dev, B0_HWE_IMSK);
2727 CSR_WRITE_4(dev, B0_IMSK, dev->d_intrmask);
2728 (void) CSR_READ_4(dev, B0_IMSK);
2729
2730 /* Disable Tx/Rx MAC. */
2731 val = GMAC_READ_2(dev, pnum, GM_GP_CTRL);
2732 val &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
2733 GMAC_WRITE_2(dev, pnum, GM_GP_CTRL, val);
2734 /* Read again to ensure writing. */
2735 (void) GMAC_READ_2(dev, pnum, GM_GP_CTRL);
2736
2737 /* Update stats and clear counters. */
2738 yge_stats_update(port);
2739
2740 /* Stop Tx BMU. */
2741 CSR_WRITE_4(dev, Q_ADDR(txq, Q_CSR), BMU_STOP);
2742 val = CSR_READ_4(dev, Q_ADDR(txq, Q_CSR));
2743 for (i = 0; i < YGE_TIMEOUT; i += 10) {
2744 if ((val & (BMU_STOP | BMU_IDLE)) == 0) {
2745 CSR_WRITE_4(dev, Q_ADDR(txq, Q_CSR), BMU_STOP);
2746 val = CSR_READ_4(dev, Q_ADDR(txq, Q_CSR));
2747 } else
2748 break;
2749 drv_usecwait(10);
2750 }
2751 /* This is probably fairly catastrophic. */
2752 if ((val & (BMU_STOP | BMU_IDLE)) == 0)
2753 yge_error(NULL, port, "Tx BMU stop failed");
2754
2755 CSR_WRITE_1(dev, RB_ADDR(txq, RB_CTRL), RB_RST_SET | RB_DIS_OP_MD);
2756
2757 /* Disable all GMAC interrupt. */
2758 CSR_WRITE_1(dev, MR_ADDR(pnum, GMAC_IRQ_MSK), 0);
2759
2760 /* Disable the RAM Interface Arbiter. */
2761 CSR_WRITE_1(dev, MR_ADDR(pnum, TXA_CTRL), TXA_DIS_ARB);
2762
2763 /* Reset the PCI FIFO of the async Tx queue */
2764 CSR_WRITE_4(dev, Q_ADDR(txq, Q_CSR), BMU_RST_SET | BMU_FIFO_RST);
2765
2766 /* Reset the Tx prefetch units. */
2767 CSR_WRITE_4(dev, Y2_PREF_Q_ADDR(txq, PREF_UNIT_CTRL_REG),
2768 PREF_UNIT_RST_SET);
2769
2770 /* Reset the RAM Buffer async Tx queue. */
2771 CSR_WRITE_1(dev, RB_ADDR(txq, RB_CTRL), RB_RST_SET);
2772
2773 /* Reset Tx MAC FIFO. */
2774 CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_CTRL_T), GMF_RST_SET);
2775 /* Set Pause Off. */
2776 CSR_WRITE_4(dev, MR_ADDR(pnum, GMAC_CTRL), GMC_PAUSE_OFF);
2777
2778 /*
2779 * The Rx Stop command will not work for Yukon-2 if the BMU does not
2780 * reach the end of packet and since we can't make sure that we have
2781 * incoming data, we must reset the BMU while it is not during a DMA
2782 * transfer. Since it is possible that the Rx path is still active,
2783 * the Rx RAM buffer will be stopped first, so any possible incoming
2784 * data will not trigger a DMA. After the RAM buffer is stopped, the
2785 * BMU is polled until any DMA in progress is ended and only then it
2786 * will be reset.
2787 */
2788
2789 /* Disable the RAM Buffer receive queue. */
2790 CSR_WRITE_1(dev, RB_ADDR(rxq, RB_CTRL), RB_DIS_OP_MD);
2791 for (i = 0; i < YGE_TIMEOUT; i += 10) {
2792 if (CSR_READ_1(dev, RB_ADDR(rxq, Q_RSL)) ==
2793 CSR_READ_1(dev, RB_ADDR(rxq, Q_RL)))
2794 break;
2795 drv_usecwait(10);
2796 }
2797 /* This is probably nearly a fatal error. */
2798 if (i == YGE_TIMEOUT)
2799 yge_error(NULL, port, "Rx BMU stop failed");
2800
2801 CSR_WRITE_4(dev, Q_ADDR(rxq, Q_CSR), BMU_RST_SET | BMU_FIFO_RST);
2802 /* Reset the Rx prefetch unit. */
2803 CSR_WRITE_4(dev, Y2_PREF_Q_ADDR(rxq, PREF_UNIT_CTRL_REG),
2804 PREF_UNIT_RST_SET);
2805 /* Reset the RAM Buffer receive queue. */
2806 CSR_WRITE_1(dev, RB_ADDR(rxq, RB_CTRL), RB_RST_SET);
2807 /* Reset Rx MAC FIFO. */
2808 CSR_WRITE_4(dev, MR_ADDR(pnum, RX_GMF_CTRL_T), GMF_RST_SET);
2809 }
2810
2811 /*
2812 * When GM_PAR_MIB_CLR bit of GM_PHY_ADDR is set, reading lower
2813 * counter clears high 16 bits of the counter such that accessing
2814 * lower 16 bits should be the last operation.
2815 */
2816 #define YGE_READ_MIB32(x, y) \
2817 GMAC_READ_4(dev, x, y)
2818
2819 #define YGE_READ_MIB64(x, y) \
2820 ((((uint64_t)YGE_READ_MIB32(x, (y) + 8)) << 32) + \
2821 (uint64_t)YGE_READ_MIB32(x, y))
2822
2823 static void
2824 yge_stats_clear(yge_port_t *port)
2825 {
2826 yge_dev_t *dev;
2827 uint16_t gmac;
2828 int32_t pnum;
2829
2830 pnum = port->p_port;
2831 dev = port->p_dev;
2832
2833 /* Set MIB Clear Counter Mode. */
2834 gmac = GMAC_READ_2(dev, pnum, GM_PHY_ADDR);
2835 GMAC_WRITE_2(dev, pnum, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);
2836 /* Read all MIB Counters with Clear Mode set. */
2837 for (int i = GM_RXF_UC_OK; i <= GM_TXE_FIFO_UR; i += 4)
2838 (void) YGE_READ_MIB32(pnum, i);
2839 /* Clear MIB Clear Counter Mode. */
2840 gmac &= ~GM_PAR_MIB_CLR;
2841 GMAC_WRITE_2(dev, pnum, GM_PHY_ADDR, gmac);
2842 }
2843
/*
 * Harvest the hardware MIB counters into the port's soft statistics.
 * Reads are done with GM_PAR_MIB_CLR set (see the note above
 * YGE_READ_MIB32), and each counter is accumulated with "+=" —
 * presumably each read yields the count since the previous harvest.
 * The GM_*_SPARE* reads are discarded; they only step through the
 * counter register range.
 */
static void
yge_stats_update(yge_port_t *port)
{
	yge_dev_t *dev;
	struct yge_hw_stats *stats;
	uint16_t gmac;
	int32_t pnum;

	dev = port->p_dev;
	pnum = port->p_port;

	/* Don't touch hardware while suspended or when the port is down. */
	if (dev->d_suspended || !port->p_running) {
		return;
	}
	stats = &port->p_stats;
	/* Set MIB Clear Counter Mode. */
	gmac = GMAC_READ_2(dev, pnum, GM_PHY_ADDR);
	GMAC_WRITE_2(dev, pnum, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);

	/* Rx stats. */
	stats->rx_ucast_frames += YGE_READ_MIB32(pnum, GM_RXF_UC_OK);
	stats->rx_bcast_frames += YGE_READ_MIB32(pnum, GM_RXF_BC_OK);
	stats->rx_pause_frames += YGE_READ_MIB32(pnum, GM_RXF_MPAUSE);
	stats->rx_mcast_frames += YGE_READ_MIB32(pnum, GM_RXF_MC_OK);
	stats->rx_crc_errs += YGE_READ_MIB32(pnum, GM_RXF_FCS_ERR);
	(void) YGE_READ_MIB32(pnum, GM_RXF_SPARE1);
	stats->rx_good_octets += YGE_READ_MIB64(pnum, GM_RXO_OK_LO);
	stats->rx_bad_octets += YGE_READ_MIB64(pnum, GM_RXO_ERR_LO);
	stats->rx_runts += YGE_READ_MIB32(pnum, GM_RXF_SHT);
	stats->rx_runt_errs += YGE_READ_MIB32(pnum, GM_RXE_FRAG);
	stats->rx_pkts_64 += YGE_READ_MIB32(pnum, GM_RXF_64B);
	stats->rx_pkts_65_127 += YGE_READ_MIB32(pnum, GM_RXF_127B);
	stats->rx_pkts_128_255 += YGE_READ_MIB32(pnum, GM_RXF_255B);
	stats->rx_pkts_256_511 += YGE_READ_MIB32(pnum, GM_RXF_511B);
	stats->rx_pkts_512_1023 += YGE_READ_MIB32(pnum, GM_RXF_1023B);
	stats->rx_pkts_1024_1518 += YGE_READ_MIB32(pnum, GM_RXF_1518B);
	stats->rx_pkts_1519_max += YGE_READ_MIB32(pnum, GM_RXF_MAX_SZ);
	stats->rx_pkts_too_long += YGE_READ_MIB32(pnum, GM_RXF_LNG_ERR);
	stats->rx_pkts_jabbers += YGE_READ_MIB32(pnum, GM_RXF_JAB_PKT);
	(void) YGE_READ_MIB32(pnum, GM_RXF_SPARE2);
	stats->rx_fifo_oflows += YGE_READ_MIB32(pnum, GM_RXE_FIFO_OV);
	(void) YGE_READ_MIB32(pnum, GM_RXF_SPARE3);

	/* Tx stats. */
	stats->tx_ucast_frames += YGE_READ_MIB32(pnum, GM_TXF_UC_OK);
	stats->tx_bcast_frames += YGE_READ_MIB32(pnum, GM_TXF_BC_OK);
	stats->tx_pause_frames += YGE_READ_MIB32(pnum, GM_TXF_MPAUSE);
	stats->tx_mcast_frames += YGE_READ_MIB32(pnum, GM_TXF_MC_OK);
	stats->tx_octets += YGE_READ_MIB64(pnum, GM_TXO_OK_LO);
	stats->tx_pkts_64 += YGE_READ_MIB32(pnum, GM_TXF_64B);
	stats->tx_pkts_65_127 += YGE_READ_MIB32(pnum, GM_TXF_127B);
	stats->tx_pkts_128_255 += YGE_READ_MIB32(pnum, GM_TXF_255B);
	stats->tx_pkts_256_511 += YGE_READ_MIB32(pnum, GM_TXF_511B);
	stats->tx_pkts_512_1023 += YGE_READ_MIB32(pnum, GM_TXF_1023B);
	stats->tx_pkts_1024_1518 += YGE_READ_MIB32(pnum, GM_TXF_1518B);
	stats->tx_pkts_1519_max += YGE_READ_MIB32(pnum, GM_TXF_MAX_SZ);
	(void) YGE_READ_MIB32(pnum, GM_TXF_SPARE1);
	stats->tx_colls += YGE_READ_MIB32(pnum, GM_TXF_COL);
	stats->tx_late_colls += YGE_READ_MIB32(pnum, GM_TXF_LAT_COL);
	stats->tx_excess_colls += YGE_READ_MIB32(pnum, GM_TXF_ABO_COL);
	stats->tx_multi_colls += YGE_READ_MIB32(pnum, GM_TXF_MUL_COL);
	stats->tx_single_colls += YGE_READ_MIB32(pnum, GM_TXF_SNG_COL);
	stats->tx_underflows += YGE_READ_MIB32(pnum, GM_TXE_FIFO_UR);
	/* Clear MIB Clear Counter Mode. */
	gmac &= ~GM_PAR_MIB_CLR;
	GMAC_WRITE_2(dev, pnum, GM_PHY_ADDR, gmac);
}
2911
2912 #undef YGE_READ_MIB32
2913 #undef YGE_READ_MIB64
2914
/*
 * Map a 6-byte MAC address to a bucket in the 64-entry multicast
 * hash filter: a big-endian CRC-32 over the address (bits taken LSB
 * first within each octet), reduced modulo 64.
 */
uint32_t
yge_hashbit(const uint8_t *addr)
{
	static const uint32_t poly_be = 0x04c11db7;
	uint32_t crc = 0xffffffff;

	for (int byte = 0; byte < 6; byte++) {
		uint32_t octet = addr[byte];

		for (int k = 0; k < 8; k++) {
			uint32_t mix = ((crc >> 31) ^ octet) & 1;

			crc <<= 1;
			if (mix != 0)
				crc ^= poly_be;
			octet >>= 1;
		}
	}

	/* The hardware hash table has 64 buckets. */
	return (crc % 64);
}
2935
/*
 * GLDv3 statistics entry point: return the requested statistic in
 * *val.  Link-related statistics are answered by the shared MII
 * layer; the remainder come from the soft copy of the hardware MIB
 * counters maintained by yge_stats_update().
 */
int
yge_m_stat(void *arg, uint_t stat, uint64_t *val)
{
	yge_port_t *port = arg;
	struct yge_hw_stats *stats = &port->p_stats;

	if (stat == MAC_STAT_IFSPEED) {
		/*
		 * This is the first stat we are asked about. We update only
		 * for this stat, to avoid paying the hefty cost of the update
		 * once for each stat.
		 */
		DEV_LOCK(port->p_dev);
		yge_stats_update(port);
		DEV_UNLOCK(port->p_dev);
	}

	/* Give the MII layer first crack at the statistic. */
	if (mii_m_getstat(port->p_mii, stat, val) == 0) {
		return (0);
	}

	switch (stat) {
	case MAC_STAT_MULTIRCV:
		*val = stats->rx_mcast_frames;
		break;

	case MAC_STAT_BRDCSTRCV:
		*val = stats->rx_bcast_frames;
		break;

	case MAC_STAT_MULTIXMT:
		*val = stats->tx_mcast_frames;
		break;

	case MAC_STAT_BRDCSTXMT:
		*val = stats->tx_bcast_frames;
		break;

	case MAC_STAT_IPACKETS:
		*val = stats->rx_ucast_frames;
		break;

	case MAC_STAT_RBYTES:
		*val = stats->rx_good_octets;
		break;

	case MAC_STAT_OPACKETS:
		*val = stats->tx_ucast_frames;
		break;

	case MAC_STAT_OBYTES:
		*val = stats->tx_octets;
		break;

	case MAC_STAT_NORCVBUF:
		*val = stats->rx_nobuf;
		break;

	case MAC_STAT_COLLISIONS:
		*val = stats->tx_colls;
		break;

	case ETHER_STAT_ALIGN_ERRORS:
		*val = stats->rx_runt_errs;
		break;

	case ETHER_STAT_FCS_ERRORS:
		*val = stats->rx_crc_errs;
		break;

	case ETHER_STAT_FIRST_COLLISIONS:
		*val = stats->tx_single_colls;
		break;

	case ETHER_STAT_MULTI_COLLISIONS:
		*val = stats->tx_multi_colls;
		break;

	case ETHER_STAT_TX_LATE_COLLISIONS:
		*val = stats->tx_late_colls;
		break;

	case ETHER_STAT_EX_COLLISIONS:
		*val = stats->tx_excess_colls;
		break;

	case ETHER_STAT_TOOLONG_ERRORS:
		*val = stats->rx_pkts_too_long;
		break;

	case MAC_STAT_OVERFLOWS:
		*val = stats->rx_fifo_oflows;
		break;

	case MAC_STAT_UNDERFLOWS:
		*val = stats->tx_underflows;
		break;

	case ETHER_STAT_TOOSHORT_ERRORS:
		*val = stats->rx_runts;
		break;

	case ETHER_STAT_JABBER_ERRORS:
		*val = stats->rx_pkts_jabbers;
		break;

	default:
		return (ENOTSUP);
	}
	return (0);
}
3047
/*
 * GLDv3 start entry point: allocate the port's Tx/Rx DMA resources,
 * bring the hardware port up (unless the device is suspended), mark
 * the port running and start the MII machinery.  Returns 0 on
 * success or ENOMEM if DMA allocation fails.
 */
int
yge_m_start(void *arg)
{
	yge_port_t *port = arg;

	DEV_LOCK(port->p_dev);

	/*
	 * We defer resource allocation to this point, because we
	 * don't want to waste DMA resources that might better be used
	 * elsewhere, if the port is not actually being used.
	 *
	 * Furthermore, this gives us a more graceful handling of dynamic
	 * MTU modification.
	 */
	if (yge_txrx_dma_alloc(port) != DDI_SUCCESS) {
		/* Make sure we free up partially allocated resources. */
		yge_txrx_dma_free(port);
		DEV_UNLOCK(port->p_dev);
		return (ENOMEM);
	}

	if (!port->p_dev->d_suspended)
		yge_start_port(port);
	port->p_running = B_TRUE;
	DEV_UNLOCK(port->p_dev);

	mii_start(port->p_mii);

	return (0);
}
3079
/*
 * GLDv3 stop entry point: quiesce the hardware port (unless the
 * device is suspended), mark the port not running, and release its
 * Tx/Rx DMA resources.
 */
void
yge_m_stop(void *arg)
{
	yge_port_t *port = arg;
	yge_dev_t *dev = port->p_dev;

	DEV_LOCK(dev);
	if (!dev->d_suspended)
		yge_stop_port(port);

	port->p_running = B_FALSE;

	/* Release resources we don't need */
	yge_txrx_dma_free(port);
	DEV_UNLOCK(dev);
}
3096
/*
 * GLDv3 promiscuous-mode entry point: record the new setting and
 * reprogram the hardware receive filter.  Always succeeds.
 */
int
yge_m_promisc(void *arg, boolean_t on)
{
	yge_port_t *port = arg;

	DEV_LOCK(port->p_dev);

	/* Save current promiscuous mode. */
	port->p_promisc = on;
	yge_setrxfilt(port);

	DEV_UNLOCK(port->p_dev);

	return (0);
}
3112
3113 int
3114 yge_m_multicst(void *arg, boolean_t add, const uint8_t *addr)
3115 {
3116 yge_port_t *port = arg;
3117 int bit;
3118 boolean_t update;
3119
3120 bit = yge_hashbit(addr);
3121 ASSERT(bit < 64);
3122
3123 DEV_LOCK(port->p_dev);
3124 if (add) {
3125 if (port->p_mccount[bit] == 0) {
3126 /* Set the corresponding bit in the hash table. */
3127 port->p_mchash[bit / 32] |= (1 << (bit % 32));
3128 update = B_TRUE;
3129 }
3130 port->p_mccount[bit]++;
3131 } else {
3132 ASSERT(port->p_mccount[bit] > 0);
3133 port->p_mccount[bit]--;
3134 if (port->p_mccount[bit] == 0) {
3135 port->p_mchash[bit / 32] &= ~(1 << (bit % 32));
3136 update = B_TRUE;
3137 }
3138 }
3139
3140 if (update) {
3141 yge_setrxfilt(port);
3142 }
3143 DEV_UNLOCK(port->p_dev);
3144 return (0);
3145 }
3146
/*
 * GLDv3 unicast-address entry point: install the new primary MAC
 * address and reprogram the hardware receive filter.  Always
 * succeeds.
 */
int
yge_m_unicst(void *arg, const uint8_t *macaddr)
{
	yge_port_t *port = arg;

	DEV_LOCK(port->p_dev);

	bcopy(macaddr, port->p_curraddr, ETHERADDRL);
	yge_setrxfilt(port);

	DEV_UNLOCK(port->p_dev);

	return (0);
}
3161
/*
 * GLDv3 transmit entry point.  Takes a chain of mblks and returns
 * the unsent remainder (NULL when everything was queued).  The whole
 * chain is dropped if the device is suspended.
 */
mblk_t *
yge_m_tx(void *arg, mblk_t *mp)
{
	yge_port_t *port = arg;
	mblk_t *nmp;
	int enq = 0;		/* frames queued during this call */
	uint32_t ridx;
	int idx;
	boolean_t resched = B_FALSE;

	TX_LOCK(port->p_dev);

	if (port->p_dev->d_suspended) {

		TX_UNLOCK(port->p_dev);

		/* Cannot transmit now: free the entire chain. */
		while ((nmp = mp) != NULL) {
			/* carrier_errors++; */
			mp = mp->b_next;
			freemsg(nmp);
		}
		return (NULL);
	}

	/* attempt a reclaim */
	ridx = port->p_port == YGE_PORT_A ?
	    STAT_TXA1_RIDX : STAT_TXA2_RIDX;
	idx = CSR_READ_2(port->p_dev, ridx);
	if (port->p_tx_cons != idx)
		resched = yge_txeof_locked(port, idx);

	/* Queue frames until yge_send() reports no more room. */
	while (mp != NULL) {
		nmp = mp->b_next;
		mp->b_next = NULL;

		if (!yge_send(port, mp)) {
			/* Reattach remainder; it is returned to caller. */
			mp->b_next = nmp;
			break;
		}
		enq++;
		mp = nmp;

	}
	if (enq > 0) {
		/* Transmit */
		CSR_WRITE_2(port->p_dev,
		    Y2_PREF_Q_ADDR(port->p_txq, PREF_UNIT_PUT_IDX_REG),
		    port->p_tx_prod);
	}

	TX_UNLOCK(port->p_dev);

	/* Tell the MAC layer we have room again after a reclaim. */
	if (resched)
		mac_tx_update(port->p_mh);

	return (mp);
}
3219
/*
 * GLDv3 ioctl entry point.  Optionally (YGE_MII_LOOPBACK builds)
 * lets the MII layer handle loopback ioctls; everything else is
 * NAK'd with EINVAL.
 */
void
yge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
#ifdef YGE_MII_LOOPBACK
	/* LINTED E_FUNC_SET_NOT_USED */
	yge_port_t *port = arg;

	/*
	 * Right now, the MII common layer does not properly handle
	 * loopback on these PHYs. Fixing this should be done at some
	 * point in the future.
	 */
	if (mii_m_loop_ioctl(port->p_mii, wq, mp))
		return;
#else
	_NOTE(ARGUNUSED(arg));
#endif

	miocnak(wq, mp, 0, EINVAL);
}
3240
3241 int
3242 yge_m_setprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
3243 uint_t pr_valsize, const void *pr_val)
3244 {
3245 yge_port_t *port = arg;
3246 uint32_t new_mtu;
3247 int err = 0;
3248
3249 err = mii_m_setprop(port->p_mii, pr_name, pr_num, pr_valsize, pr_val);
3250 if (err != ENOTSUP) {
3251 return (err);
3252 }
3253
3254 DEV_LOCK(port->p_dev);
3255
3256 switch (pr_num) {
3257 case MAC_PROP_MTU:
3258 if (pr_valsize < sizeof (new_mtu)) {
3259 err = EINVAL;
3260 break;
3261 }
3262 bcopy(pr_val, &new_mtu, sizeof (new_mtu));
3263 if (new_mtu == port->p_mtu) {
3264 /* no change */
3265 err = 0;
3266 break;
3267 }
3268 if (new_mtu < ETHERMTU) {
3269 yge_error(NULL, port,
3270 "Maximum MTU size too small: %d", new_mtu);
3271 err = EINVAL;
3272 break;
3273 }
3274 if (new_mtu > (port->p_flags & PORT_FLAG_NOJUMBO ?
3275 ETHERMTU : YGE_JUMBO_MTU)) {
3276 yge_error(NULL, port,
3277 "Maximum MTU size too big: %d", new_mtu);
3278 err = EINVAL;
3279 break;
3280 }
3281 if (port->p_running) {
3282 yge_error(NULL, port,
3283 "Unable to change maximum MTU while running");
3284 err = EBUSY;
3285 break;
3286 }
3287
3288
3289 /*
3290 * NB: It would probably be better not to hold the
3291 * DEVLOCK, but releasing it creates a potential race
3292 * if m_start is called concurrently.
3293 *
3294 * It turns out that the MAC layer guarantees safety
3295 * for us here by using a cut out for this kind of
3296 * notification call back anyway.
3297 *
3298 * See R8. and R14. in mac.c locking comments, which read
3299 * as follows:
3300 *
3301 * R8. Since it is not guaranteed (see R14) that
3302 * drivers won't hold locks across mac driver
3303 * interfaces, the MAC layer must provide a cut out
3304 * for control interfaces like upcall notifications
3305 * and start them in a separate thread.
3306 *
3307 * R14. It would be preferable if MAC drivers don't
3308 * hold any locks across any mac call. However at a
3309 * minimum they must not hold any locks across data
3310 * upcalls. They must also make sure that all
3311 * references to mac data structures are cleaned up
3312 * and that it is single threaded at mac_unregister
3313 * time.
3314 */
3315 err = mac_maxsdu_update(port->p_mh, new_mtu);
3316 if (err != 0) {
3317 /* This should never occur! */
3318 yge_error(NULL, port,
3319 "Failed notifying GLDv3 of new maximum MTU");
3320 } else {
3321 port->p_mtu = new_mtu;
3322 }
3323 break;
3324
3325 default:
3326 err = ENOTSUP;
3327 break;
3328 }
3329
3330 err:
3331 DEV_UNLOCK(port->p_dev);
3332
3333 return (err);
3334 }
3335
3336 int
3337 yge_m_getprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
3338 uint_t pr_valsize, void *pr_val)
3339 {
3340 yge_port_t *port = arg;
3341
3342 return (mii_m_getprop(port->p_mii, pr_name, pr_num, pr_valsize,
3343 pr_val));
3344 }
3345
3346 static void
3347 yge_m_propinfo(void *arg, const char *pr_name, mac_prop_id_t pr_num,
3348 mac_prop_info_handle_t prh)
3349 {
3350 yge_port_t *port = arg;
3351
3352 switch (pr_num) {
3353 case MAC_PROP_MTU:
3354 mac_prop_info_set_range_uint32(prh, ETHERMTU,
3355 port->p_flags & PORT_FLAG_NOJUMBO ?
3356 ETHERMTU : YGE_JUMBO_MTU);
3357 break;
3358 default:
3359 mii_m_propinfo(port->p_mii, pr_name, pr_num, prh);
3360 break;
3361 }
3362 }
3363
/*
 * Post work (one or more YGE_TASK_* flag bits) to the background task
 * thread and wake it.  Safe to call from interrupt or kernel context;
 * flags accumulate until yge_task() consumes them.
 */
void
yge_dispatch(yge_dev_t *dev, int flag)
{
	TASK_LOCK(dev);
	dev->d_task_flags |= flag;
	TASK_SIGNAL(dev);
	TASK_UNLOCK(dev);
}
3372
/*
 * Background task thread.  Sleeps until yge_dispatch() posts flag
 * bits, then atomically consumes them and performs the requested work
 * outside of any driver locks.  Exits when YGE_TASK_EXIT is posted.
 */
void
yge_task(void *arg)
{
	yge_dev_t *dev = arg;
	int flags;

	for (;;) {

		/* Wait for at least one flag bit, then claim them all. */
		TASK_LOCK(dev);
		while ((flags = dev->d_task_flags) == 0)
			TASK_WAIT(dev);

		dev->d_task_flags = 0;
		TASK_UNLOCK(dev);

		/*
		 * This should be the first thing after the sleep so if we are
		 * requested to exit we do that and not waste time doing work
		 * we will then abandon.
		 */
		if (flags & YGE_TASK_EXIT)
			break;

		/* all processing done without holding locks */
		if (flags & YGE_TASK_RESTART)
			yge_restart_task(dev);
	}
}
3401
3402 void
3403 yge_error(yge_dev_t *dev, yge_port_t *port, char *fmt, ...)
3404 {
3405 va_list ap;
3406 char buf[256];
3407 int ppa;
3408
3409 va_start(ap, fmt);
3410 (void) vsnprintf(buf, sizeof (buf), fmt, ap);
3411 va_end(ap);
3412
3413 if (dev == NULL && port == NULL) {
3414 cmn_err(CE_WARN, "yge: %s", buf);
3415 } else {
3416 if (port != NULL)
3417 ppa = port->p_ppa;
3418 else
3419 ppa = ddi_get_instance(dev->d_dip);
3420 cmn_err(CE_WARN, "yge%d: %s", ppa, buf);
3421 }
3422 }
3423
3424 static int
3425 yge_ddi_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
3426 {
3427 yge_dev_t *dev;
3428 int rv;
3429
3430 switch (cmd) {
3431 case DDI_ATTACH:
3432 dev = kmem_zalloc(sizeof (*dev), KM_SLEEP);
3433 dev->d_port[0] = kmem_zalloc(sizeof (yge_port_t), KM_SLEEP);
3434 dev->d_port[1] = kmem_zalloc(sizeof (yge_port_t), KM_SLEEP);
3435 dev->d_dip = dip;
3436 ddi_set_driver_private(dip, dev);
3437
3438 dev->d_port[0]->p_port = 0;
3439 dev->d_port[0]->p_dev = dev;
3440 dev->d_port[1]->p_port = 0;
3441 dev->d_port[1]->p_dev = dev;
3442
3443 rv = yge_attach(dev);
3444 if (rv != DDI_SUCCESS) {
3445 ddi_set_driver_private(dip, 0);
3446 kmem_free(dev->d_port[1], sizeof (yge_port_t));
3447 kmem_free(dev->d_port[0], sizeof (yge_port_t));
3448 kmem_free(dev, sizeof (*dev));
3449 }
3450 return (rv);
3451
3452 case DDI_RESUME:
3453 dev = ddi_get_driver_private(dip);
3454 ASSERT(dev != NULL);
3455 return (yge_resume(dev));
3456
3457 default:
3458 return (DDI_FAILURE);
3459 }
3460 }
3461
/*
 * DDI detach(9E) entry point.  On DDI_DETACH, first disables every
 * registered MAC (failing the detach if any mac_disable() fails),
 * then tears down the hardware via yge_detach(), unregisters the
 * MACs, and frees the softc.  DDI_SUSPEND is forwarded to
 * yge_suspend().
 */
static int
yge_ddi_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	yge_dev_t *dev;
	mac_handle_t mh;

	switch (cmd) {
	case DDI_DETACH:

		dev = ddi_get_driver_private(dip);

		/* attempt to unregister MACs from Nemo */
		for (int i = 0; i < dev->d_num_port; i++) {

			if (((mh = dev->d_port[i]->p_mh) != NULL) &&
			    (mac_disable(mh) != 0)) {
				/*
				 * We'd really like a mac_enable to reenable
				 * any MACs that we previously disabled.  Too
				 * bad GLDv3 doesn't have one.
				 */
				return (DDI_FAILURE);
			}
		}

		ASSERT(dip == dev->d_dip);
		yge_detach(dev);
		ddi_set_driver_private(dip, 0);
		for (int i = 0; i < dev->d_num_port; i++) {
			if ((mh = dev->d_port[i]->p_mh) != NULL) {
				/* This can't fail after mac_disable above. */
				(void) mac_unregister(mh);
			}
		}
		kmem_free(dev->d_port[1], sizeof (yge_port_t));
		kmem_free(dev->d_port[0], sizeof (yge_port_t));
		kmem_free(dev, sizeof (*dev));
		return (DDI_SUCCESS);

	case DDI_SUSPEND:
		dev = ddi_get_driver_private(dip);
		ASSERT(dev != NULL);
		return (yge_suspend(dev));

	default:
		return (DDI_FAILURE);
	}
}
3510
/*
 * DDI quiesce(9E) entry point, used for fast reboot: stop all DMA and
 * interrupt activity.  Runs in single-threaded context, so no locks
 * are taken and no memory may be allocated.
 */
static int
yge_quiesce(dev_info_t *dip)
{
	yge_dev_t *dev;

	dev = ddi_get_driver_private(dip);
	ASSERT(dev != NULL);

	/* NB: No locking!  We are called in single threaded context */
	for (int i = 0; i < dev->d_num_port; i++) {
		yge_port_t *port = dev->d_port[i];
		if (port->p_running)
			yge_stop_port(port);
	}

	/* Disable all interrupts. */
	CSR_WRITE_4(dev, B0_IMSK, 0);
	/* Reads flush the preceding writes to the hardware. */
	(void) CSR_READ_4(dev, B0_IMSK);
	CSR_WRITE_4(dev, B0_HWE_IMSK, 0);
	(void) CSR_READ_4(dev, B0_HWE_IMSK);

	/* Put hardware into reset. */
	CSR_WRITE_2(dev, B0_CTST, CS_RST_SET);

	return (DDI_SUCCESS);
}
3537
3538 /*
3539 * Stream information
3540 */
3541 DDI_DEFINE_STREAM_OPS(yge_devops, nulldev, nulldev, yge_ddi_attach,
3542 yge_ddi_detach, nodev, NULL, D_MP, NULL, yge_quiesce);
3543
3544 /*
3545 * Module linkage information.
3546 */
3547
3548 static struct modldrv yge_modldrv = {
3549 &mod_driverops, /* drv_modops */
3550 "Yukon 2 Ethernet", /* drv_linkinfo */
3551 &yge_devops /* drv_dev_ops */
3552 };
3553
3554 static struct modlinkage yge_modlinkage = {
3555 MODREV_1, /* ml_rev */
3556 &yge_modldrv, /* ml_linkage */
3557 NULL
3558 };
3559
3560 /*
3561 * DDI entry points.
3562 */
3563 int
3564 _init(void)
3565 {
3566 int rv;
3567 mac_init_ops(&yge_devops, "yge");
3568 if ((rv = mod_install(&yge_modlinkage)) != DDI_SUCCESS) {
3569 mac_fini_ops(&yge_devops);
3570 }
3571 return (rv);
3572 }
3573
3574 int
3575 _fini(void)
3576 {
3577 int rv;
3578 if ((rv = mod_remove(&yge_modlinkage)) == DDI_SUCCESS) {
3579 mac_fini_ops(&yge_devops);
3580 }
3581 return (rv);
3582 }
3583
3584 int
3585 _info(struct modinfo *modinfop)
3586 {
3587 return (mod_info(&yge_modlinkage, modinfop));
3588 }