5253 kmem_alloc/kmem_zalloc won't fail with KM_SLEEP
5254 getrbuf won't fail with KM_SLEEP
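The diff below removes a dead error path in yge_add_intr(): the driver allocates its interrupt-handle array with kmem_zalloc(..., KM_SLEEP) and then tests the result for NULL, but a sleeping allocation blocks until memory is available and never returns NULL, so the check and its failure branch can never execute. A minimal sketch of the pattern, using hypothetical function names rather than code from yge.c (the getrbuf case of bug 5254 is analogous):

	#include <sys/types.h>
	#include <sys/kmem.h>

	static void *
	example_alloc(size_t len)
	{
		void *p;

		/* KM_SLEEP blocks until memory is available; never NULL. */
		p = kmem_zalloc(len, KM_SLEEP);

		/* No "if (p == NULL)" check is needed here. */
		return (p);
	}

	static void *
	example_alloc_nosleep(size_t len)
	{
		void *p;

		/* KM_NOSLEEP may fail, so the caller must check. */
		p = kmem_zalloc(len, KM_NOSLEEP);
		if (p == NULL)
			return (NULL);
		return (p);
	}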
--- old/usr/src/uts/common/io/yge/yge.c
+++ new/usr/src/uts/common/io/yge/yge.c
1 1 /*
2 2 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
3 3 */
4 4
5 5 /*
6 6 * This driver was derived from the FreeBSD if_msk.c driver, which
7 7 * bears the following copyright attributions and licenses.
8 8 */
9 9
10 10 /*
11 11 *
12 12 * LICENSE:
13 13 * Copyright (C) Marvell International Ltd. and/or its affiliates
14 14 *
15 15 * The computer program files contained in this folder ("Files")
16 16 * are provided to you under the BSD-type license terms provided
17 17 * below, and any use of such Files and any derivative works
18 18 * thereof created by you shall be governed by the following terms
19 19 * and conditions:
20 20 *
21 21 * - Redistributions of source code must retain the above copyright
22 22 * notice, this list of conditions and the following disclaimer.
23 23 * - Redistributions in binary form must reproduce the above
24 24 * copyright notice, this list of conditions and the following
25 25 * disclaimer in the documentation and/or other materials provided
26 26 * with the distribution.
27 27 * - Neither the name of Marvell nor the names of its contributors
28 28 * may be used to endorse or promote products derived from this
29 29 * software without specific prior written permission.
30 30 *
31 31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
34 34 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
35 35 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
36 36 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
37 37 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
38 38 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
39 39 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
40 40 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
41 41 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
42 42 * OF THE POSSIBILITY OF SUCH DAMAGE.
43 43 * /LICENSE
44 44 *
45 45 */
46 46 /*
47 47 * Copyright (c) 1997, 1998, 1999, 2000
48 48 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
49 49 *
50 50 * Redistribution and use in source and binary forms, with or without
51 51 * modification, are permitted provided that the following conditions
52 52 * are met:
53 53 * 1. Redistributions of source code must retain the above copyright
54 54 * notice, this list of conditions and the following disclaimer.
55 55 * 2. Redistributions in binary form must reproduce the above copyright
56 56 * notice, this list of conditions and the following disclaimer in the
57 57 * documentation and/or other materials provided with the distribution.
58 58 * 3. All advertising materials mentioning features or use of this software
59 59 * must display the following acknowledgement:
60 60 * This product includes software developed by Bill Paul.
61 61 * 4. Neither the name of the author nor the names of any co-contributors
62 62 * may be used to endorse or promote products derived from this software
63 63 * without specific prior written permission.
64 64 *
65 65 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
66 66 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
67 67 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
68 68 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
69 69 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
70 70 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
71 71 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
72 72 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
73 73 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
74 74 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
75 75 * THE POSSIBILITY OF SUCH DAMAGE.
76 76 */
77 77 /*
78 78 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
79 79 *
80 80 * Permission to use, copy, modify, and distribute this software for any
81 81 * purpose with or without fee is hereby granted, provided that the above
82 82 * copyright notice and this permission notice appear in all copies.
83 83 *
84 84 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
85 85 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
86 86 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
87 87 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
88 88 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
89 89 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
90 90 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
91 91 */
92 92
93 93 #include <sys/varargs.h>
94 94 #include <sys/types.h>
95 95 #include <sys/modctl.h>
96 96 #include <sys/conf.h>
97 97 #include <sys/devops.h>
98 98 #include <sys/stream.h>
99 99 #include <sys/strsun.h>
100 100 #include <sys/cmn_err.h>
101 101 #include <sys/ethernet.h>
102 102 #include <sys/kmem.h>
103 103 #include <sys/time.h>
104 104 #include <sys/pci.h>
105 105 #include <sys/mii.h>
106 106 #include <sys/miiregs.h>
107 107 #include <sys/mac.h>
108 108 #include <sys/mac_ether.h>
109 109 #include <sys/mac_provider.h>
110 110 #include <sys/debug.h>
111 111 #include <sys/note.h>
112 112 #include <sys/ddi.h>
113 113 #include <sys/sunddi.h>
114 114 #include <sys/vlan.h>
115 115
116 116 #include "yge.h"
117 117
118 118 static struct ddi_device_acc_attr yge_regs_attr = {
119 119 DDI_DEVICE_ATTR_V0,
120 120 DDI_STRUCTURE_LE_ACC,
121 121 DDI_STRICTORDER_ACC
122 122 };
123 123
124 124 static struct ddi_device_acc_attr yge_ring_attr = {
125 125 DDI_DEVICE_ATTR_V0,
126 126 DDI_STRUCTURE_LE_ACC,
127 127 DDI_STRICTORDER_ACC
128 128 };
129 129
130 130 static struct ddi_device_acc_attr yge_buf_attr = {
131 131 DDI_DEVICE_ATTR_V0,
132 132 DDI_NEVERSWAP_ACC,
133 133 DDI_STRICTORDER_ACC
134 134 };
135 135
136 136 #define DESC_ALIGN 0x1000
137 137
138 138 static ddi_dma_attr_t yge_ring_dma_attr = {
139 139 DMA_ATTR_V0, /* dma_attr_version */
140 140 0, /* dma_attr_addr_lo */
141 141 0x00000000ffffffffull, /* dma_attr_addr_hi */
142 142 0x00000000ffffffffull, /* dma_attr_count_max */
143 143 DESC_ALIGN, /* dma_attr_align */
144 144 0x000007fc, /* dma_attr_burstsizes */
145 145 1, /* dma_attr_minxfer */
146 146 0x00000000ffffffffull, /* dma_attr_maxxfer */
147 147 0x00000000ffffffffull, /* dma_attr_seg */
148 148 1, /* dma_attr_sgllen */
149 149 1, /* dma_attr_granular */
150 150 0 /* dma_attr_flags */
151 151 };
152 152
153 153 static ddi_dma_attr_t yge_buf_dma_attr = {
154 154 DMA_ATTR_V0, /* dma_attr_version */
155 155 0, /* dma_attr_addr_lo */
156 156 0x00000000ffffffffull, /* dma_attr_addr_hi */
157 157 0x00000000ffffffffull, /* dma_attr_count_max */
158 158 1, /* dma_attr_align */
159 159 0x0000fffc, /* dma_attr_burstsizes */
160 160 1, /* dma_attr_minxfer */
161 161 0x000000000000ffffull, /* dma_attr_maxxfer */
162 162 0x00000000ffffffffull, /* dma_attr_seg */
163 163 8, /* dma_attr_sgllen */
164 164 1, /* dma_attr_granular */
165 165 0 /* dma_attr_flags */
166 166 };
167 167
168 168
169 169 static int yge_attach(yge_dev_t *);
170 170 static void yge_detach(yge_dev_t *);
171 171 static int yge_suspend(yge_dev_t *);
172 172 static int yge_resume(yge_dev_t *);
173 173
174 174 static void yge_reset(yge_dev_t *);
175 175 static void yge_setup_rambuffer(yge_dev_t *);
176 176
177 177 static int yge_init_port(yge_port_t *);
178 178 static void yge_uninit_port(yge_port_t *);
179 179 static int yge_register_port(yge_port_t *);
180 180
181 181 static void yge_tick(void *);
182 182 static uint_t yge_intr(caddr_t, caddr_t);
183 183 static int yge_intr_gmac(yge_port_t *);
184 184 static void yge_intr_enable(yge_dev_t *);
185 185 static void yge_intr_disable(yge_dev_t *);
186 186 static boolean_t yge_handle_events(yge_dev_t *, mblk_t **, mblk_t **, int *);
187 187 static void yge_handle_hwerr(yge_port_t *, uint32_t);
188 188 static void yge_intr_hwerr(yge_dev_t *);
189 189 static mblk_t *yge_rxeof(yge_port_t *, uint32_t, int);
190 190 static void yge_txeof(yge_port_t *, int);
191 191 static boolean_t yge_send(yge_port_t *, mblk_t *);
192 192 static void yge_set_prefetch(yge_dev_t *, int, yge_ring_t *);
193 193 static void yge_set_rambuffer(yge_port_t *);
194 194 static void yge_start_port(yge_port_t *);
195 195 static void yge_stop_port(yge_port_t *);
196 196 static void yge_phy_power(yge_dev_t *, boolean_t);
197 197 static int yge_alloc_ring(yge_port_t *, yge_dev_t *, yge_ring_t *, uint32_t);
198 198 static void yge_free_ring(yge_ring_t *);
199 199 static uint8_t yge_find_capability(yge_dev_t *, uint8_t);
200 200
201 201 static int yge_txrx_dma_alloc(yge_port_t *);
202 202 static void yge_txrx_dma_free(yge_port_t *);
203 203 static void yge_init_rx_ring(yge_port_t *);
204 204 static void yge_init_tx_ring(yge_port_t *);
205 205
206 206 static uint16_t yge_mii_readreg(yge_port_t *, uint8_t, uint8_t);
207 207 static void yge_mii_writereg(yge_port_t *, uint8_t, uint8_t, uint16_t);
208 208
209 209 static uint16_t yge_mii_read(void *, uint8_t, uint8_t);
210 210 static void yge_mii_write(void *, uint8_t, uint8_t, uint16_t);
211 211 static void yge_mii_notify(void *, link_state_t);
212 212
213 213 static void yge_setrxfilt(yge_port_t *);
214 214 static void yge_restart_task(yge_dev_t *);
215 215 static void yge_task(void *);
216 216 static void yge_dispatch(yge_dev_t *, int);
217 217
218 218 static void yge_stats_clear(yge_port_t *);
219 219 static void yge_stats_update(yge_port_t *);
220 220 static uint32_t yge_hashbit(const uint8_t *);
221 221
222 222 static int yge_m_unicst(void *, const uint8_t *);
223 223 static int yge_m_multicst(void *, boolean_t, const uint8_t *);
224 224 static int yge_m_promisc(void *, boolean_t);
225 225 static mblk_t *yge_m_tx(void *, mblk_t *);
226 226 static int yge_m_stat(void *, uint_t, uint64_t *);
227 227 static int yge_m_start(void *);
228 228 static void yge_m_stop(void *);
229 229 static int yge_m_getprop(void *, const char *, mac_prop_id_t, uint_t, void *);
230 230 static void yge_m_propinfo(void *, const char *, mac_prop_id_t,
231 231 mac_prop_info_handle_t);
232 232 static int yge_m_setprop(void *, const char *, mac_prop_id_t, uint_t,
233 233 const void *);
234 234 static void yge_m_ioctl(void *, queue_t *, mblk_t *);
235 235
236 236 void yge_error(yge_dev_t *, yge_port_t *, char *, ...);
237 237 extern void yge_phys_update(yge_port_t *);
238 238 extern int yge_phys_restart(yge_port_t *, boolean_t);
239 239 extern int yge_phys_init(yge_port_t *, phy_readreg_t, phy_writereg_t);
240 240
241 241 static mac_callbacks_t yge_m_callbacks = {
242 242 MC_IOCTL | MC_SETPROP | MC_GETPROP | MC_PROPINFO,
243 243 yge_m_stat,
244 244 yge_m_start,
245 245 yge_m_stop,
246 246 yge_m_promisc,
247 247 yge_m_multicst,
248 248 yge_m_unicst,
249 249 yge_m_tx,
250 250 NULL,
251 251 yge_m_ioctl,
252 252 NULL, /* mc_getcapab */
253 253 NULL, /* mc_open */
254 254 NULL, /* mc_close */
255 255 yge_m_setprop,
256 256 yge_m_getprop,
257 257 yge_m_propinfo
258 258 };
259 259
260 260 static mii_ops_t yge_mii_ops = {
261 261 MII_OPS_VERSION,
262 262 yge_mii_read,
263 263 yge_mii_write,
264 264 yge_mii_notify,
265 265 NULL /* reset */
266 266 };
267 267
268 268 /*
269 269 * This is the low level interface routine to read from the PHY
270 270 * MII registers. There are multiple steps to these accesses. First
271 271 * the register number is written to an address register. Then, after
272 272 * a specified delay, the status is checked until the data is present.
273 273 */
274 274 static uint16_t
275 275 yge_mii_readreg(yge_port_t *port, uint8_t phy, uint8_t reg)
276 276 {
277 277 yge_dev_t *dev = port->p_dev;
278 278 int pnum = port->p_port;
279 279 uint16_t val;
280 280
281 281 GMAC_WRITE_2(dev, pnum, GM_SMI_CTRL,
282 282 GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
283 283
284 284 for (int i = 0; i < YGE_TIMEOUT; i += 10) {
285 285 drv_usecwait(10);
286 286 val = GMAC_READ_2(dev, pnum, GM_SMI_CTRL);
287 287 if ((val & GM_SMI_CT_RD_VAL) != 0) {
288 288 val = GMAC_READ_2(dev, pnum, GM_SMI_DATA);
289 289 return (val);
290 290 }
291 291 }
292 292
293 293 return (0xffff);
294 294 }
295 295
296 296 /*
297 297 * This is the low level interface routine to write to the PHY
298 298 * MII registers. There are multiple steps to these accesses. The
299 299 * data and the target register's address are written to the PHY.
300 300 * Then the PHY is polled until it is done with the write. Note
301 301 * that the delays are specified and required!
302 302 */
303 303 static void
304 304 yge_mii_writereg(yge_port_t *port, uint8_t phy, uint8_t reg, uint16_t val)
305 305 {
306 306 yge_dev_t *dev = port->p_dev;
307 307 int pnum = port->p_port;
308 308
309 309 GMAC_WRITE_2(dev, pnum, GM_SMI_DATA, val);
310 310 GMAC_WRITE_2(dev, pnum, GM_SMI_CTRL,
311 311 GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg));
312 312
313 313 for (int i = 0; i < YGE_TIMEOUT; i += 10) {
314 314 drv_usecwait(10);
315 315 if ((GMAC_READ_2(dev, pnum, GM_SMI_CTRL) & GM_SMI_CT_BUSY) == 0)
316 316 return;
317 317 }
318 318
319 319 yge_error(NULL, port, "phy write timeout");
320 320 }
321 321
322 322 static uint16_t
323 323 yge_mii_read(void *arg, uint8_t phy, uint8_t reg)
324 324 {
325 325 yge_port_t *port = arg;
326 326 uint16_t rv;
327 327
328 328 PHY_LOCK(port->p_dev);
329 329 rv = yge_mii_readreg(port, phy, reg);
330 330 PHY_UNLOCK(port->p_dev);
331 331 return (rv);
332 332 }
333 333
334 334 static void
335 335 yge_mii_write(void *arg, uint8_t phy, uint8_t reg, uint16_t val)
336 336 {
337 337 yge_port_t *port = arg;
338 338
339 339 PHY_LOCK(port->p_dev);
340 340 yge_mii_writereg(port, phy, reg, val);
341 341 PHY_UNLOCK(port->p_dev);
342 342 }
343 343
344 344 /*
345 345 * The MII common code calls this function to let the MAC driver
346 346 * know when there has been a change in status.
347 347 */
348 348 void
349 349 yge_mii_notify(void *arg, link_state_t link)
350 350 {
351 351 yge_port_t *port = arg;
352 352 yge_dev_t *dev = port->p_dev;
353 353 uint32_t gmac;
354 354 uint32_t gpcr;
355 355 link_flowctrl_t fc;
356 356 link_duplex_t duplex;
357 357 int speed;
358 358
359 359 fc = mii_get_flowctrl(port->p_mii);
360 360 duplex = mii_get_duplex(port->p_mii);
361 361 speed = mii_get_speed(port->p_mii);
362 362
363 363 DEV_LOCK(dev);
364 364
365 365 if (link == LINK_STATE_UP) {
366 366
367 367 /* Enable Tx FIFO Underrun. */
368 368 CSR_WRITE_1(dev, MR_ADDR(port->p_port, GMAC_IRQ_MSK),
369 369 GM_IS_TX_FF_UR | /* TX FIFO underflow */
370 370 GM_IS_RX_FF_OR); /* RX FIFO overflow */
371 371
372 372 gpcr = GM_GPCR_AU_ALL_DIS;
373 373
374 374 switch (fc) {
375 375 case LINK_FLOWCTRL_BI:
376 376 gmac = GMC_PAUSE_ON;
377 377 gpcr &= ~(GM_GPCR_FC_RX_DIS | GM_GPCR_FC_TX_DIS);
378 378 break;
379 379 case LINK_FLOWCTRL_TX:
380 380 gmac = GMC_PAUSE_ON;
381 381 gpcr |= GM_GPCR_FC_RX_DIS;
382 382 break;
383 383 case LINK_FLOWCTRL_RX:
384 384 gmac = GMC_PAUSE_ON;
385 385 gpcr |= GM_GPCR_FC_TX_DIS;
386 386 break;
387 387 case LINK_FLOWCTRL_NONE:
388 388 default:
389 389 gmac = GMC_PAUSE_OFF;
390 390 gpcr |= GM_GPCR_FC_RX_DIS;
391 391 gpcr |= GM_GPCR_FC_TX_DIS;
392 392 break;
393 393 }
394 394
395 395 gpcr &= ~((GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100));
396 396 switch (speed) {
397 397 case 1000:
398 398 gpcr |= GM_GPCR_SPEED_1000;
399 399 break;
400 400 case 100:
401 401 gpcr |= GM_GPCR_SPEED_100;
402 402 break;
403 403 case 10:
404 404 default:
405 405 break;
406 406 }
407 407
408 408 if (duplex == LINK_DUPLEX_FULL) {
409 409 gpcr |= GM_GPCR_DUP_FULL;
410 410 } else {
411 411 gpcr &= ~(GM_GPCR_DUP_FULL);
412 412 gmac = GMC_PAUSE_OFF;
413 413 gpcr |= GM_GPCR_FC_RX_DIS;
414 414 gpcr |= GM_GPCR_FC_TX_DIS;
415 415 }
416 416
417 417 gpcr |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
418 418 GMAC_WRITE_2(dev, port->p_port, GM_GP_CTRL, gpcr);
419 419
420 420 /* Read again to ensure writing. */
421 421 (void) GMAC_READ_2(dev, port->p_port, GM_GP_CTRL);
422 422
423 423 /* write out the flow control gmac setting */
424 424 CSR_WRITE_4(dev, MR_ADDR(port->p_port, GMAC_CTRL), gmac);
425 425
426 426 } else {
427 427 /* Disable Rx/Tx MAC. */
428 428 gpcr = GMAC_READ_2(dev, port->p_port, GM_GP_CTRL);
429 429 gpcr &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
430 430 GMAC_WRITE_2(dev, port->p_port, GM_GP_CTRL, gpcr);
431 431
432 432 /* Read again to ensure writing. */
433 433 (void) GMAC_READ_2(dev, port->p_port, GM_GP_CTRL);
434 434 }
435 435
436 436 DEV_UNLOCK(dev);
437 437
438 438 mac_link_update(port->p_mh, link);
439 439
440 440 if (port->p_running && (link == LINK_STATE_UP)) {
441 441 mac_tx_update(port->p_mh);
442 442 }
443 443 }
444 444
445 445 static void
446 446 yge_setrxfilt(yge_port_t *port)
447 447 {
448 448 yge_dev_t *dev;
449 449 uint16_t mode;
450 450 uint8_t *ea;
451 451 uint32_t *mchash;
452 452 int pnum;
453 453
454 454 dev = port->p_dev;
455 455 pnum = port->p_port;
456 456 ea = port->p_curraddr;
457 457 mchash = port->p_mchash;
458 458
459 459 if (dev->d_suspended)
460 460 return;
461 461
462 462 /* Set station address. */
463 463 for (int i = 0; i < (ETHERADDRL / 2); i++) {
464 464 GMAC_WRITE_2(dev, pnum, GM_SRC_ADDR_1L + i * 4,
465 465 ((uint16_t)ea[i * 2] | ((uint16_t)ea[(i * 2) + 1] << 8)));
466 466 }
467 467 for (int i = 0; i < (ETHERADDRL / 2); i++) {
468 468 GMAC_WRITE_2(dev, pnum, GM_SRC_ADDR_2L + i * 4,
469 469 ((uint16_t)ea[i * 2] | ((uint16_t)ea[(i * 2) + 1] << 8)));
470 470 }
471 471
472 472 /* Figure out receive filtering mode. */
473 473 mode = GMAC_READ_2(dev, pnum, GM_RX_CTRL);
474 474 if (port->p_promisc) {
475 475 mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
476 476 } else {
477 477 mode |= (GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
478 478 }
479 479 /* Write the multicast filter. */
480 480 GMAC_WRITE_2(dev, pnum, GM_MC_ADDR_H1, mchash[0] & 0xffff);
481 481 GMAC_WRITE_2(dev, pnum, GM_MC_ADDR_H2, (mchash[0] >> 16) & 0xffff);
482 482 GMAC_WRITE_2(dev, pnum, GM_MC_ADDR_H3, mchash[1] & 0xffff);
483 483 GMAC_WRITE_2(dev, pnum, GM_MC_ADDR_H4, (mchash[1] >> 16) & 0xffff);
484 484 /* Write the receive filtering mode. */
485 485 GMAC_WRITE_2(dev, pnum, GM_RX_CTRL, mode);
486 486 }
487 487
488 488 static void
489 489 yge_init_rx_ring(yge_port_t *port)
490 490 {
491 491 yge_buf_t *rxb;
492 492 yge_ring_t *ring;
493 493 int prod;
494 494
495 495 port->p_rx_cons = 0;
496 496 port->p_rx_putwm = YGE_PUT_WM;
497 497 ring = &port->p_rx_ring;
498 498
499 499 /* ala bzero, but uses safer acch access */
500 500 CLEARRING(ring);
501 501
502 502 for (prod = 0; prod < YGE_RX_RING_CNT; prod++) {
503 503 /* Hang out receive buffers. */
504 504 rxb = &port->p_rx_buf[prod];
505 505
506 506 PUTADDR(ring, prod, rxb->b_paddr);
507 507 PUTCTRL(ring, prod, port->p_framesize | OP_PACKET | HW_OWNER);
508 508 }
509 509
510 510 SYNCRING(ring, DDI_DMA_SYNC_FORDEV);
511 511
512 512 yge_set_prefetch(port->p_dev, port->p_rxq, ring);
513 513
514 514 /* Update prefetch unit. */
515 515 CSR_WRITE_2(port->p_dev,
516 516 Y2_PREF_Q_ADDR(port->p_rxq, PREF_UNIT_PUT_IDX_REG),
517 517 YGE_RX_RING_CNT - 1);
518 518 }
519 519
520 520 static void
521 521 yge_init_tx_ring(yge_port_t *port)
522 522 {
523 523 yge_ring_t *ring = &port->p_tx_ring;
524 524
525 525 port->p_tx_prod = 0;
526 526 port->p_tx_cons = 0;
527 527 port->p_tx_cnt = 0;
528 528
529 529 CLEARRING(ring);
530 530 SYNCRING(ring, DDI_DMA_SYNC_FORDEV);
531 531
532 532 yge_set_prefetch(port->p_dev, port->p_txq, ring);
533 533 }
534 534
535 535 static void
536 536 yge_setup_rambuffer(yge_dev_t *dev)
537 537 {
538 538 int next;
539 539 int i;
540 540
541 541 /* Get adapter SRAM size. */
542 542 dev->d_ramsize = CSR_READ_1(dev, B2_E_0) * 4;
543 543 if (dev->d_ramsize == 0)
544 544 return;
545 545
546 546 dev->d_pflags |= PORT_FLAG_RAMBUF;
547 547 /*
548 548 * Give the receiver 2/3 of memory and round down to a multiple
549 549 * of 1024. The Tx/Rx RAM buffer size of Yukon 2 should be a
550 550 * multiple of 1024.
551 551 */
552 552 dev->d_rxqsize = (((dev->d_ramsize * 1024 * 2) / 3) & ~(1024 - 1));
553 553 dev->d_txqsize = (dev->d_ramsize * 1024) - dev->d_rxqsize;
554 554
555 555 for (i = 0, next = 0; i < dev->d_num_port; i++) {
556 556 dev->d_rxqstart[i] = next;
557 557 dev->d_rxqend[i] = next + dev->d_rxqsize - 1;
558 558 next = dev->d_rxqend[i] + 1;
559 559 dev->d_txqstart[i] = next;
560 560 dev->d_txqend[i] = next + dev->d_txqsize - 1;
561 561 next = dev->d_txqend[i] + 1;
562 562 }
563 563 }
564 564
565 565 static void
566 566 yge_phy_power(yge_dev_t *dev, boolean_t powerup)
567 567 {
568 568 uint32_t val;
569 569 int i;
570 570
571 571 if (powerup) {
572 572 /* Switch power to VCC (WA for VAUX problem). */
573 573 CSR_WRITE_1(dev, B0_POWER_CTRL,
574 574 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
575 575 /* Disable Core Clock Division, set Clock Select to 0. */
576 576 CSR_WRITE_4(dev, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);
577 577
578 578 val = 0;
579 579 if (dev->d_hw_id == CHIP_ID_YUKON_XL &&
580 580 dev->d_hw_rev > CHIP_REV_YU_XL_A1) {
581 581 /* Enable bits are inverted. */
582 582 val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
583 583 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
584 584 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
585 585 }
586 586 /*
587 587 * Enable PCI & Core Clock, enable clock gating for both Links.
588 588 */
589 589 CSR_WRITE_1(dev, B2_Y2_CLK_GATE, val);
590 590
591 591 val = pci_config_get32(dev->d_pcih, PCI_OUR_REG_1);
592 592 val &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
593 593 if (dev->d_hw_id == CHIP_ID_YUKON_XL &&
594 594 dev->d_hw_rev > CHIP_REV_YU_XL_A1) {
595 595 /* Deassert Low Power for 1st PHY. */
596 596 val |= PCI_Y2_PHY1_COMA;
597 597 if (dev->d_num_port > 1)
598 598 val |= PCI_Y2_PHY2_COMA;
599 599 }
600 600
601 601 /* Release PHY from PowerDown/COMA mode. */
602 602 pci_config_put32(dev->d_pcih, PCI_OUR_REG_1, val);
603 603
604 604 switch (dev->d_hw_id) {
605 605 case CHIP_ID_YUKON_EC_U:
606 606 case CHIP_ID_YUKON_EX:
607 607 case CHIP_ID_YUKON_FE_P: {
608 608 uint32_t our;
609 609
610 610 CSR_WRITE_2(dev, B0_CTST, Y2_HW_WOL_OFF);
611 611
612 612 /* Enable all clocks. */
613 613 pci_config_put32(dev->d_pcih, PCI_OUR_REG_3, 0);
614 614
615 615 our = pci_config_get32(dev->d_pcih, PCI_OUR_REG_4);
616 616 our &= (PCI_FORCE_ASPM_REQUEST|PCI_ASPM_GPHY_LINK_DOWN|
617 617 PCI_ASPM_INT_FIFO_EMPTY|PCI_ASPM_CLKRUN_REQUEST);
618 618 /* Set all bits to 0 except bits 15..12. */
619 619 pci_config_put32(dev->d_pcih, PCI_OUR_REG_4, our);
620 620
621 621 /* Set to default value. */
622 622 our = pci_config_get32(dev->d_pcih, PCI_OUR_REG_5);
623 623 our &= P_CTL_TIM_VMAIN_AV_MSK;
624 624 pci_config_put32(dev->d_pcih, PCI_OUR_REG_5, our);
625 625
626 626 pci_config_put32(dev->d_pcih, PCI_OUR_REG_1, 0);
627 627
628 628 /*
629 629 * Enable workaround for dev 4.107 on Yukon-Ultra
630 630 * and Extreme
631 631 */
632 632 our = CSR_READ_4(dev, B2_GP_IO);
633 633 our |= GLB_GPIO_STAT_RACE_DIS;
634 634 CSR_WRITE_4(dev, B2_GP_IO, our);
635 635
636 636 (void) CSR_READ_4(dev, B2_GP_IO);
637 637 break;
638 638 }
639 639 default:
640 640 break;
641 641 }
642 642
643 643 for (i = 0; i < dev->d_num_port; i++) {
644 644 CSR_WRITE_2(dev, MR_ADDR(i, GMAC_LINK_CTRL),
645 645 GMLC_RST_SET);
646 646 CSR_WRITE_2(dev, MR_ADDR(i, GMAC_LINK_CTRL),
647 647 GMLC_RST_CLR);
648 648 }
649 649 } else {
650 650 val = pci_config_get32(dev->d_pcih, PCI_OUR_REG_1);
651 651 if (dev->d_hw_id == CHIP_ID_YUKON_XL &&
652 652 dev->d_hw_rev > CHIP_REV_YU_XL_A1) {
653 653 val &= ~PCI_Y2_PHY1_COMA;
654 654 if (dev->d_num_port > 1)
655 655 val &= ~PCI_Y2_PHY2_COMA;
656 656 val &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
657 657 } else {
658 658 val |= (PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
659 659 }
660 660 pci_config_put32(dev->d_pcih, PCI_OUR_REG_1, val);
661 661
662 662 val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
663 663 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
664 664 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
665 665 if (dev->d_hw_id == CHIP_ID_YUKON_XL &&
666 666 dev->d_hw_rev > CHIP_REV_YU_XL_A1) {
667 667 /* Enable bits are inverted. */
668 668 val = 0;
669 669 }
670 670 /*
671 671 * Disable PCI & Core Clock, disable clock gating for
672 672 * both Links.
673 673 */
674 674 CSR_WRITE_1(dev, B2_Y2_CLK_GATE, val);
675 675 CSR_WRITE_1(dev, B0_POWER_CTRL,
676 676 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);
677 677 }
678 678 }
679 679
680 680 static void
681 681 yge_reset(yge_dev_t *dev)
682 682 {
683 683 uint64_t addr;
684 684 uint16_t status;
685 685 uint32_t val;
686 686 int i;
687 687 ddi_acc_handle_t pcih = dev->d_pcih;
688 688
689 689 /* Turn off ASF */
690 690 if (dev->d_hw_id == CHIP_ID_YUKON_EX) {
691 691 status = CSR_READ_2(dev, B28_Y2_ASF_STAT_CMD);
692 692 /* Clear AHB bridge & microcontroller reset */
693 693 status &= ~Y2_ASF_CPU_MODE;
694 694 status &= ~Y2_ASF_AHB_RST;
695 695 /* Clear ASF microcontroller state */
696 696 status &= ~Y2_ASF_STAT_MSK;
697 697 CSR_WRITE_2(dev, B28_Y2_ASF_STAT_CMD, status);
698 698 } else {
699 699 CSR_WRITE_1(dev, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
700 700 }
701 701 CSR_WRITE_2(dev, B0_CTST, Y2_ASF_DISABLE);
702 702
703 703 /*
704 704 * Since we disabled ASF, S/W reset is required for Power Management.
705 705 */
706 706 CSR_WRITE_1(dev, B0_CTST, CS_RST_SET);
707 707 CSR_WRITE_1(dev, B0_CTST, CS_RST_CLR);
708 708
709 709 /* Allow writes to PCI config space */
710 710 CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_ON);
711 711
712 712 /* Clear all error bits in the PCI status register. */
713 713 status = pci_config_get16(pcih, PCI_CONF_STAT);
714 714 CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_ON);
715 715
716 716 status |= (PCI_STAT_S_PERROR | PCI_STAT_S_SYSERR | PCI_STAT_R_MAST_AB |
717 717 PCI_STAT_R_TARG_AB | PCI_STAT_PERROR);
718 718 pci_config_put16(pcih, PCI_CONF_STAT, status);
719 719
720 720 CSR_WRITE_1(dev, B0_CTST, CS_MRST_CLR);
721 721
722 722 switch (dev->d_bustype) {
723 723 case PEX_BUS:
724 724 /* Clear all PEX errors. */
725 725 CSR_PCI_WRITE_4(dev, Y2_CFG_AER + AER_UNCOR_ERR, 0xffffffff);
726 726
727 727 /* is error bit status stuck? */
728 728 val = CSR_PCI_READ_4(dev, PEX_UNC_ERR_STAT);
729 729 if ((val & PEX_RX_OV) != 0) {
730 730 dev->d_intrmask &= ~Y2_IS_HW_ERR;
731 731 dev->d_intrhwemask &= ~Y2_IS_PCI_EXP;
732 732 }
733 733 break;
734 734 case PCI_BUS:
735 735 /* Set Cache Line Size to 2 (8 bytes) if configured to 0. */
736 736 if (pci_config_get8(pcih, PCI_CONF_CACHE_LINESZ) == 0)
737 737 pci_config_put16(pcih, PCI_CONF_CACHE_LINESZ, 2);
738 738 break;
739 739 case PCIX_BUS:
740 740 /* Set Cache Line Size to 2 (8 bytes) if configured to 0. */
741 741 if (pci_config_get8(pcih, PCI_CONF_CACHE_LINESZ) == 0)
742 742 pci_config_put16(pcih, PCI_CONF_CACHE_LINESZ, 2);
743 743
744 744 /* Set Cache Line Size opt. */
745 745 val = pci_config_get32(pcih, PCI_OUR_REG_1);
746 746 val |= PCI_CLS_OPT;
747 747 pci_config_put32(pcih, PCI_OUR_REG_1, val);
748 748 break;
749 749 }
750 750
751 751 /* Set PHY power state. */
752 752 yge_phy_power(dev, B_TRUE);
753 753
754 754 /* Reset GPHY/GMAC Control */
755 755 for (i = 0; i < dev->d_num_port; i++) {
756 756 /* GPHY Control reset. */
757 757 CSR_WRITE_4(dev, MR_ADDR(i, GPHY_CTRL), GPC_RST_SET);
758 758 CSR_WRITE_4(dev, MR_ADDR(i, GPHY_CTRL), GPC_RST_CLR);
759 759 /* GMAC Control reset. */
760 760 CSR_WRITE_4(dev, MR_ADDR(i, GMAC_CTRL), GMC_RST_SET);
761 761 CSR_WRITE_4(dev, MR_ADDR(i, GMAC_CTRL), GMC_RST_CLR);
762 762 if (dev->d_hw_id == CHIP_ID_YUKON_EX ||
763 763 dev->d_hw_id == CHIP_ID_YUKON_SUPR) {
764 764 CSR_WRITE_2(dev, MR_ADDR(i, GMAC_CTRL),
765 765 (GMC_BYP_RETR_ON | GMC_BYP_MACSECRX_ON |
766 766 GMC_BYP_MACSECTX_ON));
767 767 }
768 768 CSR_WRITE_2(dev, MR_ADDR(i, GMAC_CTRL), GMC_F_LOOPB_OFF);
769 769
770 770 }
771 771 CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
772 772
773 773 /* LED On. */
774 774 CSR_WRITE_2(dev, B0_CTST, Y2_LED_STAT_ON);
775 775
776 776 /* Clear TWSI IRQ. */
777 777 CSR_WRITE_4(dev, B2_I2C_IRQ, I2C_CLR_IRQ);
778 778
779 779 /* Turn off hardware timer. */
780 780 CSR_WRITE_1(dev, B2_TI_CTRL, TIM_STOP);
781 781 CSR_WRITE_1(dev, B2_TI_CTRL, TIM_CLR_IRQ);
782 782
783 783 /* Turn off descriptor polling. */
784 784 CSR_WRITE_1(dev, B28_DPT_CTRL, DPT_STOP);
785 785
786 786 /* Turn off time stamps. */
787 787 CSR_WRITE_1(dev, GMAC_TI_ST_CTRL, GMT_ST_STOP);
788 788 CSR_WRITE_1(dev, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
789 789
790 790 /* Don't permit config space writing */
791 791 CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
792 792
793 793 /* enable TX Arbiters */
794 794 for (i = 0; i < dev->d_num_port; i++)
795 795 CSR_WRITE_1(dev, MR_ADDR(i, TXA_CTRL), TXA_ENA_ARB);
796 796
797 797 /* Configure timeout values. */
798 798 for (i = 0; i < dev->d_num_port; i++) {
799 799 CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);
800 800
801 801 CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_WTO_R1), RI_TO_53);
802 802 CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA1), RI_TO_53);
803 803 CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS1), RI_TO_53);
804 804 CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_RTO_R1), RI_TO_53);
805 805 CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA1), RI_TO_53);
806 806 CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS1), RI_TO_53);
807 807 CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_WTO_R2), RI_TO_53);
808 808 CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA2), RI_TO_53);
809 809 CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS2), RI_TO_53);
810 810 CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_RTO_R2), RI_TO_53);
811 811 CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA2), RI_TO_53);
812 812 CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS2), RI_TO_53);
813 813 }
814 814
815 815 /* Disable all interrupts. */
816 816 CSR_WRITE_4(dev, B0_HWE_IMSK, 0);
817 817 (void) CSR_READ_4(dev, B0_HWE_IMSK);
818 818 CSR_WRITE_4(dev, B0_IMSK, 0);
819 819 (void) CSR_READ_4(dev, B0_IMSK);
820 820
821 821 /*
822 822 * On dual port PCI-X cards, there is a problem where status
823 823 * can be received out of order due to split transactions.
824 824 */
825 825 if (dev->d_bustype == PCIX_BUS && dev->d_num_port > 1) {
826 826 int pcix;
827 827 uint16_t pcix_cmd;
828 828
829 829 if ((pcix = yge_find_capability(dev, PCI_CAP_ID_PCIX)) != 0) {
830 830 pcix_cmd = pci_config_get16(pcih, pcix + 2);
831 831 /* Clear Max Outstanding Split Transactions. */
832 832 pcix_cmd &= ~0x70;
833 833 CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_ON);
834 834 pci_config_put16(pcih, pcix + 2, pcix_cmd);
835 835 CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
836 836 }
837 837 }
838 838 if (dev->d_bustype == PEX_BUS) {
839 839 uint16_t v, width;
840 840
841 841 v = pci_config_get16(pcih, PEX_DEV_CTRL);
842 842 /* Change Max. Read Request Size to 4096 bytes. */
843 843 v &= ~PEX_DC_MAX_RRS_MSK;
844 844 v |= PEX_DC_MAX_RD_RQ_SIZE(5);
845 845 pci_config_put16(pcih, PEX_DEV_CTRL, v);
846 846 width = pci_config_get16(pcih, PEX_LNK_STAT);
847 847 width = (width & PEX_LS_LINK_WI_MSK) >> 4;
848 848 v = pci_config_get16(pcih, PEX_LNK_CAP);
849 849 v = (v & PEX_LS_LINK_WI_MSK) >> 4;
850 850 if (v != width)
851 851 yge_error(dev, NULL,
852 852 "Negotiated width of PCIe link(x%d) != "
853 853 "max. width of link(x%d)\n", width, v);
854 854 }
855 855
856 856 /* Clear status list. */
857 857 CLEARRING(&dev->d_status_ring);
858 858 SYNCRING(&dev->d_status_ring, DDI_DMA_SYNC_FORDEV);
859 859
860 860 dev->d_stat_cons = 0;
861 861
862 862 CSR_WRITE_4(dev, STAT_CTRL, SC_STAT_RST_SET);
863 863 CSR_WRITE_4(dev, STAT_CTRL, SC_STAT_RST_CLR);
864 864
865 865 /* Set the status list base address. */
866 866 addr = dev->d_status_ring.r_paddr;
867 867 CSR_WRITE_4(dev, STAT_LIST_ADDR_LO, YGE_ADDR_LO(addr));
868 868 CSR_WRITE_4(dev, STAT_LIST_ADDR_HI, YGE_ADDR_HI(addr));
869 869
870 870 /* Set the status list last index. */
871 871 CSR_WRITE_2(dev, STAT_LAST_IDX, YGE_STAT_RING_CNT - 1);
872 872 CSR_WRITE_2(dev, STAT_PUT_IDX, 0);
873 873
874 874 if (dev->d_hw_id == CHIP_ID_YUKON_EC &&
875 875 dev->d_hw_rev == CHIP_REV_YU_EC_A1) {
876 876 /* WA for dev. #4.3 */
877 877 CSR_WRITE_2(dev, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK);
878 878 /* WA for dev #4.18 */
879 879 CSR_WRITE_1(dev, STAT_FIFO_WM, 0x21);
880 880 CSR_WRITE_1(dev, STAT_FIFO_ISR_WM, 7);
881 881 } else {
882 882 CSR_WRITE_2(dev, STAT_TX_IDX_TH, 10);
883 883 CSR_WRITE_1(dev, STAT_FIFO_WM, 16);
884 884
885 885 /* ISR status FIFO watermark */
886 886 if (dev->d_hw_id == CHIP_ID_YUKON_XL &&
887 887 dev->d_hw_rev == CHIP_REV_YU_XL_A0)
888 888 CSR_WRITE_1(dev, STAT_FIFO_ISR_WM, 4);
889 889 else
890 890 CSR_WRITE_1(dev, STAT_FIFO_ISR_WM, 16);
891 891
892 892 CSR_WRITE_4(dev, STAT_ISR_TIMER_INI, 0x0190);
893 893 }
894 894
895 895 /*
896 896 * Use default value for STAT_ISR_TIMER_INI, STAT_LEV_TIMER_INI.
897 897 */
898 898 CSR_WRITE_4(dev, STAT_TX_TIMER_INI, YGE_USECS(dev, 1000));
899 899
900 900 /* Enable status unit. */
901 901 CSR_WRITE_4(dev, STAT_CTRL, SC_STAT_OP_ON);
902 902
903 903 CSR_WRITE_1(dev, STAT_TX_TIMER_CTRL, TIM_START);
904 904 CSR_WRITE_1(dev, STAT_LEV_TIMER_CTRL, TIM_START);
905 905 CSR_WRITE_1(dev, STAT_ISR_TIMER_CTRL, TIM_START);
906 906 }
907 907
908 908 static int
909 909 yge_init_port(yge_port_t *port)
910 910 {
911 911 yge_dev_t *dev = port->p_dev;
912 912 int i;
913 913 mac_register_t *macp;
914 914
915 915 port->p_flags = dev->d_pflags;
916 916 port->p_ppa = ddi_get_instance(dev->d_dip) + (port->p_port * 100);
917 917
918 918 port->p_tx_buf = kmem_zalloc(sizeof (yge_buf_t) * YGE_TX_RING_CNT,
919 919 KM_SLEEP);
920 920 port->p_rx_buf = kmem_zalloc(sizeof (yge_buf_t) * YGE_RX_RING_CNT,
921 921 KM_SLEEP);
922 922
923 923 /* Setup Tx/Rx queue register offsets. */
924 924 if (port->p_port == YGE_PORT_A) {
925 925 port->p_txq = Q_XA1;
926 926 port->p_txsq = Q_XS1;
927 927 port->p_rxq = Q_R1;
928 928 } else {
929 929 port->p_txq = Q_XA2;
930 930 port->p_txsq = Q_XS2;
931 931 port->p_rxq = Q_R2;
932 932 }
933 933
934 934 /* Disable jumbo frame for Yukon FE. */
935 935 if (dev->d_hw_id == CHIP_ID_YUKON_FE)
936 936 port->p_flags |= PORT_FLAG_NOJUMBO;
937 937
938 938 /*
939 939 * Start out assuming a regular MTU. Users can change this
940 940 * with dladm. The dladm daemon is supposed to issue commands
941 941 * to change the default MTU using m_setprop during early boot
942 942 * (before the interface is plumbed) if the user has so
943 943 * requested.
944 944 */
945 945 port->p_mtu = ETHERMTU;
946 946
947 947 port->p_mii = mii_alloc(port, dev->d_dip, &yge_mii_ops);
948 948 if (port->p_mii == NULL) {
949 949 yge_error(NULL, port, "MII handle allocation failed");
950 950 return (DDI_FAILURE);
951 951 }
952 952 /* We assume all parts support asymmetric pause */
953 953 mii_set_pauseable(port->p_mii, B_TRUE, B_TRUE);
954 954
955 955 /*
956 956 * Get station address for this interface. Note that
957 957 * dual port cards actually come with three station
958 958 * addresses: one for each port, plus an extra. The
959 959 * extra one is used by the SysKonnect driver software
960 960 * as a 'virtual' station address for when both ports
961 961 * are operating in failover mode. Currently we don't
962 962 * use this extra address.
963 963 */
964 964 for (i = 0; i < ETHERADDRL; i++) {
965 965 port->p_curraddr[i] =
966 966 CSR_READ_1(dev, B2_MAC_1 + (port->p_port * 8) + i);
967 967 }
968 968
969 969 /* Register with Nemo. */
970 970 if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
971 971 yge_error(NULL, port, "MAC handle allocation failed");
972 972 return (DDI_FAILURE);
973 973 }
974 974 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
975 975 macp->m_driver = port;
976 976 macp->m_dip = dev->d_dip;
977 977 macp->m_src_addr = port->p_curraddr;
978 978 macp->m_callbacks = &yge_m_callbacks;
979 979 macp->m_min_sdu = 0;
980 980 macp->m_max_sdu = port->p_mtu;
981 981 macp->m_instance = port->p_ppa;
982 982 macp->m_margin = VLAN_TAGSZ;
983 983
984 984 port->p_mreg = macp;
985 985
986 986 return (DDI_SUCCESS);
987 987 }
988 988
989 989 static int
990 990 yge_add_intr(yge_dev_t *dev, int intr_type)
991 991 {
992 992 dev_info_t *dip;
993 993 int count;
994 994 int actual;
995 995 int rv;
996 996 int i, j;
997 997
998 998 dip = dev->d_dip;
999 999
1000 1000 rv = ddi_intr_get_nintrs(dip, intr_type, &count);
1001 1001 if ((rv != DDI_SUCCESS) || (count == 0)) {
1002 1002 yge_error(dev, NULL,
1003 1003 "ddi_intr_get_nintrs failed, rv %d, count %d", rv, count);
1004 1004 return (DDI_FAILURE);
1005 1005 }
1006 1006
1007 1007 /*
1008 1008 * Allocate the interrupt. Note that we only bother with a single
1009 1009 * interrupt. One could argue that for MSI devices with dual ports,
1010 1010 * it would be nice to have a separate interrupt per port. But right
1011 1011 * now I don't know how to configure that, so we'll just settle for
1012 1012 * a single interrupt.
1013 1013 */
1014 1014 dev->d_intrcnt = 1;
1015 1015
1016 1016 dev->d_intrsize = count * sizeof (ddi_intr_handle_t);
1017 1017 dev->d_intrh = kmem_zalloc(dev->d_intrsize, KM_SLEEP);
1018 - if (dev->d_intrh == NULL) {
1019 - yge_error(dev, NULL, "Unable to allocate interrupt handle");
1020 - return (DDI_FAILURE);
1021 - }
1022 1018
1023 1019 rv = ddi_intr_alloc(dip, dev->d_intrh, intr_type, 0, dev->d_intrcnt,
1024 1020 &actual, DDI_INTR_ALLOC_STRICT);
1025 1021 if ((rv != DDI_SUCCESS) || (actual == 0)) {
1026 1022 yge_error(dev, NULL,
1027 1023 "Unable to allocate interrupt, %d, count %d",
1028 1024 rv, actual);
1029 1025 kmem_free(dev->d_intrh, dev->d_intrsize);
1030 1026 return (DDI_FAILURE);
1031 1027 }
1032 1028
1033 1029 if ((rv = ddi_intr_get_pri(dev->d_intrh[0], &dev->d_intrpri)) !=
1034 1030 DDI_SUCCESS) {
1035 1031 for (i = 0; i < dev->d_intrcnt; i++)
1036 1032 (void) ddi_intr_free(dev->d_intrh[i]);
1037 1033 yge_error(dev, NULL,
1038 1034 "Unable to get interrupt priority, %d", rv);
1039 1035 kmem_free(dev->d_intrh, dev->d_intrsize);
1040 1036 return (DDI_FAILURE);
1041 1037 }
1042 1038
1043 1039 if ((rv = ddi_intr_get_cap(dev->d_intrh[0], &dev->d_intrcap)) !=
1044 1040 DDI_SUCCESS) {
1045 1041 yge_error(dev, NULL,
1046 1042 "Unable to get interrupt capabilities, %d", rv);
1047 1043 for (i = 0; i < dev->d_intrcnt; i++)
1048 1044 (void) ddi_intr_free(dev->d_intrh[i]);
1049 1045 kmem_free(dev->d_intrh, dev->d_intrsize);
1050 1046 return (DDI_FAILURE);
1051 1047 }
1052 1048
1053 1049 /* register interrupt handler to kernel */
1054 1050 for (i = 0; i < dev->d_intrcnt; i++) {
1055 1051 if ((rv = ddi_intr_add_handler(dev->d_intrh[i], yge_intr,
1056 1052 dev, NULL)) != DDI_SUCCESS) {
1057 1053 yge_error(dev, NULL,
1058 1054 "Unable to add interrupt handler, %d", rv);
1059 1055 for (j = 0; j < i; j++)
1060 1056 (void) ddi_intr_remove_handler(dev->d_intrh[j]);
1061 1057 for (i = 0; i < dev->d_intrcnt; i++)
1062 1058 (void) ddi_intr_free(dev->d_intrh[i]);
1063 1059 kmem_free(dev->d_intrh, dev->d_intrsize);
1064 1060 return (DDI_FAILURE);
1065 1061 }
1066 1062 }
1067 1063
1068 1064 mutex_init(&dev->d_rxlock, NULL, MUTEX_DRIVER,
1069 1065 DDI_INTR_PRI(dev->d_intrpri));
1070 1066 mutex_init(&dev->d_txlock, NULL, MUTEX_DRIVER,
1071 1067 DDI_INTR_PRI(dev->d_intrpri));
1072 1068 mutex_init(&dev->d_phylock, NULL, MUTEX_DRIVER,
1073 1069 DDI_INTR_PRI(dev->d_intrpri));
1074 1070 mutex_init(&dev->d_task_mtx, NULL, MUTEX_DRIVER,
1075 1071 DDI_INTR_PRI(dev->d_intrpri));
1076 1072
1077 1073 return (DDI_SUCCESS);
1078 1074 }
1079 1075
1080 1076 static int
1081 1077 yge_attach_intr(yge_dev_t *dev)
1082 1078 {
1083 1079 dev_info_t *dip = dev->d_dip;
1084 1080 int intr_types;
1085 1081 int rv;
1086 1082
1087 1083 /* Allocate IRQ resources. */
1088 1084 rv = ddi_intr_get_supported_types(dip, &intr_types);
1089 1085 if (rv != DDI_SUCCESS) {
1090 1086 yge_error(dev, NULL,
1091 1087 "Unable to determine supported interrupt types, %d", rv);
1092 1088 return (DDI_FAILURE);
1093 1089 }
1094 1090
1095 1091 /*
1096 1092 * We default to not supporting MSI. We've found some device
1097 1093 * and motherboard combinations don't always work well with
1098 1094 * MSI interrupts. Users may override this if they choose.
1099 1095 */
1100 1096 if (ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "msi_enable", 0) == 0) {
1101 1097 /* If msi disable property present, disable both msix/msi. */
1102 1098 if (intr_types & DDI_INTR_TYPE_FIXED) {
1103 1099 intr_types &= ~(DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_MSIX);
1104 1100 }
1105 1101 }
1106 1102
1107 1103 if (intr_types & DDI_INTR_TYPE_MSIX) {
1108 1104 if ((rv = yge_add_intr(dev, DDI_INTR_TYPE_MSIX)) ==
1109 1105 DDI_SUCCESS)
1110 1106 return (DDI_SUCCESS);
1111 1107 }
1112 1108
1113 1109 if (intr_types & DDI_INTR_TYPE_MSI) {
1114 1110 if ((rv = yge_add_intr(dev, DDI_INTR_TYPE_MSI)) ==
1115 1111 DDI_SUCCESS)
1116 1112 return (DDI_SUCCESS);
1117 1113 }
1118 1114
1119 1115 if (intr_types & DDI_INTR_TYPE_FIXED) {
1120 1116 if ((rv = yge_add_intr(dev, DDI_INTR_TYPE_FIXED)) ==
1121 1117 DDI_SUCCESS)
1122 1118 return (DDI_SUCCESS);
1123 1119 }
1124 1120
1125 1121 yge_error(dev, NULL, "Unable to configure any interrupts");
1126 1122 return (DDI_FAILURE);
1127 1123 }
1128 1124
1129 1125 static void
1130 1126 yge_intr_enable(yge_dev_t *dev)
1131 1127 {
1132 1128 int i;
1133 1129 if (dev->d_intrcap & DDI_INTR_FLAG_BLOCK) {
1134 1130 /* Call ddi_intr_block_enable() for MSI interrupts */
1135 1131 (void) ddi_intr_block_enable(dev->d_intrh, dev->d_intrcnt);
1136 1132 } else {
1137 1133 /* Call ddi_intr_enable for FIXED interrupts */
1138 1134 for (i = 0; i < dev->d_intrcnt; i++)
1139 1135 (void) ddi_intr_enable(dev->d_intrh[i]);
1140 1136 }
1141 1137 }
1142 1138
1143 1139 void
1144 1140 yge_intr_disable(yge_dev_t *dev)
1145 1141 {
1146 1142 int i;
1147 1143
1148 1144 if (dev->d_intrcap & DDI_INTR_FLAG_BLOCK) {
1149 1145 (void) ddi_intr_block_disable(dev->d_intrh, dev->d_intrcnt);
1150 1146 } else {
1151 1147 for (i = 0; i < dev->d_intrcnt; i++)
1152 1148 (void) ddi_intr_disable(dev->d_intrh[i]);
1153 1149 }
1154 1150 }
1155 1151
1156 1152 static uint8_t
1157 1153 yge_find_capability(yge_dev_t *dev, uint8_t cap)
1158 1154 {
1159 1155 uint8_t ptr;
1160 1156 uint16_t capit;
1161 1157 ddi_acc_handle_t pcih = dev->d_pcih;
1162 1158
1163 1159 if ((pci_config_get16(pcih, PCI_CONF_STAT) & PCI_STAT_CAP) == 0) {
1164 1160 return (0);
1165 1161 }
1166 1162 /* This assumes PCI, and not CardBus. */
1167 1163 ptr = pci_config_get8(pcih, PCI_CONF_CAP_PTR);
1168 1164 while (ptr != 0) {
1169 1165 capit = pci_config_get8(pcih, ptr + PCI_CAP_ID);
1170 1166 if (capit == cap) {
1171 1167 return (ptr);
1172 1168 }
1173 1169 ptr = pci_config_get8(pcih, ptr + PCI_CAP_NEXT_PTR);
1174 1170 }
1175 1171 return (0);
1176 1172 }
1177 1173
1178 1174 static int
1179 1175 yge_attach(yge_dev_t *dev)
1180 1176 {
1181 1177 dev_info_t *dip = dev->d_dip;
1182 1178 int rv;
1183 1179 int nattached;
1184 1180 uint8_t pm_cap;
1185 1181
1186 1182 if (pci_config_setup(dip, &dev->d_pcih) != DDI_SUCCESS) {
1187 1183 yge_error(dev, NULL, "Unable to map PCI configuration space");
1188 1184 goto fail;
1189 1185 }
1190 1186
1191 1187 /*
1192 1188 * Map control/status registers.
1193 1189 */
1194 1190
1195 1191 /* ensure the pmcsr status is D0 state */
1196 1192 pm_cap = yge_find_capability(dev, PCI_CAP_ID_PM);
1197 1193 if (pm_cap != 0) {
1198 1194 uint16_t pmcsr;
1199 1195 pmcsr = pci_config_get16(dev->d_pcih, pm_cap + PCI_PMCSR);
1200 1196 pmcsr &= ~PCI_PMCSR_STATE_MASK;
1201 1197 pci_config_put16(dev->d_pcih, pm_cap + PCI_PMCSR,
1202 1198 pmcsr | PCI_PMCSR_D0);
1203 1199 }
1204 1200
1205 1201 /* Enable PCI access and bus master. */
1206 1202 pci_config_put16(dev->d_pcih, PCI_CONF_COMM,
1207 1203 pci_config_get16(dev->d_pcih, PCI_CONF_COMM) |
1208 1204 PCI_COMM_IO | PCI_COMM_MAE | PCI_COMM_ME);
1209 1205
1210 1206
1211 1207 /* Allocate I/O resource */
1212 1208 rv = ddi_regs_map_setup(dip, 1, &dev->d_regs, 0, 0, &yge_regs_attr,
1213 1209 &dev->d_regsh);
1214 1210 if (rv != DDI_SUCCESS) {
1215 1211 yge_error(dev, NULL, "Unable to map device registers");
1216 1212 goto fail;
1217 1213 }
1218 1214
1219 1215
1220 1216 /* Enable all clocks. */
1221 1217 CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_ON);
1222 1218 pci_config_put32(dev->d_pcih, PCI_OUR_REG_3, 0);
1223 1219 CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
1224 1220
1225 1221 CSR_WRITE_2(dev, B0_CTST, CS_RST_CLR);
1226 1222 dev->d_hw_id = CSR_READ_1(dev, B2_CHIP_ID);
1227 1223 dev->d_hw_rev = (CSR_READ_1(dev, B2_MAC_CFG) >> 4) & 0x0f;
1228 1224
1229 1225
1230 1226 /*
1231 1227 * Bail out if chip is not recognized. Note that we only enforce
1232 1228 * this in production builds. The Ultra-2 (88e8057) has a problem
1233 1229 * right now where TX works fine, but RX seems not to. So we've
1234 1230 * disabled that for now.
1235 1231 */
1236 1232 if (dev->d_hw_id < CHIP_ID_YUKON_XL ||
1237 1233 dev->d_hw_id >= CHIP_ID_YUKON_UL_2) {
1238 1234 yge_error(dev, NULL, "Unknown device: id=0x%02x, rev=0x%02x",
1239 1235 dev->d_hw_id, dev->d_hw_rev);
1240 1236 #ifndef DEBUG
1241 1237 goto fail;
1242 1238 #endif
1243 1239 }
1244 1240
1245 1241 /* Soft reset. */
1246 1242 CSR_WRITE_2(dev, B0_CTST, CS_RST_SET);
1247 1243 CSR_WRITE_2(dev, B0_CTST, CS_RST_CLR);
1248 1244 dev->d_pmd = CSR_READ_1(dev, B2_PMD_TYP);
1249 1245 if (dev->d_pmd == 'L' || dev->d_pmd == 'S' || dev->d_pmd == 'P')
1250 1246 dev->d_coppertype = 0;
1251 1247 else
1252 1248 dev->d_coppertype = 1;
1253 1249 /* Check number of MACs. */
1254 1250 dev->d_num_port = 1;
1255 1251 if ((CSR_READ_1(dev, B2_Y2_HW_RES) & CFG_DUAL_MAC_MSK) ==
1256 1252 CFG_DUAL_MAC_MSK) {
1257 1253 if (!(CSR_READ_1(dev, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
1258 1254 dev->d_num_port++;
1259 1255 }
1260 1256
1261 1257 /* Check bus type. */
1262 1258 if (yge_find_capability(dev, PCI_CAP_ID_PCI_E) != 0) {
1263 1259 dev->d_bustype = PEX_BUS;
1264 1260 } else if (yge_find_capability(dev, PCI_CAP_ID_PCIX) != 0) {
1265 1261 dev->d_bustype = PCIX_BUS;
1266 1262 } else {
1267 1263 dev->d_bustype = PCI_BUS;
1268 1264 }
1269 1265
1270 1266 switch (dev->d_hw_id) {
1271 1267 case CHIP_ID_YUKON_EC:
1272 1268 dev->d_clock = 125; /* 125 Mhz */
1273 1269 break;
1274 1270 case CHIP_ID_YUKON_UL_2:
1275 1271 dev->d_clock = 125; /* 125 Mhz */
1276 1272 break;
1277 1273 case CHIP_ID_YUKON_SUPR:
1278 1274 dev->d_clock = 125; /* 125 Mhz */
1279 1275 break;
1280 1276 case CHIP_ID_YUKON_EC_U:
1281 1277 dev->d_clock = 125; /* 125 Mhz */
1282 1278 break;
1283 1279 case CHIP_ID_YUKON_EX:
1284 1280 dev->d_clock = 125; /* 125 Mhz */
1285 1281 break;
1286 1282 case CHIP_ID_YUKON_FE:
1287 1283 dev->d_clock = 100; /* 100 Mhz */
1288 1284 break;
1289 1285 case CHIP_ID_YUKON_FE_P:
1290 1286 dev->d_clock = 50; /* 50 Mhz */
1291 1287 break;
1292 1288 case CHIP_ID_YUKON_XL:
1293 1289 dev->d_clock = 156; /* 156 Mhz */
1294 1290 break;
1295 1291 default:
1296 1292 dev->d_clock = 156; /* 156 Mhz */
1297 1293 break;
1298 1294 }
1299 1295
1300 1296 dev->d_process_limit = YGE_RX_RING_CNT/2;
1301 1297
1302 1298 rv = yge_alloc_ring(NULL, dev, &dev->d_status_ring, YGE_STAT_RING_CNT);
1303 1299 if (rv != DDI_SUCCESS)
1304 1300 goto fail;
1305 1301
1306 1302 /* Setup event taskq. */
1307 1303 dev->d_task_q = ddi_taskq_create(dip, "tq", 1, TASKQ_DEFAULTPRI, 0);
1308 1304 if (dev->d_task_q == NULL) {
1309 1305 yge_error(dev, NULL, "failed to create taskq");
1310 1306 goto fail;
1311 1307 }
1312 1308
1313 1309 /* Init the condition variable */
1314 1310 cv_init(&dev->d_task_cv, NULL, CV_DRIVER, NULL);
1315 1311
1316 1312 /* Allocate IRQ resources. */
1317 1313 if ((rv = yge_attach_intr(dev)) != DDI_SUCCESS) {
1318 1314 goto fail;
1319 1315 }
1320 1316
1321 1317 /* Set base interrupt mask. */
1322 1318 dev->d_intrmask = Y2_IS_HW_ERR | Y2_IS_STAT_BMU;
1323 1319 dev->d_intrhwemask = Y2_IS_TIST_OV | Y2_IS_MST_ERR |
1324 1320 Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP;
1325 1321
1326 1322 /* Reset the adapter. */
1327 1323 yge_reset(dev);
1328 1324
1329 1325 yge_setup_rambuffer(dev);
1330 1326
1331 1327 nattached = 0;
1332 1328 for (int i = 0; i < dev->d_num_port; i++) {
1333 1329 yge_port_t *port = dev->d_port[i];
1334 1330 if (yge_init_port(port) != DDI_SUCCESS) {
1335 1331 goto fail;
1336 1332 }
1337 1333 }
1338 1334
1339 1335 yge_intr_enable(dev);
1340 1336
1341 1337 /* set up the periodic to run once per second */
1342 1338 dev->d_periodic = ddi_periodic_add(yge_tick, dev, 1000000000, 0);
1343 1339
1344 1340 for (int i = 0; i < dev->d_num_port; i++) {
1345 1341 yge_port_t *port = dev->d_port[i];
1346 1342 if (yge_register_port(port) == DDI_SUCCESS) {
1347 1343 nattached++;
1348 1344 }
1349 1345 }
1350 1346
1351 1347 if (nattached == 0) {
1352 1348 goto fail;
1353 1349 }
1354 1350
1355 1351 /* Dispatch the taskq */
1356 1352 if (ddi_taskq_dispatch(dev->d_task_q, yge_task, dev, DDI_SLEEP) !=
1357 1353 DDI_SUCCESS) {
1358 1354 yge_error(dev, NULL, "failed to start taskq");
1359 1355 goto fail;
1360 1356 }
1361 1357
1362 1358 ddi_report_dev(dip);
1363 1359
1364 1360 return (DDI_SUCCESS);
1365 1361
1366 1362 fail:
1367 1363 yge_detach(dev);
1368 1364 return (DDI_FAILURE);
1369 1365 }
1370 1366
1371 1367 static int
1372 1368 yge_register_port(yge_port_t *port)
1373 1369 {
1374 1370 if (mac_register(port->p_mreg, &port->p_mh) != DDI_SUCCESS) {
1375 1371 yge_error(NULL, port, "MAC registration failed");
1376 1372 return (DDI_FAILURE);
1377 1373 }
1378 1374
1379 1375 return (DDI_SUCCESS);
1380 1376 }
1381 1377
1382 1378 /*
1383 1379 * Free up port specific resources. This is called only when the
1384 1380 * port is not registered (and hence not running).
1385 1381 */
1386 1382 static void
1387 1383 yge_uninit_port(yge_port_t *port)
1388 1384 {
1389 1385 ASSERT(!port->p_running);
1390 1386
1391 1387 if (port->p_mreg)
1392 1388 mac_free(port->p_mreg);
1393 1389
1394 1390 if (port->p_mii)
1395 1391 mii_free(port->p_mii);
1396 1392
1397 1393 yge_txrx_dma_free(port);
1398 1394
1399 1395 if (port->p_tx_buf)
1400 1396 kmem_free(port->p_tx_buf,
1401 1397 sizeof (yge_buf_t) * YGE_TX_RING_CNT);
1402 1398 if (port->p_rx_buf)
1403 1399 kmem_free(port->p_rx_buf,
1404 1400 sizeof (yge_buf_t) * YGE_RX_RING_CNT);
1405 1401 }
1406 1402
1407 1403 static void
1408 1404 yge_detach(yge_dev_t *dev)
1409 1405 {
1410 1406 /*
1411 1407 * Turn off the periodic.
1412 1408 */
1413 1409 if (dev->d_periodic)
1414 1410 ddi_periodic_delete(dev->d_periodic);
1415 1411
1416 1412 for (int i = 0; i < dev->d_num_port; i++) {
1417 1413 yge_uninit_port(dev->d_port[i]);
1418 1414 }
1419 1415
1420 1416 /*
1421 1417 * Make sure all interrupts are disabled.
1422 1418 */
1423 1419 CSR_WRITE_4(dev, B0_IMSK, 0);
1424 1420 (void) CSR_READ_4(dev, B0_IMSK);
1425 1421 CSR_WRITE_4(dev, B0_HWE_IMSK, 0);
1426 1422 (void) CSR_READ_4(dev, B0_HWE_IMSK);
1427 1423
1428 1424 /* LED Off. */
1429 1425 CSR_WRITE_2(dev, B0_CTST, Y2_LED_STAT_OFF);
1430 1426
1431 1427 /* Put hardware reset. */
1432 1428 CSR_WRITE_2(dev, B0_CTST, CS_RST_SET);
1433 1429
1434 1430 yge_free_ring(&dev->d_status_ring);
1435 1431
1436 1432 if (dev->d_task_q != NULL) {
1437 1433 yge_dispatch(dev, YGE_TASK_EXIT);
1438 1434 ddi_taskq_destroy(dev->d_task_q);
1439 1435 dev->d_task_q = NULL;
1440 1436 }
1441 1437
1442 1438 cv_destroy(&dev->d_task_cv);
1443 1439
1444 1440 yge_intr_disable(dev);
1445 1441
1446 1442 if (dev->d_intrh != NULL) {
1447 1443 for (int i = 0; i < dev->d_intrcnt; i++) {
1448 1444 (void) ddi_intr_remove_handler(dev->d_intrh[i]);
1449 1445 (void) ddi_intr_free(dev->d_intrh[i]);
1450 1446 }
1451 1447 kmem_free(dev->d_intrh, dev->d_intrsize);
1452 1448 mutex_destroy(&dev->d_phylock);
1453 1449 mutex_destroy(&dev->d_txlock);
1454 1450 mutex_destroy(&dev->d_rxlock);
1455 1451 mutex_destroy(&dev->d_task_mtx);
1456 1452 }
1457 1453 if (dev->d_regsh != NULL)
1458 1454 ddi_regs_map_free(&dev->d_regsh);
1459 1455
1460 1456 if (dev->d_pcih != NULL)
1461 1457 pci_config_teardown(&dev->d_pcih);
1462 1458 }
1463 1459
1464 1460 static int
1465 1461 yge_alloc_ring(yge_port_t *port, yge_dev_t *dev, yge_ring_t *ring, uint32_t num)
1466 1462 {
1467 1463 dev_info_t *dip;
1468 1464 caddr_t kaddr;
1469 1465 size_t len;
1470 1466 int rv;
1471 1467 ddi_dma_cookie_t dmac;
1472 1468 unsigned ndmac;
1473 1469
1474 1470 if (port && !dev)
1475 1471 dev = port->p_dev;
1476 1472 dip = dev->d_dip;
1477 1473
1478 1474 ring->r_num = num;
1479 1475
1480 1476 rv = ddi_dma_alloc_handle(dip, &yge_ring_dma_attr, DDI_DMA_DONTWAIT,
1481 1477 NULL, &ring->r_dmah);
1482 1478 if (rv != DDI_SUCCESS) {
1483 1479 yge_error(dev, port, "Unable to allocate ring DMA handle");
1484 1480 return (DDI_FAILURE);
1485 1481 }
1486 1482
1487 1483 rv = ddi_dma_mem_alloc(ring->r_dmah, num * sizeof (yge_desc_t),
1488 1484 &yge_ring_attr, DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
1489 1485 &kaddr, &len, &ring->r_acch);
1490 1486 if (rv != DDI_SUCCESS) {
1491 1487 yge_error(dev, port, "Unable to allocate ring DMA memory");
1492 1488 return (DDI_FAILURE);
1493 1489 }
1494 1490 ring->r_size = len;
1495 1491 ring->r_kaddr = (void *)kaddr;
1496 1492
1497 1493 bzero(kaddr, len);
1498 1494
1499 1495 rv = ddi_dma_addr_bind_handle(ring->r_dmah, NULL, kaddr,
1500 1496 len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
1501 1497 &dmac, &ndmac);
1502 1498 if (rv != DDI_DMA_MAPPED) {
1503 1499 yge_error(dev, port, "Unable to bind ring DMA handle");
1504 1500 return (DDI_FAILURE);
1505 1501 }
1506 1502 ASSERT(ndmac == 1);
1507 1503 ring->r_paddr = dmac.dmac_address;
1508 1504
1509 1505 return (DDI_SUCCESS);
1510 1506 }
1511 1507
1512 1508 static void
1513 1509 yge_free_ring(yge_ring_t *ring)
1514 1510 {
1515 1511 if (ring->r_paddr)
1516 1512 (void) ddi_dma_unbind_handle(ring->r_dmah);
1517 1513 ring->r_paddr = 0;
1518 1514 if (ring->r_acch)
1519 1515 ddi_dma_mem_free(&ring->r_acch);
1520 1516 ring->r_kaddr = NULL;
1521 1517 ring->r_acch = NULL;
1522 1518 if (ring->r_dmah)
1523 1519 ddi_dma_free_handle(&ring->r_dmah);
1524 1520 ring->r_dmah = NULL;
1525 1521 }
1526 1522
1527 1523 static int
1528 1524 yge_alloc_buf(yge_port_t *port, yge_buf_t *b, size_t bufsz, int flag)
1529 1525 {
1530 1526 yge_dev_t *dev = port->p_dev;
1531 1527 size_t l;
1532 1528 int sflag;
1533 1529 int rv;
1534 1530 ddi_dma_cookie_t dmac;
1535 1531 unsigned ndmac;
1536 1532
1537 1533 sflag = flag & (DDI_DMA_STREAMING | DDI_DMA_CONSISTENT);
1538 1534
1539 1535 /* Now allocate Tx buffers. */
1540 1536 rv = ddi_dma_alloc_handle(dev->d_dip, &yge_buf_dma_attr,
1541 1537 DDI_DMA_DONTWAIT, NULL, &b->b_dmah);
1542 1538 if (rv != DDI_SUCCESS) {
1543 1539 yge_error(NULL, port, "Unable to alloc DMA handle for buffer");
1544 1540 return (DDI_FAILURE);
1545 1541 }
1546 1542
1547 1543 rv = ddi_dma_mem_alloc(b->b_dmah, bufsz, &yge_buf_attr,
1548 1544 sflag, DDI_DMA_DONTWAIT, NULL, &b->b_buf, &l, &b->b_acch);
1549 1545 if (rv != DDI_SUCCESS) {
1550 1546 yge_error(NULL, port, "Unable to alloc DMA memory for buffer");
1551 1547 return (DDI_FAILURE);
1552 1548 }
1553 1549
1554 1550 rv = ddi_dma_addr_bind_handle(b->b_dmah, NULL, b->b_buf, l, flag,
1555 1551 DDI_DMA_DONTWAIT, NULL, &dmac, &ndmac);
1556 1552 if (rv != DDI_DMA_MAPPED) {
1557 1553 yge_error(NULL, port, "Unable to bind DMA handle for buffer");
1558 1554 return (DDI_FAILURE);
1559 1555 }
1560 1556 ASSERT(ndmac == 1);
1561 1557 b->b_paddr = dmac.dmac_address;
1562 1558 return (DDI_SUCCESS);
1563 1559 }
1564 1560
1565 1561 static void
1566 1562 yge_free_buf(yge_buf_t *b)
1567 1563 {
1568 1564 if (b->b_paddr)
1569 1565 (void) ddi_dma_unbind_handle(b->b_dmah);
1570 1566 b->b_paddr = 0;
1571 1567 if (b->b_acch)
1572 1568 ddi_dma_mem_free(&b->b_acch);
1573 1569 b->b_buf = NULL;
1574 1570 b->b_acch = NULL;
1575 1571 if (b->b_dmah)
1576 1572 ddi_dma_free_handle(&b->b_dmah);
1577 1573 b->b_dmah = NULL;
1578 1574 }
1579 1575
1580 1576 static int
1581 1577 yge_txrx_dma_alloc(yge_port_t *port)
1582 1578 {
1583 1579 uint32_t bufsz;
1584 1580 int rv;
1585 1581 int i;
1586 1582 yge_buf_t *b;
1587 1583
1588 1584 /*
1589 1585 * It seems that Yukon II supports full 64 bit DMA operations.
1590 1586 * But we limit it to 32 bits only for now. The 64 bit
1591 1587 * operation would require substantially more complex
1592 1588 * descriptor handling, since in such a case we would need two
1593 1589 * LEs to represent a single physical address.
1594 1590 *
1595 1591 * If we find that this is limiting us, then we should go back
1596 1592 * and re-examine it.
1597 1593 */
1598 1594
1599 1595 /* Note our preferred buffer size. */
1600 1596 bufsz = port->p_mtu;
1601 1597
1602 1598 /* Allocate Tx ring. */
1603 1599 rv = yge_alloc_ring(port, NULL, &port->p_tx_ring, YGE_TX_RING_CNT);
1604 1600 if (rv != DDI_SUCCESS) {
1605 1601 return (DDI_FAILURE);
1606 1602 }
1607 1603
1608 1604 /* Now allocate Tx buffers. */
1609 1605 b = port->p_tx_buf;
1610 1606 for (i = 0; i < YGE_TX_RING_CNT; i++) {
1611 1607 rv = yge_alloc_buf(port, b, bufsz,
1612 1608 DDI_DMA_STREAMING | DDI_DMA_WRITE);
1613 1609 if (rv != DDI_SUCCESS) {
1614 1610 return (DDI_FAILURE);
1615 1611 }
1616 1612 b++;
1617 1613 }
1618 1614
1619 1615 /* Allocate Rx ring. */
1620 1616 rv = yge_alloc_ring(port, NULL, &port->p_rx_ring, YGE_RX_RING_CNT);
1621 1617 if (rv != DDI_SUCCESS) {
1622 1618 return (DDI_FAILURE);
1623 1619 }
1624 1620
1625 1621 /* Now allocate Rx buffers. */
1626 1622 b = port->p_rx_buf;
1627 1623 for (i = 0; i < YGE_RX_RING_CNT; i++) {
1628 1624 rv = yge_alloc_buf(port, b, bufsz,
1629 1625 DDI_DMA_STREAMING | DDI_DMA_READ);
1630 1626 if (rv != DDI_SUCCESS) {
1631 1627 return (DDI_FAILURE);
1632 1628 }
1633 1629 b++;
1634 1630 }
1635 1631
1636 1632 return (DDI_SUCCESS);
1637 1633 }
1638 1634
1639 1635 static void
1640 1636 yge_txrx_dma_free(yge_port_t *port)
1641 1637 {
1642 1638 yge_buf_t *b;
1643 1639
1644 1640 /* Tx ring. */
1645 1641 yge_free_ring(&port->p_tx_ring);
1646 1642
1647 1643 /* Rx ring. */
1648 1644 yge_free_ring(&port->p_rx_ring);
1649 1645
1650 1646 /* Tx buffers. */
1651 1647 b = port->p_tx_buf;
1652 1648 for (int i = 0; i < YGE_TX_RING_CNT; i++, b++) {
1653 1649 yge_free_buf(b);
1654 1650 }
1655 1651 /* Rx buffers. */
1656 1652 b = port->p_rx_buf;
1657 1653 for (int i = 0; i < YGE_RX_RING_CNT; i++, b++) {
1658 1654 yge_free_buf(b);
1659 1655 }
1660 1656 }
1661 1657
1662 1658 boolean_t
1663 1659 yge_send(yge_port_t *port, mblk_t *mp)
1664 1660 {
1665 1661 yge_ring_t *ring = &port->p_tx_ring;
1666 1662 yge_buf_t *txb;
1667 1663 int16_t prod;
1668 1664 size_t len;
1669 1665
1670 1666 /*
1671 1667 * For now we're not going to support checksum offload or LSO.
1672 1668 */
1673 1669
1674 1670 len = msgsize(mp);
1675 1671 if (len > port->p_framesize) {
1676 1672 /* too big! */
1677 1673 freemsg(mp);
1678 1674 return (B_TRUE);
1679 1675 }
1680 1676
1681 1677 /* Check number of available descriptors. */
1682 1678 if (port->p_tx_cnt + 1 >=
1683 1679 (YGE_TX_RING_CNT - YGE_RESERVED_TX_DESC_CNT)) {
1684 1680 port->p_wantw = B_TRUE;
1685 1681 return (B_FALSE);
1686 1682 }
1687 1683
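	/*
	 * Copy the entire message into the preallocated bounce buffer for
	 * this slot and describe the frame with a single list element
	 * (OP_PACKET with EOP), handing ownership to the hardware.
	 */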
1688 1684 prod = port->p_tx_prod;
1689 1685
1690 1686 txb = &port->p_tx_buf[prod];
1691 1687 mcopymsg(mp, txb->b_buf);
1692 1688 SYNCBUF(txb, DDI_DMA_SYNC_FORDEV);
1693 1689
1694 1690 PUTADDR(ring, prod, txb->b_paddr);
1695 1691 PUTCTRL(ring, prod, len | OP_PACKET | HW_OWNER | EOP);
1696 1692 SYNCENTRY(ring, prod, DDI_DMA_SYNC_FORDEV);
1697 1693 port->p_tx_cnt++;
1698 1694
1699 1695 YGE_INC(prod, YGE_TX_RING_CNT);
1700 1696
1701 1697 /* Update producer index. */
1702 1698 port->p_tx_prod = prod;
1703 1699
1704 1700 return (B_TRUE);
1705 1701 }
1706 1702
1707 1703 static int
1708 1704 yge_suspend(yge_dev_t *dev)
1709 1705 {
1710 1706 for (int i = 0; i < dev->d_num_port; i++) {
1711 1707 yge_port_t *port = dev->d_port[i];
1712 1708 mii_suspend(port->p_mii);
1713 1709 }
1714 1710
1715 1711
1716 1712 DEV_LOCK(dev);
1717 1713
1718 1714 for (int i = 0; i < dev->d_num_port; i++) {
1719 1715 yge_port_t *port = dev->d_port[i];
1720 1716
1721 1717 if (port->p_running) {
1722 1718 yge_stop_port(port);
1723 1719 }
1724 1720 }
1725 1721
1726 1722 /* Disable all interrupts. */
1727 1723 CSR_WRITE_4(dev, B0_IMSK, 0);
1728 1724 (void) CSR_READ_4(dev, B0_IMSK);
1729 1725 CSR_WRITE_4(dev, B0_HWE_IMSK, 0);
1730 1726 (void) CSR_READ_4(dev, B0_HWE_IMSK);
1731 1727
1732 1728 yge_phy_power(dev, B_FALSE);
1733 1729
1734 1730 	/* Put the hardware into reset. */
1735 1731 CSR_WRITE_2(dev, B0_CTST, CS_RST_SET);
1736 1732 dev->d_suspended = B_TRUE;
1737 1733
1738 1734 DEV_UNLOCK(dev);
1739 1735
1740 1736 return (DDI_SUCCESS);
1741 1737 }
1742 1738
1743 1739 static int
1744 1740 yge_resume(yge_dev_t *dev)
1745 1741 {
1746 1742 uint8_t pm_cap;
1747 1743
1748 1744 DEV_LOCK(dev);
1749 1745
1750 1746 /* ensure the pmcsr status is D0 state */
1751 1747 CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_ON);
1752 1748
1753 1749 if ((pm_cap = yge_find_capability(dev, PCI_CAP_ID_PM)) != 0) {
1754 1750 uint16_t pmcsr;
1755 1751 pmcsr = pci_config_get16(dev->d_pcih, pm_cap + PCI_PMCSR);
1756 1752 pmcsr &= ~PCI_PMCSR_STATE_MASK;
1757 1753 pci_config_put16(dev->d_pcih, pm_cap + PCI_PMCSR,
1758 1754 pmcsr | PCI_PMCSR_D0);
1759 1755 }
1760 1756
1761 1757 /* Enable PCI access and bus master. */
1762 1758 pci_config_put16(dev->d_pcih, PCI_CONF_COMM,
1763 1759 pci_config_get16(dev->d_pcih, PCI_CONF_COMM) |
1764 1760 PCI_COMM_IO | PCI_COMM_MAE | PCI_COMM_ME);
1765 1761
1766 1762 /* Enable all clocks. */
1767 1763 switch (dev->d_hw_id) {
1768 1764 case CHIP_ID_YUKON_EX:
1769 1765 case CHIP_ID_YUKON_EC_U:
1770 1766 case CHIP_ID_YUKON_FE_P:
1771 1767 pci_config_put32(dev->d_pcih, PCI_OUR_REG_3, 0);
1772 1768 break;
1773 1769 }
1774 1770
1775 1771 CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
1776 1772
1777 1773 yge_reset(dev);
1778 1774
1779 1775 /* Make sure interrupts are reenabled */
1780 1776 CSR_WRITE_4(dev, B0_IMSK, 0);
1781 1777 CSR_WRITE_4(dev, B0_IMSK, Y2_IS_HW_ERR | Y2_IS_STAT_BMU);
1782 1778 CSR_WRITE_4(dev, B0_HWE_IMSK,
1783 1779 Y2_IS_TIST_OV | Y2_IS_MST_ERR |
1784 1780 Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP);
1785 1781
1786 1782 for (int i = 0; i < dev->d_num_port; i++) {
1787 1783 yge_port_t *port = dev->d_port[i];
1788 1784
1789 1785 if (port != NULL && port->p_running) {
1790 1786 yge_start_port(port);
1791 1787 }
1792 1788 }
1793 1789 dev->d_suspended = B_FALSE;
1794 1790
1795 1791 DEV_UNLOCK(dev);
1796 1792
1797 1793 /* Reset MII layer */
1798 1794 for (int i = 0; i < dev->d_num_port; i++) {
1799 1795 yge_port_t *port = dev->d_port[i];
1800 1796
1801 1797 if (port->p_running) {
1802 1798 mii_resume(port->p_mii);
1803 1799 mac_tx_update(port->p_mh);
1804 1800 }
1805 1801 }
1806 1802
1807 1803 return (DDI_SUCCESS);
1808 1804 }
1809 1805
1810 1806 static mblk_t *
1811 1807 yge_rxeof(yge_port_t *port, uint32_t status, int len)
1812 1808 {
1813 1809 yge_dev_t *dev = port->p_dev;
1814 1810 mblk_t *mp;
1815 1811 int cons, rxlen;
1816 1812 yge_buf_t *rxb;
1817 1813 yge_ring_t *ring;
1818 1814
1819 1815 ASSERT(mutex_owned(&dev->d_rxlock));
1820 1816
1821 1817 if (!port->p_running)
1822 1818 return (NULL);
1823 1819
1824 1820 ring = &port->p_rx_ring;
1825 1821 cons = port->p_rx_cons;
1826 1822 rxlen = status >> 16;
1827 1823 rxb = &port->p_rx_buf[cons];
1828 1824 mp = NULL;
1829 1825
1830 1826
1831 1827 if ((dev->d_hw_id == CHIP_ID_YUKON_FE_P) &&
1832 1828 (dev->d_hw_rev == CHIP_REV_YU_FE2_A0)) {
1833 1829 /*
1834 1830 * Apparently the status for this chip is not reliable.
1835 1831 * Only perform minimal consistency checking; the MAC
1836 1832 * and upper protocols will have to filter any garbage.
1837 1833 */
1838 1834 if ((len > port->p_framesize) || (rxlen != len)) {
1839 1835 goto bad;
1840 1836 }
1841 1837 } else {
1842 1838 if ((len > port->p_framesize) || (rxlen != len) ||
1843 1839 ((status & GMR_FS_ANY_ERR) != 0) ||
1844 1840 ((status & GMR_FS_RX_OK) == 0)) {
1845 1841 goto bad;
1846 1842 }
1847 1843 }
1848 1844
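	/*
	 * Copy the frame out of the DMA buffer into a freshly allocated
	 * mblk; the receive buffer itself is recycled below by handing the
	 * descriptor back to the hardware.
	 */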
1849 1845 if ((mp = allocb(len + YGE_HEADROOM, BPRI_HI)) != NULL) {
1850 1846
1851 1847 /* good packet - yay */
1852 1848 mp->b_rptr += YGE_HEADROOM;
1853 1849 SYNCBUF(rxb, DDI_DMA_SYNC_FORKERNEL);
1854 1850 bcopy(rxb->b_buf, mp->b_rptr, len);
1855 1851 mp->b_wptr = mp->b_rptr + len;
1856 1852 } else {
1857 1853 port->p_stats.rx_nobuf++;
1858 1854 }
1859 1855
1860 1856 bad:
1861 1857
1862 1858 PUTCTRL(ring, cons, port->p_framesize | OP_PACKET | HW_OWNER);
1863 1859 SYNCENTRY(ring, cons, DDI_DMA_SYNC_FORDEV);
1864 1860
1865 1861 CSR_WRITE_2(dev,
1866 1862 Y2_PREF_Q_ADDR(port->p_rxq, PREF_UNIT_PUT_IDX_REG),
1867 1863 cons);
1868 1864
1869 1865 YGE_INC(port->p_rx_cons, YGE_RX_RING_CNT);
1870 1866
1871 1867 return (mp);
1872 1868 }
1873 1869
1874 1870 static boolean_t
1875 1871 yge_txeof_locked(yge_port_t *port, int idx)
1876 1872 {
1877 1873 int prog;
1878 1874 int16_t cons;
1879 1875 boolean_t resched;
1880 1876
1881 1877 if (!port->p_running) {
1882 1878 return (B_FALSE);
1883 1879 }
1884 1880
1885 1881 cons = port->p_tx_cons;
1886 1882 prog = 0;
1887 1883 for (; cons != idx; YGE_INC(cons, YGE_TX_RING_CNT)) {
1888 1884 if (port->p_tx_cnt <= 0)
1889 1885 break;
1890 1886 prog++;
1891 1887 port->p_tx_cnt--;
1892 1888 /* No need to sync LEs as we didn't update LEs. */
1893 1889 }
1894 1890
1895 1891 port->p_tx_cons = cons;
1896 1892
1897 1893 if (prog > 0) {
1898 1894 resched = port->p_wantw;
1899 1895 port->p_tx_wdog = 0;
1900 1896 port->p_wantw = B_FALSE;
1901 1897 return (resched);
1902 1898 } else {
1903 1899 return (B_FALSE);
1904 1900 }
1905 1901 }
1906 1902
1907 1903 static void
1908 1904 yge_txeof(yge_port_t *port, int idx)
1909 1905 {
1910 1906 boolean_t resched;
1911 1907
1912 1908 TX_LOCK(port->p_dev);
1913 1909
1914 1910 resched = yge_txeof_locked(port, idx);
1915 1911
1916 1912 TX_UNLOCK(port->p_dev);
1917 1913
1918 1914 if (resched && port->p_running) {
1919 1915 mac_tx_update(port->p_mh);
1920 1916 }
1921 1917 }
1922 1918
1923 1919 static void
1924 1920 yge_restart_task(yge_dev_t *dev)
1925 1921 {
1926 1922 yge_port_t *port;
1927 1923
1928 1924 DEV_LOCK(dev);
1929 1925
1930 1926 /* Cancel pending I/O and free all Rx/Tx buffers. */
1931 1927 for (int i = 0; i < dev->d_num_port; i++) {
1932 1928 port = dev->d_port[i];
1933 1929 if (port->p_running)
1934 1930 yge_stop_port(dev->d_port[i]);
1935 1931 }
1936 1932 yge_reset(dev);
1937 1933 for (int i = 0; i < dev->d_num_port; i++) {
1938 1934 port = dev->d_port[i];
1939 1935
1940 1936 if (port->p_running)
1941 1937 yge_start_port(port);
1942 1938 }
1943 1939
1944 1940 DEV_UNLOCK(dev);
1945 1941
1946 1942 for (int i = 0; i < dev->d_num_port; i++) {
1947 1943 port = dev->d_port[i];
1948 1944
1949 1945 mii_reset(port->p_mii);
1950 1946 if (port->p_running)
1951 1947 mac_tx_update(port->p_mh);
1952 1948 }
1953 1949 }
1954 1950
1955 1951 static void
1956 1952 yge_tick(void *arg)
1957 1953 {
1958 1954 yge_dev_t *dev = arg;
1959 1955 yge_port_t *port;
1960 1956 boolean_t restart = B_FALSE;
1961 1957 boolean_t resched = B_FALSE;
1962 1958 int idx;
1963 1959
1964 1960 DEV_LOCK(dev);
1965 1961
1966 1962 if (dev->d_suspended) {
1967 1963 DEV_UNLOCK(dev);
1968 1964 return;
1969 1965 }
1970 1966
1971 1967 for (int i = 0; i < dev->d_num_port; i++) {
1972 1968 port = dev->d_port[i];
1973 1969
1974 1970 if (!port->p_running)
1975 1971 continue;
1976 1972
1977 1973 if (port->p_tx_cnt) {
1978 1974 uint32_t ridx;
1979 1975
1980 1976 /*
1981 1977 * Reclaim first as there is a possibility of losing
1982 1978 * Tx completion interrupts.
1983 1979 */
1984 1980 ridx = port->p_port == YGE_PORT_A ?
1985 1981 STAT_TXA1_RIDX : STAT_TXA2_RIDX;
1986 1982 idx = CSR_READ_2(dev, ridx);
1987 1983 if (port->p_tx_cons != idx) {
1988 1984 resched = yge_txeof_locked(port, idx);
1989 1985
1990 1986 } else {
1991 1987
1992 1988 /* detect TX hang */
1993 1989 port->p_tx_wdog++;
1994 1990 if (port->p_tx_wdog > YGE_TX_TIMEOUT) {
1995 1991 port->p_tx_wdog = 0;
1996 1992 yge_error(NULL, port,
1997 1993 "TX hang detected!");
1998 1994 restart = B_TRUE;
1999 1995 }
2000 1996 }
2001 1997 }
2002 1998 }
2003 1999
2004 2000 DEV_UNLOCK(dev);
2005 2001 if (restart) {
2006 2002 yge_dispatch(dev, YGE_TASK_RESTART);
2007 2003 } else {
2008 2004 if (resched) {
2009 2005 for (int i = 0; i < dev->d_num_port; i++) {
2010 2006 port = dev->d_port[i];
2011 2007
2012 2008 if (port->p_running)
2013 2009 mac_tx_update(port->p_mh);
2014 2010 }
2015 2011 }
2016 2012 }
2017 2013 }
2018 2014
2019 2015 static int
2020 2016 yge_intr_gmac(yge_port_t *port)
2021 2017 {
2022 2018 yge_dev_t *dev = port->p_dev;
2023 2019 int pnum = port->p_port;
2024 2020 uint8_t status;
2025 2021 int dispatch_wrk = 0;
2026 2022
2027 2023 status = CSR_READ_1(dev, MR_ADDR(pnum, GMAC_IRQ_SRC));
2028 2024
2029 2025 /* GMAC Rx FIFO overrun. */
2030 2026 if ((status & GM_IS_RX_FF_OR) != 0) {
2031 2027 CSR_WRITE_4(dev, MR_ADDR(pnum, RX_GMF_CTRL_T), GMF_CLI_RX_FO);
2032 2028 yge_error(NULL, port, "Rx FIFO overrun!");
2033 2029 dispatch_wrk |= YGE_TASK_RESTART;
2034 2030 }
2035 2031 /* GMAC Tx FIFO underrun. */
2036 2032 if ((status & GM_IS_TX_FF_UR) != 0) {
2037 2033 CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_CTRL_T), GMF_CLI_TX_FU);
2038 2034 yge_error(NULL, port, "Tx FIFO underrun!");
2039 2035 /*
2040 2036 * In case of Tx underrun, we may need to flush/reset
2041 2037 * Tx MAC but that would also require
2042 2038 * resynchronization with status LEs. Reinitializing
2043 2039 * status LEs would affect the other port in dual MAC
2044 2040 * configuration so it should be avoided if we can.
2045 2041 		 * Due to the lack of documentation this is all a vague
2046 2042 		 * guess, but it needs more investigation.
2047 2043 */
2048 2044 }
2049 2045 return (dispatch_wrk);
2050 2046 }
2051 2047
2052 2048 static void
2053 2049 yge_handle_hwerr(yge_port_t *port, uint32_t status)
2054 2050 {
2055 2051 yge_dev_t *dev = port->p_dev;
2056 2052
2057 2053 if ((status & Y2_IS_PAR_RD1) != 0) {
2058 2054 yge_error(NULL, port, "RAM buffer read parity error");
2059 2055 /* Clear IRQ. */
2060 2056 CSR_WRITE_2(dev, SELECT_RAM_BUFFER(port->p_port, B3_RI_CTRL),
2061 2057 RI_CLR_RD_PERR);
2062 2058 }
2063 2059 if ((status & Y2_IS_PAR_WR1) != 0) {
2064 2060 yge_error(NULL, port, "RAM buffer write parity error");
2065 2061 /* Clear IRQ. */
2066 2062 CSR_WRITE_2(dev, SELECT_RAM_BUFFER(port->p_port, B3_RI_CTRL),
2067 2063 RI_CLR_WR_PERR);
2068 2064 }
2069 2065 if ((status & Y2_IS_PAR_MAC1) != 0) {
2070 2066 yge_error(NULL, port, "Tx MAC parity error");
2071 2067 /* Clear IRQ. */
2072 2068 CSR_WRITE_4(dev, MR_ADDR(port->p_port, TX_GMF_CTRL_T),
2073 2069 GMF_CLI_TX_PE);
2074 2070 }
2075 2071 if ((status & Y2_IS_PAR_RX1) != 0) {
2076 2072 yge_error(NULL, port, "Rx parity error");
2077 2073 /* Clear IRQ. */
2078 2074 CSR_WRITE_4(dev, Q_ADDR(port->p_rxq, Q_CSR), BMU_CLR_IRQ_PAR);
2079 2075 }
2080 2076 if ((status & (Y2_IS_TCP_TXS1 | Y2_IS_TCP_TXA1)) != 0) {
2081 2077 yge_error(NULL, port, "TCP segmentation error");
2082 2078 /* Clear IRQ. */
2083 2079 CSR_WRITE_4(dev, Q_ADDR(port->p_txq, Q_CSR), BMU_CLR_IRQ_TCP);
2084 2080 }
2085 2081 }
2086 2082
2087 2083 static void
2088 2084 yge_intr_hwerr(yge_dev_t *dev)
2089 2085 {
2090 2086 uint32_t status;
2091 2087 uint32_t tlphead[4];
2092 2088
2093 2089 status = CSR_READ_4(dev, B0_HWE_ISRC);
2094 2090 /* Time Stamp timer overflow. */
2095 2091 if ((status & Y2_IS_TIST_OV) != 0)
2096 2092 CSR_WRITE_1(dev, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
2097 2093 if ((status & Y2_IS_PCI_NEXP) != 0) {
2098 2094 /*
2099 2095 * PCI Express Error occurred which is not described in PEX
2100 2096 * spec.
2101 2097 		 * This error is also mapped either to the Master Abort
2102 2098 		 * (Y2_IS_MST_ERR) or Target Abort (Y2_IS_IRQ_STAT) bit and
2103 2099 * can only be cleared there.
2104 2100 */
2105 2101 yge_error(dev, NULL, "PCI Express protocol violation error");
2106 2102 }
2107 2103
2108 2104 if ((status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) != 0) {
2109 2105 uint16_t v16;
2110 2106
2111 2107 if ((status & Y2_IS_IRQ_STAT) != 0)
2112 2108 yge_error(dev, NULL, "Unexpected IRQ Status error");
2113 2109 if ((status & Y2_IS_MST_ERR) != 0)
2114 2110 yge_error(dev, NULL, "Unexpected IRQ Master error");
2115 2111 /* Reset all bits in the PCI status register. */
2116 2112 v16 = pci_config_get16(dev->d_pcih, PCI_CONF_STAT);
2117 2113 CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2118 2114 pci_config_put16(dev->d_pcih, PCI_CONF_STAT, v16 |
2119 2115 PCI_STAT_S_PERROR | PCI_STAT_S_SYSERR | PCI_STAT_R_MAST_AB |
2120 2116 PCI_STAT_R_TARG_AB | PCI_STAT_PERROR);
2121 2117 CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
2122 2118 }
2123 2119
2124 2120 /* Check for PCI Express Uncorrectable Error. */
2125 2121 if ((status & Y2_IS_PCI_EXP) != 0) {
2126 2122 uint32_t v32;
2127 2123
2128 2124 /*
2129 2125 * On PCI Express bus bridges are called root complexes (RC).
2130 2126 * PCI Express errors are recognized by the root complex too,
2131 2127 		 * which requests the system to handle the problem. After an
2132 2128 		 * error occurs, it may be that no further access to the
2133 2129 		 * adapter is possible.
2134 2130 */
2135 2131
2136 2132 v32 = CSR_PCI_READ_4(dev, PEX_UNC_ERR_STAT);
2137 2133 if ((v32 & PEX_UNSUP_REQ) != 0) {
2138 2134 /* Ignore unsupported request error. */
2139 2135 yge_error(dev, NULL,
2140 2136 "Uncorrectable PCI Express error");
2141 2137 }
2142 2138 if ((v32 & (PEX_FATAL_ERRORS | PEX_POIS_TLP)) != 0) {
2143 2139 int i;
2144 2140
2145 2141 			/* Get TLP header from Log Registers. */
2146 2142 for (i = 0; i < 4; i++)
2147 2143 tlphead[i] = CSR_PCI_READ_4(dev,
2148 2144 PEX_HEADER_LOG + i * 4);
2149 2145 /* Check for vendor defined broadcast message. */
2150 2146 if (!(tlphead[0] == 0x73004001 && tlphead[1] == 0x7f)) {
2151 2147 dev->d_intrhwemask &= ~Y2_IS_PCI_EXP;
2152 2148 CSR_WRITE_4(dev, B0_HWE_IMSK,
2153 2149 dev->d_intrhwemask);
2154 2150 (void) CSR_READ_4(dev, B0_HWE_IMSK);
2155 2151 }
2156 2152 }
2157 2153 /* Clear the interrupt. */
2158 2154 CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2159 2155 CSR_PCI_WRITE_4(dev, PEX_UNC_ERR_STAT, 0xffffffff);
2160 2156 CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
2161 2157 }
2162 2158
2163 2159 if ((status & Y2_HWE_L1_MASK) != 0 && dev->d_port[YGE_PORT_A] != NULL)
2164 2160 yge_handle_hwerr(dev->d_port[YGE_PORT_A], status);
2165 2161 if ((status & Y2_HWE_L2_MASK) != 0 && dev->d_port[YGE_PORT_B] != NULL)
2166 2162 yge_handle_hwerr(dev->d_port[YGE_PORT_B], status >> 8);
2167 2163 }
2168 2164
2169 2165 /*
2170 2166 * Returns B_TRUE if there is potentially more work to do.
2171 2167 */
2172 2168 static boolean_t
2173 2169 yge_handle_events(yge_dev_t *dev, mblk_t **heads, mblk_t **tails, int *txindex)
2174 2170 {
2175 2171 yge_port_t *port;
2176 2172 yge_ring_t *ring;
2177 2173 uint32_t control, status;
2178 2174 int cons, idx, len, pnum;
2179 2175 mblk_t *mp;
2180 2176 uint32_t rxprogs[2];
2181 2177
2182 2178 rxprogs[0] = rxprogs[1] = 0;
2183 2179
2184 2180 idx = CSR_READ_2(dev, STAT_PUT_IDX);
2185 2181 if (idx == dev->d_stat_cons) {
2186 2182 return (B_FALSE);
2187 2183 }
2188 2184
2189 2185 ring = &dev->d_status_ring;
2190 2186
2191 2187 for (cons = dev->d_stat_cons; cons != idx; ) {
2192 2188 /* Sync status LE. */
2193 2189 SYNCENTRY(ring, cons, DDI_DMA_SYNC_FORKERNEL);
2194 2190 control = GETCTRL(ring, cons);
2195 2191 if ((control & HW_OWNER) == 0) {
2196 2192 yge_error(dev, NULL, "Status descriptor error: "
2197 2193 "index %d, control %x", cons, control);
2198 2194 break;
2199 2195 }
2200 2196
2201 2197 status = GETSTAT(ring, cons);
2202 2198
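		/*
		 * Decode the status LE: the low bits carry the length and
		 * bit 16 selects the port the event belongs to.
		 */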
2203 2199 control &= ~HW_OWNER;
2204 2200 len = control & STLE_LEN_MASK;
2205 2201 pnum = ((control >> 16) & 0x01);
2206 2202 port = dev->d_port[pnum];
2207 2203 if (port == NULL) {
2208 2204 yge_error(dev, NULL, "Invalid port opcode: 0x%08x",
2209 2205 control & STLE_OP_MASK);
2210 2206 goto finish;
2211 2207 }
2212 2208
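		/*
		 * Each status LE carries an opcode: OP_RXSTAT reports a
		 * completed receive for one port, while OP_TXINDEXLE carries
		 * updated Tx consumer indices for both ports at once.
		 */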
2213 2209 switch (control & STLE_OP_MASK) {
2214 2210 case OP_RXSTAT:
2215 2211 mp = yge_rxeof(port, status, len);
2216 2212 if (mp != NULL) {
2217 2213 if (heads[pnum] == NULL)
2218 2214 heads[pnum] = mp;
2219 2215 else
2220 2216 tails[pnum]->b_next = mp;
2221 2217 tails[pnum] = mp;
2222 2218 }
2223 2219
2224 2220 rxprogs[pnum]++;
2225 2221 break;
2226 2222
2227 2223 case OP_TXINDEXLE:
2228 2224 txindex[0] = status & STLE_TXA1_MSKL;
2229 2225 txindex[1] =
2230 2226 ((status & STLE_TXA2_MSKL) >> STLE_TXA2_SHIFTL) |
2231 2227 ((len & STLE_TXA2_MSKH) << STLE_TXA2_SHIFTH);
2232 2228 break;
2233 2229 default:
2234 2230 yge_error(dev, NULL, "Unhandled opcode: 0x%08x",
2235 2231 control & STLE_OP_MASK);
2236 2232 break;
2237 2233 }
2238 2234 finish:
2239 2235
2240 2236 /* Give it back to HW. */
2241 2237 PUTCTRL(ring, cons, control);
2242 2238 SYNCENTRY(ring, cons, DDI_DMA_SYNC_FORDEV);
2243 2239
2244 2240 YGE_INC(cons, YGE_STAT_RING_CNT);
2245 2241 if (rxprogs[pnum] > dev->d_process_limit) {
2246 2242 break;
2247 2243 }
2248 2244 }
2249 2245
2250 2246 dev->d_stat_cons = cons;
2251 2247 if (dev->d_stat_cons != CSR_READ_2(dev, STAT_PUT_IDX))
2252 2248 return (B_TRUE);
2253 2249 else
2254 2250 return (B_FALSE);
2255 2251 }
2256 2252
2257 2253 /*ARGSUSED1*/
2258 2254 static uint_t
2259 2255 yge_intr(caddr_t arg1, caddr_t arg2)
2260 2256 {
2261 2257 yge_dev_t *dev;
2262 2258 yge_port_t *port1;
2263 2259 yge_port_t *port2;
2264 2260 uint32_t status;
2265 2261 mblk_t *heads[2], *tails[2];
2266 2262 int txindex[2];
2267 2263 int dispatch_wrk;
2268 2264
2269 2265 dev = (void *)arg1;
2270 2266
2271 2267 heads[0] = heads[1] = NULL;
2272 2268 tails[0] = tails[1] = NULL;
2273 2269 txindex[0] = txindex[1] = -1;
2274 2270 dispatch_wrk = 0;
2275 2271
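	/*
	 * Per-port receive chains and Tx completion indices, accumulated
	 * while draining the shared status ring; they are handed to the
	 * MAC layer only after the receive lock is dropped.
	 */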
2276 2272 port1 = dev->d_port[YGE_PORT_A];
2277 2273 port2 = dev->d_port[YGE_PORT_B];
2278 2274
2279 2275 RX_LOCK(dev);
2280 2276
2281 2277 if (dev->d_suspended) {
2282 2278 RX_UNLOCK(dev);
2283 2279 return (DDI_INTR_UNCLAIMED);
2284 2280 }
2285 2281
2286 2282 /* Get interrupt source. */
2287 2283 status = CSR_READ_4(dev, B0_Y2_SP_ISRC2);
2288 2284 if (status == 0 || status == 0xffffffff ||
2289 2285 	    (status & dev->d_intrmask) == 0) { /* Stray interrupt? */
2290 2286 /* Reenable interrupts. */
2291 2287 CSR_WRITE_4(dev, B0_Y2_SP_ICR, 2);
2292 2288 RX_UNLOCK(dev);
2293 2289 return (DDI_INTR_UNCLAIMED);
2294 2290 }
2295 2291
2296 2292 if ((status & Y2_IS_HW_ERR) != 0) {
2297 2293 yge_intr_hwerr(dev);
2298 2294 }
2299 2295
2300 2296 if (status & Y2_IS_IRQ_MAC1) {
2301 2297 dispatch_wrk |= yge_intr_gmac(port1);
2302 2298 }
2303 2299 if (status & Y2_IS_IRQ_MAC2) {
2304 2300 dispatch_wrk |= yge_intr_gmac(port2);
2305 2301 }
2306 2302
2307 2303 if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) {
2308 2304 yge_error(NULL, status & Y2_IS_CHK_RX1 ? port1 : port2,
2309 2305 "Rx descriptor error");
2310 2306 dev->d_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2);
2311 2307 CSR_WRITE_4(dev, B0_IMSK, dev->d_intrmask);
2312 2308 (void) CSR_READ_4(dev, B0_IMSK);
2313 2309 }
2314 2310 if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) {
2315 2311 yge_error(NULL, status & Y2_IS_CHK_TXA1 ? port1 : port2,
2316 2312 "Tx descriptor error");
2317 2313 dev->d_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2);
2318 2314 CSR_WRITE_4(dev, B0_IMSK, dev->d_intrmask);
2319 2315 (void) CSR_READ_4(dev, B0_IMSK);
2320 2316 }
2321 2317
2322 2318 /* handle events until it returns false */
2323 2319 while (yge_handle_events(dev, heads, tails, txindex))
2324 2320 /* NOP */;
2325 2321
2326 2322 /* Do receive/transmit events */
2327 2323 if ((status & Y2_IS_STAT_BMU)) {
2328 2324 CSR_WRITE_4(dev, STAT_CTRL, SC_STAT_CLR_IRQ);
2329 2325 }
2330 2326
2331 2327 /* Reenable interrupts. */
2332 2328 CSR_WRITE_4(dev, B0_Y2_SP_ICR, 2);
2333 2329
2334 2330 RX_UNLOCK(dev);
2335 2331
2336 2332 if (dispatch_wrk) {
2337 2333 yge_dispatch(dev, dispatch_wrk);
2338 2334 }
2339 2335
2340 2336 if (port1->p_running) {
2341 2337 if (txindex[0] >= 0) {
2342 2338 yge_txeof(port1, txindex[0]);
2343 2339 }
2344 2340 if (heads[0])
2345 2341 mac_rx(port1->p_mh, NULL, heads[0]);
2346 2342 } else {
2347 2343 if (heads[0]) {
2348 2344 mblk_t *mp;
2349 2345 while ((mp = heads[0]) != NULL) {
2350 2346 heads[0] = mp->b_next;
2351 2347 freemsg(mp);
2352 2348 }
2353 2349 }
2354 2350 }
2355 2351
2356 2352 if (port2->p_running) {
2357 2353 if (txindex[1] >= 0) {
2358 2354 yge_txeof(port2, txindex[1]);
2359 2355 }
2360 2356 if (heads[1])
2361 2357 mac_rx(port2->p_mh, NULL, heads[1]);
2362 2358 } else {
2363 2359 if (heads[1]) {
2364 2360 mblk_t *mp;
2365 2361 while ((mp = heads[1]) != NULL) {
2366 2362 heads[1] = mp->b_next;
2367 2363 freemsg(mp);
2368 2364 }
2369 2365 }
2370 2366 }
2371 2367
2372 2368 return (DDI_INTR_CLAIMED);
2373 2369 }
2374 2370
2375 2371 static void
2376 2372 yge_set_tx_stfwd(yge_port_t *port)
2377 2373 {
2378 2374 yge_dev_t *dev = port->p_dev;
2379 2375 int pnum = port->p_port;
2380 2376
2381 2377 switch (dev->d_hw_id) {
2382 2378 case CHIP_ID_YUKON_EX:
2383 2379 if (dev->d_hw_rev == CHIP_REV_YU_EX_A0)
2384 2380 goto yukon_ex_workaround;
2385 2381
2386 2382 if (port->p_mtu > ETHERMTU)
2387 2383 CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_CTRL_T),
2388 2384 TX_JUMBO_ENA | TX_STFW_ENA);
2389 2385 else
2390 2386 CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_CTRL_T),
2391 2387 TX_JUMBO_DIS | TX_STFW_ENA);
2392 2388 break;
2393 2389 default:
2394 2390 yukon_ex_workaround:
2395 2391 if (port->p_mtu > ETHERMTU) {
2396 2392 /* Set Tx GMAC FIFO Almost Empty Threshold. */
2397 2393 CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_AE_THR),
2398 2394 MSK_ECU_JUMBO_WM << 16 | MSK_ECU_AE_THR);
2399 2395 /* Disable Store & Forward mode for Tx. */
2400 2396 CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_CTRL_T),
2401 2397 TX_JUMBO_ENA | TX_STFW_DIS);
2402 2398 } else {
2403 2399 /* Enable Store & Forward mode for Tx. */
2404 2400 CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_CTRL_T),
2405 2401 TX_JUMBO_DIS | TX_STFW_ENA);
2406 2402 }
2407 2403 break;
2408 2404 }
2409 2405 }
2410 2406
2411 2407 static void
2412 2408 yge_start_port(yge_port_t *port)
2413 2409 {
2414 2410 yge_dev_t *dev = port->p_dev;
2415 2411 uint16_t gmac;
2416 2412 int32_t pnum;
2417 2413 int32_t rxq;
2418 2414 int32_t txq;
2419 2415 uint32_t reg;
2420 2416
2421 2417 pnum = port->p_port;
2422 2418 txq = port->p_txq;
2423 2419 rxq = port->p_rxq;
2424 2420
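	/*
	 * The receive frame size is at least a standard Ethernet MTU, plus
	 * room for a VLAN header, regardless of the configured MTU.
	 */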
2425 2421 if (port->p_mtu < ETHERMTU)
2426 2422 port->p_framesize = ETHERMTU;
2427 2423 else
2428 2424 port->p_framesize = port->p_mtu;
2429 2425 port->p_framesize += sizeof (struct ether_vlan_header);
2430 2426
2431 2427 /*
2432 2428 * Note for the future, if we enable offloads:
2433 2429 * In Yukon EC Ultra, TSO & checksum offload is not
2434 2430 * supported for jumbo frame.
2435 2431 */
2436 2432
2437 2433 /* GMAC Control reset */
2438 2434 CSR_WRITE_4(dev, MR_ADDR(pnum, GMAC_CTRL), GMC_RST_SET);
2439 2435 CSR_WRITE_4(dev, MR_ADDR(pnum, GMAC_CTRL), GMC_RST_CLR);
2440 2436 CSR_WRITE_4(dev, MR_ADDR(pnum, GMAC_CTRL), GMC_F_LOOPB_OFF);
2441 2437 if (dev->d_hw_id == CHIP_ID_YUKON_EX)
2442 2438 CSR_WRITE_4(dev, MR_ADDR(pnum, GMAC_CTRL),
2443 2439 GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON |
2444 2440 GMC_BYP_RETR_ON);
2445 2441 /*
2446 2442 * Initialize GMAC first such that speed/duplex/flow-control
2447 2443 	 * parameters are renegotiated when the interface is brought up.
2448 2444 */
2449 2445 GMAC_WRITE_2(dev, pnum, GM_GP_CTRL, 0);
2450 2446
2451 2447 /* Dummy read the Interrupt Source Register. */
2452 2448 (void) CSR_READ_1(dev, MR_ADDR(pnum, GMAC_IRQ_SRC));
2453 2449
2454 2450 /* Clear MIB stats. */
2455 2451 yge_stats_clear(port);
2456 2452
2457 2453 /* Disable FCS. */
2458 2454 GMAC_WRITE_2(dev, pnum, GM_RX_CTRL, GM_RXCR_CRC_DIS);
2459 2455
2460 2456 /* Setup Transmit Control Register. */
2461 2457 GMAC_WRITE_2(dev, pnum, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
2462 2458
2463 2459 /* Setup Transmit Flow Control Register. */
2464 2460 GMAC_WRITE_2(dev, pnum, GM_TX_FLOW_CTRL, 0xffff);
2465 2461
2466 2462 /* Setup Transmit Parameter Register. */
2467 2463 GMAC_WRITE_2(dev, pnum, GM_TX_PARAM,
2468 2464 TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
2469 2465 TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));
2470 2466
2471 2467 gmac = DATA_BLIND_VAL(DATA_BLIND_DEF) |
2472 2468 GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
2473 2469
2474 2470 if (port->p_mtu > ETHERMTU)
2475 2471 gmac |= GM_SMOD_JUMBO_ENA;
2476 2472 GMAC_WRITE_2(dev, pnum, GM_SERIAL_MODE, gmac);
2477 2473
2478 2474 /* Disable interrupts for counter overflows. */
2479 2475 GMAC_WRITE_2(dev, pnum, GM_TX_IRQ_MSK, 0);
2480 2476 GMAC_WRITE_2(dev, pnum, GM_RX_IRQ_MSK, 0);
2481 2477 GMAC_WRITE_2(dev, pnum, GM_TR_IRQ_MSK, 0);
2482 2478
2483 2479 /* Configure Rx MAC FIFO. */
2484 2480 CSR_WRITE_4(dev, MR_ADDR(pnum, RX_GMF_CTRL_T), GMF_RST_SET);
2485 2481 CSR_WRITE_4(dev, MR_ADDR(pnum, RX_GMF_CTRL_T), GMF_RST_CLR);
2486 2482 reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
2487 2483 if (dev->d_hw_id == CHIP_ID_YUKON_FE_P ||
2488 2484 dev->d_hw_id == CHIP_ID_YUKON_EX)
2489 2485 reg |= GMF_RX_OVER_ON;
2490 2486 CSR_WRITE_4(dev, MR_ADDR(pnum, RX_GMF_CTRL_T), reg);
2491 2487
2492 2488 /* Set receive filter. */
2493 2489 yge_setrxfilt(port);
2494 2490
2495 2491 /* Flush Rx MAC FIFO on any flow control or error. */
2496 2492 CSR_WRITE_4(dev, MR_ADDR(pnum, RX_GMF_FL_MSK), GMR_FS_ANY_ERR);
2497 2493
2498 2494 /*
2499 2495 * Set Rx FIFO flush threshold to 64 bytes + 1 FIFO word
2500 2496 * due to hardware hang on receipt of pause frames.
2501 2497 */
2502 2498 reg = RX_GMF_FL_THR_DEF + 1;
2503 2499 /* FE+ magic */
2504 2500 if ((dev->d_hw_id == CHIP_ID_YUKON_FE_P) &&
2505 2501 (dev->d_hw_rev == CHIP_REV_YU_FE2_A0))
2506 2502 reg = 0x178;
2507 2503
2508 2504 CSR_WRITE_4(dev, MR_ADDR(pnum, RX_GMF_FL_THR), reg);
2509 2505
2510 2506 /* Configure Tx MAC FIFO. */
2511 2507 CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_CTRL_T), GMF_RST_SET);
2512 2508 CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_CTRL_T), GMF_RST_CLR);
2513 2509 CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_CTRL_T), GMF_OPER_ON);
2514 2510
2515 2511 /* Disable hardware VLAN tag insertion/stripping. */
2516 2512 CSR_WRITE_4(dev, MR_ADDR(pnum, RX_GMF_CTRL_T), RX_VLAN_STRIP_OFF);
2517 2513 CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_CTRL_T), TX_VLAN_TAG_OFF);
2518 2514
2519 2515 if ((port->p_flags & PORT_FLAG_RAMBUF) == 0) {
2520 2516 /* Set Rx Pause threshold. */
2521 2517 if ((dev->d_hw_id == CHIP_ID_YUKON_FE_P) &&
2522 2518 (dev->d_hw_rev == CHIP_REV_YU_FE2_A0)) {
2523 2519 CSR_WRITE_1(dev, MR_ADDR(pnum, RX_GMF_LP_THR),
2524 2520 MSK_ECU_LLPP);
2525 2521 CSR_WRITE_1(dev, MR_ADDR(pnum, RX_GMF_UP_THR),
2526 2522 MSK_FEP_ULPP);
2527 2523 } else {
2528 2524 CSR_WRITE_1(dev, MR_ADDR(pnum, RX_GMF_LP_THR),
2529 2525 MSK_ECU_LLPP);
2530 2526 CSR_WRITE_1(dev, MR_ADDR(pnum, RX_GMF_UP_THR),
2531 2527 MSK_ECU_ULPP);
2532 2528 }
2533 2529 /* Configure store-and-forward for TX */
2534 2530 yge_set_tx_stfwd(port);
2535 2531 }
2536 2532
2537 2533 if ((dev->d_hw_id == CHIP_ID_YUKON_FE_P) &&
2538 2534 (dev->d_hw_rev == CHIP_REV_YU_FE2_A0)) {
2539 2535 /* Disable dynamic watermark */
2540 2536 reg = CSR_READ_4(dev, MR_ADDR(pnum, TX_GMF_EA));
2541 2537 reg &= ~TX_DYN_WM_ENA;
2542 2538 CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_EA), reg);
2543 2539 }
2544 2540
2545 2541 /*
2546 2542 * Disable Force Sync bit and Alloc bit in Tx RAM interface
2547 2543 * arbiter as we don't use Sync Tx queue.
2548 2544 */
2549 2545 CSR_WRITE_1(dev, MR_ADDR(pnum, TXA_CTRL),
2550 2546 TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
2551 2547 /* Enable the RAM Interface Arbiter. */
2552 2548 CSR_WRITE_1(dev, MR_ADDR(pnum, TXA_CTRL), TXA_ENA_ARB);
2553 2549
2554 2550 /* Setup RAM buffer. */
2555 2551 yge_set_rambuffer(port);
2556 2552
2557 2553 /* Disable Tx sync Queue. */
2558 2554 CSR_WRITE_1(dev, RB_ADDR(port->p_txsq, RB_CTRL), RB_RST_SET);
2559 2555
2560 2556 /* Setup Tx Queue Bus Memory Interface. */
2561 2557 CSR_WRITE_4(dev, Q_ADDR(txq, Q_CSR), BMU_CLR_RESET);
2562 2558 CSR_WRITE_4(dev, Q_ADDR(txq, Q_CSR), BMU_OPER_INIT);
2563 2559 CSR_WRITE_4(dev, Q_ADDR(txq, Q_CSR), BMU_FIFO_OP_ON);
2564 2560 CSR_WRITE_2(dev, Q_ADDR(txq, Q_WM), MSK_BMU_TX_WM);
2565 2561
2566 2562 switch (dev->d_hw_id) {
2567 2563 case CHIP_ID_YUKON_EC_U:
2568 2564 if (dev->d_hw_rev == CHIP_REV_YU_EC_U_A0) {
2569 2565 /* Fix for Yukon-EC Ultra: set BMU FIFO level */
2570 2566 CSR_WRITE_2(dev, Q_ADDR(txq, Q_AL), MSK_ECU_TXFF_LEV);
2571 2567 }
2572 2568 break;
2573 2569 case CHIP_ID_YUKON_EX:
2574 2570 /*
2575 2571 * Yukon Extreme seems to have silicon bug for
2576 2572 * automatic Tx checksum calculation capability.
2577 2573 */
2578 2574 if (dev->d_hw_rev == CHIP_REV_YU_EX_B0)
2579 2575 CSR_WRITE_4(dev, Q_ADDR(txq, Q_F), F_TX_CHK_AUTO_OFF);
2580 2576 break;
2581 2577 }
2582 2578
2583 2579 /* Setup Rx Queue Bus Memory Interface. */
2584 2580 CSR_WRITE_4(dev, Q_ADDR(rxq, Q_CSR), BMU_CLR_RESET);
2585 2581 CSR_WRITE_4(dev, Q_ADDR(rxq, Q_CSR), BMU_OPER_INIT);
2586 2582 CSR_WRITE_4(dev, Q_ADDR(rxq, Q_CSR), BMU_FIFO_OP_ON);
2587 2583 if (dev->d_bustype == PEX_BUS) {
2588 2584 CSR_WRITE_2(dev, Q_ADDR(rxq, Q_WM), 0x80);
2589 2585 } else {
2590 2586 CSR_WRITE_2(dev, Q_ADDR(rxq, Q_WM), MSK_BMU_RX_WM);
2591 2587 }
2592 2588 if (dev->d_hw_id == CHIP_ID_YUKON_EC_U &&
2593 2589 dev->d_hw_rev >= CHIP_REV_YU_EC_U_A1) {
2594 2590 /* MAC Rx RAM Read is controlled by hardware. */
2595 2591 CSR_WRITE_4(dev, Q_ADDR(rxq, Q_F), F_M_RX_RAM_DIS);
2596 2592 }
2597 2593
2598 2594 yge_init_tx_ring(port);
2599 2595
2600 2596 /* Disable Rx checksum offload and RSS hash. */
2601 2597 CSR_WRITE_4(dev, Q_ADDR(rxq, Q_CSR),
2602 2598 BMU_DIS_RX_CHKSUM | BMU_DIS_RX_RSS_HASH);
2603 2599
2604 2600 yge_init_rx_ring(port);
2605 2601
2606 2602 /* Configure interrupt handling. */
2607 2603 if (port == dev->d_port[YGE_PORT_A]) {
2608 2604 dev->d_intrmask |= Y2_IS_PORT_A;
2609 2605 dev->d_intrhwemask |= Y2_HWE_L1_MASK;
2610 2606 } else if (port == dev->d_port[YGE_PORT_B]) {
2611 2607 dev->d_intrmask |= Y2_IS_PORT_B;
2612 2608 dev->d_intrhwemask |= Y2_HWE_L2_MASK;
2613 2609 }
2614 2610 CSR_WRITE_4(dev, B0_HWE_IMSK, dev->d_intrhwemask);
2615 2611 (void) CSR_READ_4(dev, B0_HWE_IMSK);
2616 2612 CSR_WRITE_4(dev, B0_IMSK, dev->d_intrmask);
2617 2613 (void) CSR_READ_4(dev, B0_IMSK);
2618 2614
2619 2615 /* Enable RX/TX GMAC */
2620 2616 gmac = GMAC_READ_2(dev, pnum, GM_GP_CTRL);
2621 2617 gmac |= (GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
2622 2618 GMAC_WRITE_2(port->p_dev, port->p_port, GM_GP_CTRL, gmac);
2623 2619 /* Read again to ensure writing. */
2624 2620 (void) GMAC_READ_2(dev, pnum, GM_GP_CTRL);
2625 2621
2626 2622 /* Reset TX timer */
2627 2623 port->p_tx_wdog = 0;
2628 2624 }
2629 2625
2630 2626 static void
2631 2627 yge_set_rambuffer(yge_port_t *port)
2632 2628 {
2633 2629 yge_dev_t *dev;
2634 2630 int ltpp, utpp;
2635 2631 int pnum;
2636 2632 uint32_t rxq;
2637 2633 uint32_t txq;
2638 2634
2639 2635 dev = port->p_dev;
2640 2636 pnum = port->p_port;
2641 2637 rxq = port->p_rxq;
2642 2638 txq = port->p_txq;
2643 2639
2644 2640 if ((port->p_flags & PORT_FLAG_RAMBUF) == 0)
2645 2641 return;
2646 2642
2647 2643 /* Setup Rx Queue. */
2648 2644 CSR_WRITE_1(dev, RB_ADDR(rxq, RB_CTRL), RB_RST_CLR);
2649 2645 CSR_WRITE_4(dev, RB_ADDR(rxq, RB_START), dev->d_rxqstart[pnum] / 8);
2650 2646 CSR_WRITE_4(dev, RB_ADDR(rxq, RB_END), dev->d_rxqend[pnum] / 8);
2651 2647 CSR_WRITE_4(dev, RB_ADDR(rxq, RB_WP), dev->d_rxqstart[pnum] / 8);
2652 2648 CSR_WRITE_4(dev, RB_ADDR(rxq, RB_RP), dev->d_rxqstart[pnum] / 8);
2653 2649
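	/*
	 * Compute the Rx RAM buffer upper and lower thresholds
	 * (RB_RX_UTPP/RB_RX_LTPP), expressed in units of 8 bytes.
	 */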
2654 2650 utpp =
2655 2651 (dev->d_rxqend[pnum] + 1 - dev->d_rxqstart[pnum] - RB_ULPP) / 8;
2656 2652 ltpp =
2657 2653 (dev->d_rxqend[pnum] + 1 - dev->d_rxqstart[pnum] - RB_LLPP_B) / 8;
2658 2654
2659 2655 if (dev->d_rxqsize < MSK_MIN_RXQ_SIZE)
2660 2656 ltpp += (RB_LLPP_B - RB_LLPP_S) / 8;
2661 2657
2662 2658 CSR_WRITE_4(dev, RB_ADDR(rxq, RB_RX_UTPP), utpp);
2663 2659 CSR_WRITE_4(dev, RB_ADDR(rxq, RB_RX_LTPP), ltpp);
2664 2660 	/* Set Rx priority (RB_RX_UTHP/RB_RX_LTHP) thresholds? */
2665 2661
2666 2662 CSR_WRITE_1(dev, RB_ADDR(rxq, RB_CTRL), RB_ENA_OP_MD);
2667 2663 (void) CSR_READ_1(dev, RB_ADDR(rxq, RB_CTRL));
2668 2664
2669 2665 /* Setup Tx Queue. */
2670 2666 CSR_WRITE_1(dev, RB_ADDR(txq, RB_CTRL), RB_RST_CLR);
2671 2667 CSR_WRITE_4(dev, RB_ADDR(txq, RB_START), dev->d_txqstart[pnum] / 8);
2672 2668 CSR_WRITE_4(dev, RB_ADDR(txq, RB_END), dev->d_txqend[pnum] / 8);
2673 2669 CSR_WRITE_4(dev, RB_ADDR(txq, RB_WP), dev->d_txqstart[pnum] / 8);
2674 2670 CSR_WRITE_4(dev, RB_ADDR(txq, RB_RP), dev->d_txqstart[pnum] / 8);
2675 2671 /* Enable Store & Forward for Tx side. */
2676 2672 CSR_WRITE_1(dev, RB_ADDR(txq, RB_CTRL), RB_ENA_STFWD);
2677 2673 CSR_WRITE_1(dev, RB_ADDR(txq, RB_CTRL), RB_ENA_OP_MD);
2678 2674 (void) CSR_READ_1(dev, RB_ADDR(txq, RB_CTRL));
2679 2675 }
2680 2676
2681 2677 static void
2682 2678 yge_set_prefetch(yge_dev_t *dev, int qaddr, yge_ring_t *ring)
2683 2679 {
2684 2680 /* Reset the prefetch unit. */
2685 2681 CSR_WRITE_4(dev, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
2686 2682 PREF_UNIT_RST_SET);
2687 2683 CSR_WRITE_4(dev, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
2688 2684 PREF_UNIT_RST_CLR);
2689 2685 /* Set LE base address. */
2690 2686 CSR_WRITE_4(dev, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_LOW_REG),
2691 2687 YGE_ADDR_LO(ring->r_paddr));
2692 2688 CSR_WRITE_4(dev, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_HI_REG),
2693 2689 YGE_ADDR_HI(ring->r_paddr));
2694 2690 /* Set the list last index. */
2695 2691 CSR_WRITE_2(dev, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_LAST_IDX_REG),
2696 2692 ring->r_num - 1);
2697 2693 /* Turn on prefetch unit. */
2698 2694 CSR_WRITE_4(dev, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
2699 2695 PREF_UNIT_OP_ON);
2700 2696 /* Dummy read to ensure write. */
2701 2697 (void) CSR_READ_4(dev, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG));
2702 2698 }
2703 2699
2704 2700 static void
2705 2701 yge_stop_port(yge_port_t *port)
2706 2702 {
2707 2703 yge_dev_t *dev = port->p_dev;
2708 2704 int pnum = port->p_port;
2709 2705 uint32_t txq = port->p_txq;
2710 2706 uint32_t rxq = port->p_rxq;
2711 2707 uint32_t val;
2712 2708 int i;
2713 2709
2714 2710 dev = port->p_dev;
2715 2711
2716 2712 /*
2717 2713 	 * Reset the Tx watchdog timeout.
2718 2714 */
2719 2715 port->p_tx_wdog = 0;
2720 2716
2721 2717 /* Disable interrupts. */
2722 2718 if (pnum == YGE_PORT_A) {
2723 2719 dev->d_intrmask &= ~Y2_IS_PORT_A;
2724 2720 dev->d_intrhwemask &= ~Y2_HWE_L1_MASK;
2725 2721 } else {
2726 2722 dev->d_intrmask &= ~Y2_IS_PORT_B;
2727 2723 dev->d_intrhwemask &= ~Y2_HWE_L2_MASK;
2728 2724 }
2729 2725 CSR_WRITE_4(dev, B0_HWE_IMSK, dev->d_intrhwemask);
2730 2726 (void) CSR_READ_4(dev, B0_HWE_IMSK);
2731 2727 CSR_WRITE_4(dev, B0_IMSK, dev->d_intrmask);
2732 2728 (void) CSR_READ_4(dev, B0_IMSK);
2733 2729
2734 2730 /* Disable Tx/Rx MAC. */
2735 2731 val = GMAC_READ_2(dev, pnum, GM_GP_CTRL);
2736 2732 val &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
2737 2733 GMAC_WRITE_2(dev, pnum, GM_GP_CTRL, val);
2738 2734 /* Read again to ensure writing. */
2739 2735 (void) GMAC_READ_2(dev, pnum, GM_GP_CTRL);
2740 2736
2741 2737 /* Update stats and clear counters. */
2742 2738 yge_stats_update(port);
2743 2739
2744 2740 /* Stop Tx BMU. */
2745 2741 CSR_WRITE_4(dev, Q_ADDR(txq, Q_CSR), BMU_STOP);
2746 2742 val = CSR_READ_4(dev, Q_ADDR(txq, Q_CSR));
2747 2743 for (i = 0; i < YGE_TIMEOUT; i += 10) {
2748 2744 if ((val & (BMU_STOP | BMU_IDLE)) == 0) {
2749 2745 CSR_WRITE_4(dev, Q_ADDR(txq, Q_CSR), BMU_STOP);
2750 2746 val = CSR_READ_4(dev, Q_ADDR(txq, Q_CSR));
2751 2747 } else
2752 2748 break;
2753 2749 drv_usecwait(10);
2754 2750 }
2755 2751 /* This is probably fairly catastrophic. */
2756 2752 if ((val & (BMU_STOP | BMU_IDLE)) == 0)
2757 2753 yge_error(NULL, port, "Tx BMU stop failed");
2758 2754
2759 2755 CSR_WRITE_1(dev, RB_ADDR(txq, RB_CTRL), RB_RST_SET | RB_DIS_OP_MD);
2760 2756
2761 2757 	/* Disable all GMAC interrupts. */
2762 2758 CSR_WRITE_1(dev, MR_ADDR(pnum, GMAC_IRQ_MSK), 0);
2763 2759
2764 2760 /* Disable the RAM Interface Arbiter. */
2765 2761 CSR_WRITE_1(dev, MR_ADDR(pnum, TXA_CTRL), TXA_DIS_ARB);
2766 2762
2767 2763 /* Reset the PCI FIFO of the async Tx queue */
2768 2764 CSR_WRITE_4(dev, Q_ADDR(txq, Q_CSR), BMU_RST_SET | BMU_FIFO_RST);
2769 2765
2770 2766 /* Reset the Tx prefetch units. */
2771 2767 CSR_WRITE_4(dev, Y2_PREF_Q_ADDR(txq, PREF_UNIT_CTRL_REG),
2772 2768 PREF_UNIT_RST_SET);
2773 2769
2774 2770 /* Reset the RAM Buffer async Tx queue. */
2775 2771 CSR_WRITE_1(dev, RB_ADDR(txq, RB_CTRL), RB_RST_SET);
2776 2772
2777 2773 /* Reset Tx MAC FIFO. */
2778 2774 CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_CTRL_T), GMF_RST_SET);
2779 2775 /* Set Pause Off. */
2780 2776 CSR_WRITE_4(dev, MR_ADDR(pnum, GMAC_CTRL), GMC_PAUSE_OFF);
2781 2777
2782 2778 /*
2783 2779 * The Rx Stop command will not work for Yukon-2 if the BMU does not
2784 2780 	 * reach the end of a packet, and since we can't be sure that there
2785 2781 	 * is no incoming data, we must reset the BMU while it is not in the
2786 2782 	 * middle of a DMA transfer. Since it is possible that the Rx path
2787 2783 	 * is still active, the Rx RAM buffer is stopped first, so any
2788 2784 	 * possible incoming data will not trigger a DMA. After the RAM
2789 2785 	 * buffer is stopped, the BMU is polled until any DMA in progress
2790 2786 	 * has ended, and only then is it reset.
2791 2787 */
2792 2788
2793 2789 /* Disable the RAM Buffer receive queue. */
2794 2790 CSR_WRITE_1(dev, RB_ADDR(rxq, RB_CTRL), RB_DIS_OP_MD);
2795 2791 for (i = 0; i < YGE_TIMEOUT; i += 10) {
2796 2792 if (CSR_READ_1(dev, RB_ADDR(rxq, Q_RSL)) ==
2797 2793 CSR_READ_1(dev, RB_ADDR(rxq, Q_RL)))
2798 2794 break;
2799 2795 drv_usecwait(10);
2800 2796 }
2801 2797 /* This is probably nearly a fatal error. */
2802 2798 if (i == YGE_TIMEOUT)
2803 2799 yge_error(NULL, port, "Rx BMU stop failed");
2804 2800
2805 2801 CSR_WRITE_4(dev, Q_ADDR(rxq, Q_CSR), BMU_RST_SET | BMU_FIFO_RST);
2806 2802 /* Reset the Rx prefetch unit. */
2807 2803 CSR_WRITE_4(dev, Y2_PREF_Q_ADDR(rxq, PREF_UNIT_CTRL_REG),
2808 2804 PREF_UNIT_RST_SET);
2809 2805 /* Reset the RAM Buffer receive queue. */
2810 2806 CSR_WRITE_1(dev, RB_ADDR(rxq, RB_CTRL), RB_RST_SET);
2811 2807 /* Reset Rx MAC FIFO. */
2812 2808 CSR_WRITE_4(dev, MR_ADDR(pnum, RX_GMF_CTRL_T), GMF_RST_SET);
2813 2809 }
2814 2810
2815 2811 /*
2816 2812 * When GM_PAR_MIB_CLR bit of GM_PHY_ADDR is set, reading lower
2817 2813 * counter clears high 16 bits of the counter such that accessing
2818 2814 * lower 16 bits should be the last operation.
2819 2815 */
2820 2816 #define YGE_READ_MIB32(x, y) \
2821 2817 GMAC_READ_4(dev, x, y)
2822 2818
2823 2819 #define YGE_READ_MIB64(x, y) \
2824 2820 ((((uint64_t)YGE_READ_MIB32(x, (y) + 8)) << 32) + \
2825 2821 (uint64_t)YGE_READ_MIB32(x, y))
2826 2822
2827 2823 static void
2828 2824 yge_stats_clear(yge_port_t *port)
2829 2825 {
2830 2826 yge_dev_t *dev;
2831 2827 uint16_t gmac;
2832 2828 int32_t pnum;
2833 2829
2834 2830 pnum = port->p_port;
2835 2831 dev = port->p_dev;
2836 2832
2837 2833 /* Set MIB Clear Counter Mode. */
2838 2834 gmac = GMAC_READ_2(dev, pnum, GM_PHY_ADDR);
2839 2835 GMAC_WRITE_2(dev, pnum, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);
2840 2836 /* Read all MIB Counters with Clear Mode set. */
2841 2837 for (int i = GM_RXF_UC_OK; i <= GM_TXE_FIFO_UR; i += 4)
2842 2838 (void) YGE_READ_MIB32(pnum, i);
2843 2839 /* Clear MIB Clear Counter Mode. */
2844 2840 gmac &= ~GM_PAR_MIB_CLR;
2845 2841 GMAC_WRITE_2(dev, pnum, GM_PHY_ADDR, gmac);
2846 2842 }
2847 2843
2848 2844 static void
2849 2845 yge_stats_update(yge_port_t *port)
2850 2846 {
2851 2847 yge_dev_t *dev;
2852 2848 struct yge_hw_stats *stats;
2853 2849 uint16_t gmac;
2854 2850 int32_t pnum;
2855 2851
2856 2852 dev = port->p_dev;
2857 2853 pnum = port->p_port;
2858 2854
2859 2855 if (dev->d_suspended || !port->p_running) {
2860 2856 return;
2861 2857 }
2862 2858 stats = &port->p_stats;
2863 2859 /* Set MIB Clear Counter Mode. */
2864 2860 gmac = GMAC_READ_2(dev, pnum, GM_PHY_ADDR);
2865 2861 GMAC_WRITE_2(dev, pnum, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);
2866 2862
2867 2863 /* Rx stats. */
2868 2864 stats->rx_ucast_frames += YGE_READ_MIB32(pnum, GM_RXF_UC_OK);
2869 2865 stats->rx_bcast_frames += YGE_READ_MIB32(pnum, GM_RXF_BC_OK);
2870 2866 stats->rx_pause_frames += YGE_READ_MIB32(pnum, GM_RXF_MPAUSE);
2871 2867 stats->rx_mcast_frames += YGE_READ_MIB32(pnum, GM_RXF_MC_OK);
2872 2868 stats->rx_crc_errs += YGE_READ_MIB32(pnum, GM_RXF_FCS_ERR);
2873 2869 (void) YGE_READ_MIB32(pnum, GM_RXF_SPARE1);
2874 2870 stats->rx_good_octets += YGE_READ_MIB64(pnum, GM_RXO_OK_LO);
2875 2871 stats->rx_bad_octets += YGE_READ_MIB64(pnum, GM_RXO_ERR_LO);
2876 2872 stats->rx_runts += YGE_READ_MIB32(pnum, GM_RXF_SHT);
2877 2873 stats->rx_runt_errs += YGE_READ_MIB32(pnum, GM_RXE_FRAG);
2878 2874 stats->rx_pkts_64 += YGE_READ_MIB32(pnum, GM_RXF_64B);
2879 2875 stats->rx_pkts_65_127 += YGE_READ_MIB32(pnum, GM_RXF_127B);
2880 2876 stats->rx_pkts_128_255 += YGE_READ_MIB32(pnum, GM_RXF_255B);
2881 2877 stats->rx_pkts_256_511 += YGE_READ_MIB32(pnum, GM_RXF_511B);
2882 2878 stats->rx_pkts_512_1023 += YGE_READ_MIB32(pnum, GM_RXF_1023B);
2883 2879 stats->rx_pkts_1024_1518 += YGE_READ_MIB32(pnum, GM_RXF_1518B);
2884 2880 stats->rx_pkts_1519_max += YGE_READ_MIB32(pnum, GM_RXF_MAX_SZ);
2885 2881 stats->rx_pkts_too_long += YGE_READ_MIB32(pnum, GM_RXF_LNG_ERR);
2886 2882 stats->rx_pkts_jabbers += YGE_READ_MIB32(pnum, GM_RXF_JAB_PKT);
2887 2883 (void) YGE_READ_MIB32(pnum, GM_RXF_SPARE2);
2888 2884 stats->rx_fifo_oflows += YGE_READ_MIB32(pnum, GM_RXE_FIFO_OV);
2889 2885 (void) YGE_READ_MIB32(pnum, GM_RXF_SPARE3);
2890 2886
2891 2887 /* Tx stats. */
2892 2888 stats->tx_ucast_frames += YGE_READ_MIB32(pnum, GM_TXF_UC_OK);
2893 2889 stats->tx_bcast_frames += YGE_READ_MIB32(pnum, GM_TXF_BC_OK);
2894 2890 stats->tx_pause_frames += YGE_READ_MIB32(pnum, GM_TXF_MPAUSE);
2895 2891 stats->tx_mcast_frames += YGE_READ_MIB32(pnum, GM_TXF_MC_OK);
2896 2892 stats->tx_octets += YGE_READ_MIB64(pnum, GM_TXO_OK_LO);
2897 2893 stats->tx_pkts_64 += YGE_READ_MIB32(pnum, GM_TXF_64B);
2898 2894 stats->tx_pkts_65_127 += YGE_READ_MIB32(pnum, GM_TXF_127B);
2899 2895 stats->tx_pkts_128_255 += YGE_READ_MIB32(pnum, GM_TXF_255B);
2900 2896 stats->tx_pkts_256_511 += YGE_READ_MIB32(pnum, GM_TXF_511B);
2901 2897 stats->tx_pkts_512_1023 += YGE_READ_MIB32(pnum, GM_TXF_1023B);
2902 2898 stats->tx_pkts_1024_1518 += YGE_READ_MIB32(pnum, GM_TXF_1518B);
2903 2899 stats->tx_pkts_1519_max += YGE_READ_MIB32(pnum, GM_TXF_MAX_SZ);
2904 2900 (void) YGE_READ_MIB32(pnum, GM_TXF_SPARE1);
2905 2901 stats->tx_colls += YGE_READ_MIB32(pnum, GM_TXF_COL);
2906 2902 stats->tx_late_colls += YGE_READ_MIB32(pnum, GM_TXF_LAT_COL);
2907 2903 stats->tx_excess_colls += YGE_READ_MIB32(pnum, GM_TXF_ABO_COL);
2908 2904 stats->tx_multi_colls += YGE_READ_MIB32(pnum, GM_TXF_MUL_COL);
2909 2905 stats->tx_single_colls += YGE_READ_MIB32(pnum, GM_TXF_SNG_COL);
2910 2906 stats->tx_underflows += YGE_READ_MIB32(pnum, GM_TXE_FIFO_UR);
2911 2907 /* Clear MIB Clear Counter Mode. */
2912 2908 gmac &= ~GM_PAR_MIB_CLR;
2913 2909 GMAC_WRITE_2(dev, pnum, GM_PHY_ADDR, gmac);
2914 2910 }
2915 2911
2916 2912 #undef YGE_READ_MIB32
2917 2913 #undef YGE_READ_MIB64
2918 2914
2919 2915 uint32_t
2920 2916 yge_hashbit(const uint8_t *addr)
2921 2917 {
2922 2918 int idx;
2923 2919 int bit;
2924 2920 uint_t data;
2925 2921 uint32_t crc;
2926 2922 #define POLY_BE 0x04c11db7
2927 2923
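	/*
	 * Compute the CRC-32 (big-endian polynomial) of the 6-byte MAC
	 * address one bit at a time; the result modulo 64 selects the bit
	 * position in the multicast hash filter.
	 */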
2928 2924 crc = 0xffffffff;
2929 2925 for (idx = 0; idx < 6; idx++) {
2930 2926 for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1) {
2931 2927 crc = (crc << 1)
2932 2928 ^ ((((crc >> 31) ^ data) & 1) ? POLY_BE : 0);
2933 2929 }
2934 2930 }
2935 2931 #undef POLY_BE
2936 2932
2937 2933 return (crc % 64);
2938 2934 }
2939 2935
2940 2936 int
2941 2937 yge_m_stat(void *arg, uint_t stat, uint64_t *val)
2942 2938 {
2943 2939 yge_port_t *port = arg;
2944 2940 struct yge_hw_stats *stats = &port->p_stats;
2945 2941
2946 2942 if (stat == MAC_STAT_IFSPEED) {
2947 2943 /*
2948 2944 * This is the first stat we are asked about. We update only
2949 2945 * for this stat, to avoid paying the hefty cost of the update
2950 2946 * once for each stat.
2951 2947 */
2952 2948 DEV_LOCK(port->p_dev);
2953 2949 yge_stats_update(port);
2954 2950 DEV_UNLOCK(port->p_dev);
2955 2951 }
2956 2952
2957 2953 if (mii_m_getstat(port->p_mii, stat, val) == 0) {
2958 2954 return (0);
2959 2955 }
2960 2956
2961 2957 switch (stat) {
2962 2958 case MAC_STAT_MULTIRCV:
2963 2959 *val = stats->rx_mcast_frames;
2964 2960 break;
2965 2961
2966 2962 case MAC_STAT_BRDCSTRCV:
2967 2963 *val = stats->rx_bcast_frames;
2968 2964 break;
2969 2965
2970 2966 case MAC_STAT_MULTIXMT:
2971 2967 *val = stats->tx_mcast_frames;
2972 2968 break;
2973 2969
2974 2970 case MAC_STAT_BRDCSTXMT:
2975 2971 *val = stats->tx_bcast_frames;
2976 2972 break;
2977 2973
2978 2974 case MAC_STAT_IPACKETS:
2979 2975 *val = stats->rx_ucast_frames;
2980 2976 break;
2981 2977
2982 2978 case MAC_STAT_RBYTES:
2983 2979 *val = stats->rx_good_octets;
2984 2980 break;
2985 2981
2986 2982 case MAC_STAT_OPACKETS:
2987 2983 *val = stats->tx_ucast_frames;
2988 2984 break;
2989 2985
2990 2986 case MAC_STAT_OBYTES:
2991 2987 *val = stats->tx_octets;
2992 2988 break;
2993 2989
2994 2990 case MAC_STAT_NORCVBUF:
2995 2991 *val = stats->rx_nobuf;
2996 2992 break;
2997 2993
2998 2994 case MAC_STAT_COLLISIONS:
2999 2995 *val = stats->tx_colls;
3000 2996 break;
3001 2997
3002 2998 case ETHER_STAT_ALIGN_ERRORS:
3003 2999 *val = stats->rx_runt_errs;
3004 3000 break;
3005 3001
3006 3002 case ETHER_STAT_FCS_ERRORS:
3007 3003 *val = stats->rx_crc_errs;
3008 3004 break;
3009 3005
3010 3006 case ETHER_STAT_FIRST_COLLISIONS:
3011 3007 *val = stats->tx_single_colls;
3012 3008 break;
3013 3009
3014 3010 case ETHER_STAT_MULTI_COLLISIONS:
3015 3011 *val = stats->tx_multi_colls;
3016 3012 break;
3017 3013
3018 3014 case ETHER_STAT_TX_LATE_COLLISIONS:
3019 3015 *val = stats->tx_late_colls;
3020 3016 break;
3021 3017
3022 3018 case ETHER_STAT_EX_COLLISIONS:
3023 3019 *val = stats->tx_excess_colls;
3024 3020 break;
3025 3021
3026 3022 case ETHER_STAT_TOOLONG_ERRORS:
3027 3023 *val = stats->rx_pkts_too_long;
3028 3024 break;
3029 3025
3030 3026 case MAC_STAT_OVERFLOWS:
3031 3027 *val = stats->rx_fifo_oflows;
3032 3028 break;
3033 3029
3034 3030 case MAC_STAT_UNDERFLOWS:
3035 3031 *val = stats->tx_underflows;
3036 3032 break;
3037 3033
3038 3034 case ETHER_STAT_TOOSHORT_ERRORS:
3039 3035 *val = stats->rx_runts;
3040 3036 break;
3041 3037
3042 3038 case ETHER_STAT_JABBER_ERRORS:
3043 3039 *val = stats->rx_pkts_jabbers;
3044 3040 break;
3045 3041
3046 3042 default:
3047 3043 return (ENOTSUP);
3048 3044 }
3049 3045 return (0);
3050 3046 }
3051 3047
3052 3048 int
3053 3049 yge_m_start(void *arg)
3054 3050 {
3055 3051 yge_port_t *port = arg;
3056 3052
3057 3053 DEV_LOCK(port->p_dev);
3058 3054
3059 3055 /*
3060 3056 * We defer resource allocation to this point, because we
3061 3057 * don't want to waste DMA resources that might better be used
3062 3058 * elsewhere, if the port is not actually being used.
3063 3059 *
3064 3060 * Furthermore, this gives us a more graceful handling of dynamic
3065 3061 * MTU modification.
3066 3062 */
3067 3063 if (yge_txrx_dma_alloc(port) != DDI_SUCCESS) {
3068 3064 /* Make sure we free up partially allocated resources. */
3069 3065 yge_txrx_dma_free(port);
3070 3066 DEV_UNLOCK(port->p_dev);
3071 3067 return (ENOMEM);
3072 3068 }
3073 3069
3074 3070 if (!port->p_dev->d_suspended)
3075 3071 yge_start_port(port);
3076 3072 port->p_running = B_TRUE;
3077 3073 DEV_UNLOCK(port->p_dev);
3078 3074
3079 3075 mii_start(port->p_mii);
3080 3076
3081 3077 return (0);
3082 3078 }
3083 3079
3084 3080 void
3085 3081 yge_m_stop(void *arg)
3086 3082 {
3087 3083 yge_port_t *port = arg;
3088 3084 yge_dev_t *dev = port->p_dev;
3089 3085
3090 3086 DEV_LOCK(dev);
3091 3087 if (!dev->d_suspended)
3092 3088 yge_stop_port(port);
3093 3089
3094 3090 port->p_running = B_FALSE;
3095 3091
3096 3092 /* Release resources we don't need */
3097 3093 yge_txrx_dma_free(port);
3098 3094 DEV_UNLOCK(dev);
3099 3095 }
3100 3096
3101 3097 int
3102 3098 yge_m_promisc(void *arg, boolean_t on)
3103 3099 {
3104 3100 yge_port_t *port = arg;
3105 3101
3106 3102 DEV_LOCK(port->p_dev);
3107 3103
3108 3104 /* Save current promiscuous mode. */
3109 3105 port->p_promisc = on;
3110 3106 yge_setrxfilt(port);
3111 3107
3112 3108 DEV_UNLOCK(port->p_dev);
3113 3109
3114 3110 return (0);
3115 3111 }
3116 3112
3117 3113 int
3118 3114 yge_m_multicst(void *arg, boolean_t add, const uint8_t *addr)
3119 3115 {
3120 3116 yge_port_t *port = arg;
3121 3117 int bit;
3122 3118 	boolean_t update = B_FALSE;
3123 3119
3124 3120 bit = yge_hashbit(addr);
3125 3121 ASSERT(bit < 64);
3126 3122
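	/*
	 * Each of the 64 hash bits is reference counted, so the hardware
	 * filter is rewritten only when a bit transitions between zero and
	 * nonzero references.
	 */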
3127 3123 DEV_LOCK(port->p_dev);
3128 3124 if (add) {
3129 3125 if (port->p_mccount[bit] == 0) {
3130 3126 /* Set the corresponding bit in the hash table. */
3131 3127 port->p_mchash[bit / 32] |= (1 << (bit % 32));
3132 3128 update = B_TRUE;
3133 3129 }
3134 3130 port->p_mccount[bit]++;
3135 3131 } else {
3136 3132 ASSERT(port->p_mccount[bit] > 0);
3137 3133 port->p_mccount[bit]--;
3138 3134 if (port->p_mccount[bit] == 0) {
3139 3135 port->p_mchash[bit / 32] &= ~(1 << (bit % 32));
3140 3136 update = B_TRUE;
3141 3137 }
3142 3138 }
3143 3139
3144 3140 if (update) {
3145 3141 yge_setrxfilt(port);
3146 3142 }
3147 3143 DEV_UNLOCK(port->p_dev);
3148 3144 return (0);
3149 3145 }
3150 3146
3151 3147 int
3152 3148 yge_m_unicst(void *arg, const uint8_t *macaddr)
3153 3149 {
3154 3150 yge_port_t *port = arg;
3155 3151
3156 3152 DEV_LOCK(port->p_dev);
3157 3153
3158 3154 bcopy(macaddr, port->p_curraddr, ETHERADDRL);
3159 3155 yge_setrxfilt(port);
3160 3156
3161 3157 DEV_UNLOCK(port->p_dev);
3162 3158
3163 3159 return (0);
3164 3160 }
3165 3161
3166 3162 mblk_t *
3167 3163 yge_m_tx(void *arg, mblk_t *mp)
3168 3164 {
3169 3165 yge_port_t *port = arg;
3170 3166 mblk_t *nmp;
3171 3167 int enq = 0;
3172 3168 uint32_t ridx;
3173 3169 int idx;
3174 3170 boolean_t resched = B_FALSE;
3175 3171
3176 3172 TX_LOCK(port->p_dev);
3177 3173
3178 3174 if (port->p_dev->d_suspended) {
3179 3175
3180 3176 TX_UNLOCK(port->p_dev);
3181 3177
3182 3178 while ((nmp = mp) != NULL) {
3183 3179 /* carrier_errors++; */
3184 3180 mp = mp->b_next;
3185 3181 freemsg(nmp);
3186 3182 }
3187 3183 return (NULL);
3188 3184 }
3189 3185
3190 3186 /* attempt a reclaim */
3191 3187 ridx = port->p_port == YGE_PORT_A ?
3192 3188 STAT_TXA1_RIDX : STAT_TXA2_RIDX;
3193 3189 idx = CSR_READ_2(port->p_dev, ridx);
3194 3190 if (port->p_tx_cons != idx)
3195 3191 resched = yge_txeof_locked(port, idx);
3196 3192
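	/*
	 * Queue as many frames as the descriptor ring allows; any that do
	 * not fit are re-linked and returned to the MAC layer, which will
	 * retry them after a Tx update.
	 */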
3197 3193 while (mp != NULL) {
3198 3194 nmp = mp->b_next;
3199 3195 mp->b_next = NULL;
3200 3196
3201 3197 if (!yge_send(port, mp)) {
3202 3198 mp->b_next = nmp;
3203 3199 break;
3204 3200 }
3205 3201 enq++;
3206 3202 mp = nmp;
3207 3203
3208 3204 }
3209 3205 if (enq > 0) {
3210 3206 /* Transmit */
3211 3207 CSR_WRITE_2(port->p_dev,
3212 3208 Y2_PREF_Q_ADDR(port->p_txq, PREF_UNIT_PUT_IDX_REG),
3213 3209 port->p_tx_prod);
3214 3210 }
3215 3211
3216 3212 TX_UNLOCK(port->p_dev);
3217 3213
3218 3214 if (resched)
3219 3215 mac_tx_update(port->p_mh);
3220 3216
3221 3217 return (mp);
3222 3218 }
3223 3219
3224 3220 void
3225 3221 yge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
3226 3222 {
3227 3223 #ifdef YGE_MII_LOOPBACK
3228 3224 /* LINTED E_FUNC_SET_NOT_USED */
3229 3225 yge_port_t *port = arg;
3230 3226
3231 3227 /*
3232 3228 * Right now, the MII common layer does not properly handle
3233 3229 * loopback on these PHYs. Fixing this should be done at some
3234 3230 * point in the future.
3235 3231 */
3236 3232 if (mii_m_loop_ioctl(port->p_mii, wq, mp))
3237 3233 return;
3238 3234 #else
3239 3235 _NOTE(ARGUNUSED(arg));
3240 3236 #endif
3241 3237
3242 3238 miocnak(wq, mp, 0, EINVAL);
3243 3239 }
3244 3240
3245 3241 int
3246 3242 yge_m_setprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
3247 3243 uint_t pr_valsize, const void *pr_val)
3248 3244 {
3249 3245 yge_port_t *port = arg;
3250 3246 uint32_t new_mtu;
3251 3247 int err = 0;
3252 3248
3253 3249 err = mii_m_setprop(port->p_mii, pr_name, pr_num, pr_valsize, pr_val);
3254 3250 if (err != ENOTSUP) {
3255 3251 return (err);
3256 3252 }
3257 3253
3258 3254 DEV_LOCK(port->p_dev);
3259 3255
3260 3256 switch (pr_num) {
3261 3257 case MAC_PROP_MTU:
3262 3258 if (pr_valsize < sizeof (new_mtu)) {
3263 3259 err = EINVAL;
3264 3260 break;
3265 3261 }
3266 3262 bcopy(pr_val, &new_mtu, sizeof (new_mtu));
3267 3263 if (new_mtu == port->p_mtu) {
3268 3264 /* no change */
3269 3265 err = 0;
3270 3266 break;
3271 3267 }
3272 3268 if (new_mtu < ETHERMTU) {
3273 3269 yge_error(NULL, port,
3274 3270 "Maximum MTU size too small: %d", new_mtu);
3275 3271 err = EINVAL;
3276 3272 break;
3277 3273 }
3278 3274 if (new_mtu > (port->p_flags & PORT_FLAG_NOJUMBO ?
3279 3275 ETHERMTU : YGE_JUMBO_MTU)) {
3280 3276 yge_error(NULL, port,
3281 3277 "Maximum MTU size too big: %d", new_mtu);
3282 3278 err = EINVAL;
3283 3279 break;
3284 3280 }
3285 3281 if (port->p_running) {
3286 3282 yge_error(NULL, port,
3287 3283 "Unable to change maximum MTU while running");
3288 3284 err = EBUSY;
3289 3285 break;
3290 3286 }
3291 3287
3292 3288
3293 3289 /*
3294 3290 * NB: It would probably be better not to hold the
3295 3291 * DEVLOCK, but releasing it creates a potential race
3296 3292 * if m_start is called concurrently.
3297 3293 *
3298 3294 * It turns out that the MAC layer guarantees safety
3299 3295 * for us here by using a cut out for this kind of
3300 3296 * notification call back anyway.
3301 3297 *
3302 3298 * See R8. and R14. in mac.c locking comments, which read
3303 3299 * as follows:
3304 3300 *
3305 3301 * R8. Since it is not guaranteed (see R14) that
3306 3302 * drivers won't hold locks across mac driver
3307 3303 * interfaces, the MAC layer must provide a cut out
3308 3304 * for control interfaces like upcall notifications
3309 3305 * and start them in a separate thread.
3310 3306 *
3311 3307 * R14. It would be preferable if MAC drivers don't
3312 3308 * hold any locks across any mac call. However at a
3313 3309 * minimum they must not hold any locks across data
3314 3310 * upcalls. They must also make sure that all
3315 3311 * references to mac data structures are cleaned up
3316 3312 * and that it is single threaded at mac_unregister
3317 3313 * time.
3318 3314 */
3319 3315 err = mac_maxsdu_update(port->p_mh, new_mtu);
3320 3316 if (err != 0) {
3321 3317 /* This should never occur! */
3322 3318 yge_error(NULL, port,
3323 3319 "Failed notifying GLDv3 of new maximum MTU");
3324 3320 } else {
3325 3321 port->p_mtu = new_mtu;
3326 3322 }
3327 3323 break;
3328 3324
3329 3325 default:
3330 3326 err = ENOTSUP;
3331 3327 break;
3332 3328 }
3333 3329
3334 3330 err:
3335 3331 DEV_UNLOCK(port->p_dev);
3336 3332
3337 3333 return (err);
3338 3334 }
3339 3335
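/*
 * GLDv3 m_getprop entry point; all readable properties are supplied by
 * the common MII layer.
 */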
3340 3336 int
3341 3337 yge_m_getprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
3342 3338 uint_t pr_valsize, void *pr_val)
3343 3339 {
3344 3340 yge_port_t *port = arg;
3345 3341
3346 3342 return (mii_m_getprop(port->p_mii, pr_name, pr_num, pr_valsize,
3347 3343 pr_val));
3348 3344 }
3349 3345
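/*
 * GLDv3 m_propinfo entry point.  Reports the valid MTU range (standard
 * Ethernet only when jumbo frames are unsupported) and defers all other
 * properties to the common MII layer.
 */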
3350 3346 static void
3351 3347 yge_m_propinfo(void *arg, const char *pr_name, mac_prop_id_t pr_num,
3352 3348 mac_prop_info_handle_t prh)
3353 3349 {
3354 3350 yge_port_t *port = arg;
3355 3351
3356 3352 switch (pr_num) {
3357 3353 case MAC_PROP_MTU:
3358 3354 mac_prop_info_set_range_uint32(prh, ETHERMTU,
3359 3355 port->p_flags & PORT_FLAG_NOJUMBO ?
3360 3356 ETHERMTU : YGE_JUMBO_MTU);
3361 3357 break;
3362 3358 default:
3363 3359 mii_m_propinfo(port->p_mii, pr_name, pr_num, prh);
3364 3360 break;
3365 3361 }
3366 3362 }
3367 3363
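/*
 * Post work to the per-device task thread: record the requested flag(s)
 * and signal the thread, all under the task lock.
 */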
3368 3364 void
3369 3365 yge_dispatch(yge_dev_t *dev, int flag)
3370 3366 {
3371 3367 TASK_LOCK(dev);
3372 3368 dev->d_task_flags |= flag;
3373 3369 TASK_SIGNAL(dev);
3374 3370 TASK_UNLOCK(dev);
3375 3371 }
3376 3372
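/*
 * Per-device task thread.  Sleeps until yge_dispatch() posts work, then
 * handles the requested flags (exit or restart) without holding any locks.
 */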
3377 3373 void
3378 3374 yge_task(void *arg)
3379 3375 {
3380 3376 yge_dev_t *dev = arg;
3381 3377 int flags;
3382 3378
3383 3379 for (;;) {
3384 3380
3385 3381 TASK_LOCK(dev);
3386 3382 while ((flags = dev->d_task_flags) == 0)
3387 3383 TASK_WAIT(dev);
3388 3384
3389 3385 dev->d_task_flags = 0;
3390 3386 TASK_UNLOCK(dev);
3391 3387
3392 3388 /*
3393 3389 		 * This should be the first check after the sleep so that, if
3394 3390 		 * we are requested to exit, we do so immediately instead of
3395 3391 		 * wasting time on work we would then abandon.
3396 3392 */
3397 3393 if (flags & YGE_TASK_EXIT)
3398 3394 break;
3399 3395
3400 3396 /* all processing done without holding locks */
3401 3397 if (flags & YGE_TASK_RESTART)
3402 3398 yge_restart_task(dev);
3403 3399 }
3404 3400 }
3405 3401
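/*
 * Common error reporting: format the message and log it with cmn_err(),
 * prefixed by the port PPA or the device instance when one is available.
 */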
3406 3402 void
3407 3403 yge_error(yge_dev_t *dev, yge_port_t *port, char *fmt, ...)
3408 3404 {
3409 3405 va_list ap;
3410 3406 char buf[256];
3411 3407 int ppa;
3412 3408
3413 3409 va_start(ap, fmt);
3414 3410 (void) vsnprintf(buf, sizeof (buf), fmt, ap);
3415 3411 va_end(ap);
3416 3412
3417 3413 if (dev == NULL && port == NULL) {
3418 3414 cmn_err(CE_WARN, "yge: %s", buf);
3419 3415 } else {
3420 3416 if (port != NULL)
3421 3417 ppa = port->p_ppa;
3422 3418 else
3423 3419 ppa = ddi_get_instance(dev->d_dip);
3424 3420 cmn_err(CE_WARN, "yge%d: %s", ppa, buf);
3425 3421 }
3426 3422 }
3427 3423
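/*
 * attach(9E) entry point.  DDI_ATTACH allocates the device softstate and
 * both port structures before calling yge_attach(); DDI_RESUME resumes a
 * previously suspended device.
 */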
3428 3424 static int
3429 3425 yge_ddi_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
3430 3426 {
3431 3427 yge_dev_t *dev;
3432 3428 int rv;
3433 3429
3434 3430 switch (cmd) {
3435 3431 case DDI_ATTACH:
3436 3432 dev = kmem_zalloc(sizeof (*dev), KM_SLEEP);
3437 3433 dev->d_port[0] = kmem_zalloc(sizeof (yge_port_t), KM_SLEEP);
3438 3434 dev->d_port[1] = kmem_zalloc(sizeof (yge_port_t), KM_SLEEP);
3439 3435 dev->d_dip = dip;
3440 3436 ddi_set_driver_private(dip, dev);
3441 3437
3442 3438 dev->d_port[0]->p_port = 0;
3443 3439 dev->d_port[0]->p_dev = dev;
3444 3440 dev->d_port[1]->p_port = 0;
3445 3441 dev->d_port[1]->p_dev = dev;
3446 3442
3447 3443 rv = yge_attach(dev);
3448 3444 if (rv != DDI_SUCCESS) {
3449 3445 ddi_set_driver_private(dip, 0);
3450 3446 kmem_free(dev->d_port[1], sizeof (yge_port_t));
3451 3447 kmem_free(dev->d_port[0], sizeof (yge_port_t));
3452 3448 kmem_free(dev, sizeof (*dev));
3453 3449 }
3454 3450 return (rv);
3455 3451
3456 3452 case DDI_RESUME:
3457 3453 dev = ddi_get_driver_private(dip);
3458 3454 ASSERT(dev != NULL);
3459 3455 return (yge_resume(dev));
3460 3456
3461 3457 default:
3462 3458 return (DDI_FAILURE);
3463 3459 }
3464 3460 }
3465 3461
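/*
 * detach(9E) entry point.  DDI_DETACH disables the registered MACs
 * (failing if any cannot be disabled), tears down the hardware,
 * unregisters the MACs and frees the softstate; DDI_SUSPEND suspends
 * the device.
 */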
3466 3462 static int
3467 3463 yge_ddi_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
3468 3464 {
3469 3465 yge_dev_t *dev;
3470 3466 mac_handle_t mh;
3471 3467
3472 3468 switch (cmd) {
3473 3469 case DDI_DETACH:
3474 3470
3475 3471 dev = ddi_get_driver_private(dip);
3476 3472
3477 3473 /* attempt to unregister MACs from Nemo */
3478 3474 for (int i = 0; i < dev->d_num_port; i++) {
3479 3475
3480 3476 if (((mh = dev->d_port[i]->p_mh) != NULL) &&
3481 3477 (mac_disable(mh) != 0)) {
3482 3478 /*
3483 3479 * We'd really like a mac_enable to reenable
3484 3480 * any MACs that we previously disabled. Too
3485 3481 * bad GLDv3 doesn't have one.
3486 3482 */
3487 3483 return (DDI_FAILURE);
3488 3484 }
3489 3485 }
3490 3486
3491 3487 ASSERT(dip == dev->d_dip);
3492 3488 yge_detach(dev);
3493 3489 ddi_set_driver_private(dip, 0);
3494 3490 for (int i = 0; i < dev->d_num_port; i++) {
3495 3491 if ((mh = dev->d_port[i]->p_mh) != NULL) {
3496 3492 /* This can't fail after mac_disable above. */
3497 3493 (void) mac_unregister(mh);
3498 3494 }
3499 3495 }
3500 3496 kmem_free(dev->d_port[1], sizeof (yge_port_t));
3501 3497 kmem_free(dev->d_port[0], sizeof (yge_port_t));
3502 3498 kmem_free(dev, sizeof (*dev));
3503 3499 return (DDI_SUCCESS);
3504 3500
3505 3501 case DDI_SUSPEND:
3506 3502 dev = ddi_get_driver_private(dip);
3507 3503 ASSERT(dev != NULL);
3508 3504 return (yge_suspend(dev));
3509 3505
3510 3506 default:
3511 3507 return (DDI_FAILURE);
3512 3508 }
3513 3509 }
3514 3510
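/*
 * quiesce(9E) entry point, called in a single-threaded context: stop any
 * running ports, mask all interrupts, and put the chip into reset.
 */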
3515 3511 static int
3516 3512 yge_quiesce(dev_info_t *dip)
3517 3513 {
3518 3514 yge_dev_t *dev;
3519 3515
3520 3516 dev = ddi_get_driver_private(dip);
3521 3517 ASSERT(dev != NULL);
3522 3518
3523 3519 /* NB: No locking! We are called in single threaded context */
3524 3520 for (int i = 0; i < dev->d_num_port; i++) {
3525 3521 yge_port_t *port = dev->d_port[i];
3526 3522 if (port->p_running)
3527 3523 yge_stop_port(port);
3528 3524 }
3529 3525
3530 3526 /* Disable all interrupts. */
3531 3527 CSR_WRITE_4(dev, B0_IMSK, 0);
3532 3528 (void) CSR_READ_4(dev, B0_IMSK);
3533 3529 CSR_WRITE_4(dev, B0_HWE_IMSK, 0);
3534 3530 (void) CSR_READ_4(dev, B0_HWE_IMSK);
3535 3531
3536 3532 /* Put hardware into reset. */
3537 3533 CSR_WRITE_2(dev, B0_CTST, CS_RST_SET);
3538 3534
3539 3535 return (DDI_SUCCESS);
3540 3536 }
3541 3537
3542 3538 /*
3543 3539 * Stream information
3544 3540 */
3545 3541 DDI_DEFINE_STREAM_OPS(yge_devops, nulldev, nulldev, yge_ddi_attach,
3546 3542 yge_ddi_detach, nodev, NULL, D_MP, NULL, yge_quiesce);
3547 3543
3548 3544 /*
3549 3545 * Module linkage information.
3550 3546 */
3551 3547
3552 3548 static struct modldrv yge_modldrv = {
3553 3549 &mod_driverops, /* drv_modops */
3554 3550 "Yukon 2 Ethernet", /* drv_linkinfo */
3555 3551 &yge_devops /* drv_dev_ops */
3556 3552 };
3557 3553
3558 3554 static struct modlinkage yge_modlinkage = {
3559 3555 MODREV_1, /* ml_rev */
3560 3556 &yge_modldrv, /* ml_linkage */
3561 3557 NULL
3562 3558 };
3563 3559
3564 3560 /*
3565 3561 * DDI entry points.
3566 3562 */
3567 3563 int
3568 3564 _init(void)
3569 3565 {
3570 3566 int rv;
3571 3567 mac_init_ops(&yge_devops, "yge");
3572 3568 if ((rv = mod_install(&yge_modlinkage)) != DDI_SUCCESS) {
3573 3569 mac_fini_ops(&yge_devops);
3574 3570 }
3575 3571 return (rv);
3576 3572 }
3577 3573
3578 3574 int
3579 3575 _fini(void)
3580 3576 {
3581 3577 int rv;
3582 3578 if ((rv = mod_remove(&yge_modlinkage)) == DDI_SUCCESS) {
3583 3579 mac_fini_ops(&yge_devops);
3584 3580 }
3585 3581 return (rv);
3586 3582 }
3587 3583
3588 3584 int
3589 3585 _info(struct modinfo *modinfop)
3590 3586 {
3591 3587 return (mod_info(&yge_modlinkage, modinfop));
3592 3588 }