Print this page
XXXX introduce drv_sectohz
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/sfe/sfe_util.c
+++ new/usr/src/uts/common/io/sfe/sfe_util.c
1 1 /*
2 2 * sfe_util.c: general ethernet mac driver framework version 2.6
3 3 *
4 4 * Copyright (c) 2002-2008 Masayuki Murayama. All rights reserved.
5 5 *
6 6 * Redistribution and use in source and binary forms, with or without
7 7 * modification, are permitted provided that the following conditions are met:
8 8 *
9 9 * 1. Redistributions of source code must retain the above copyright notice,
10 10 * this list of conditions and the following disclaimer.
11 11 *
12 12 * 2. Redistributions in binary form must reproduce the above copyright notice,
13 13 * this list of conditions and the following disclaimer in the documentation
14 14 * and/or other materials provided with the distribution.
15 15 *
16 16 * 3. Neither the name of the author nor the names of its contributors may be
17 17 * used to endorse or promote products derived from this software without
18 18 * specific prior written permission.
19 19 *
20 20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 24 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26 26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
27 27 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
28 28 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29 29 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
30 30 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
31 31 * DAMAGE.
32 32 */
33 33
34 34 /*
35 35 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
36 36 * Use is subject to license terms.
37 37 */
38 38
39 39 /*
40 40 * System Header files.
41 41 */
42 42 #include <sys/types.h>
43 43 #include <sys/conf.h>
44 44 #include <sys/debug.h>
45 45 #include <sys/kmem.h>
46 46 #include <sys/vtrace.h>
47 47 #include <sys/ethernet.h>
48 48 #include <sys/modctl.h>
49 49 #include <sys/errno.h>
50 50 #include <sys/ddi.h>
51 51 #include <sys/sunddi.h>
52 52 #include <sys/stream.h> /* required for MBLK* */
53 53 #include <sys/strsun.h> /* required for mionack() */
54 54 #include <sys/byteorder.h>
55 55 #include <sys/pci.h>
56 56 #include <inet/common.h>
57 57 #include <inet/led.h>
58 58 #include <inet/mi.h>
59 59 #include <inet/nd.h>
60 60 #include <sys/crc32.h>
61 61
62 62 #include <sys/note.h>
63 63
64 64 #include "sfe_mii.h"
65 65 #include "sfe_util.h"
66 66
67 67
68 68
69 69 extern char ident[];
70 70
71 71 /* Debugging support */
72 72 #ifdef GEM_DEBUG_LEVEL
73 73 static int gem_debug = GEM_DEBUG_LEVEL;
74 74 #define DPRINTF(n, args) if (gem_debug > (n)) cmn_err args
75 75 #else
76 76 #define DPRINTF(n, args)
77 77 #undef ASSERT
78 78 #define ASSERT(x)
79 79 #endif
80 80
81 81 #define IOC_LINESIZE 0x40 /* Is it right for amd64? */
82 82
83 83 /*
84 84 * Useful macros and typedefs
85 85 */
86 86 #define ROUNDUP(x, a) (((x) + (a) - 1) & ~((a) - 1))
87 87
88 88 #define GET_NET16(p) ((((uint8_t *)(p))[0] << 8)| ((uint8_t *)(p))[1])
89 89 #define GET_ETHERTYPE(p) GET_NET16(((uint8_t *)(p)) + ETHERADDRL*2)
90 90
91 91 #define GET_IPTYPEv4(p) (((uint8_t *)(p))[sizeof (struct ether_header) + 9])
92 92 #define GET_IPTYPEv6(p) (((uint8_t *)(p))[sizeof (struct ether_header) + 6])
93 93
94 94
95 95 #ifndef INT32_MAX
96 96 #define INT32_MAX 0x7fffffff
97 97 #endif
98 98
99 99 #define VTAG_OFF (ETHERADDRL*2)
100 100 #ifndef VTAG_SIZE
101 101 #define VTAG_SIZE 4
102 102 #endif
103 103 #ifndef VTAG_TPID
104 104 #define VTAG_TPID 0x8100U
105 105 #endif
106 106
107 107 #define GET_TXBUF(dp, sn) \
108 108 &(dp)->tx_buf[SLOT((dp)->tx_slots_base + (sn), (dp)->gc.gc_tx_buf_size)]
109 109
110 110 #ifndef offsetof
111 111 #define offsetof(t, m) ((long)&(((t *) 0)->m))
112 112 #endif
113 113 #define TXFLAG_VTAG(flag) \
114 114 (((flag) & GEM_TXFLAG_VTAG) >> GEM_TXFLAG_VTAG_SHIFT)
115 115
116 116 #define MAXPKTBUF(dp) \
117 117 ((dp)->mtu + sizeof (struct ether_header) + VTAG_SIZE + ETHERFCSL)
118 118
119 119 #define WATCH_INTERVAL_FAST drv_usectohz(100*1000) /* 100mS */
120 120 #define BOOLEAN(x) ((x) != 0)
121 121
122 122 /*
123 123 * Macros to distinct chip generation.
124 124 */
125 125
126 126 /*
127 127 * Private functions
128 128 */
129 129 static void gem_mii_start(struct gem_dev *);
130 130 static void gem_mii_stop(struct gem_dev *);
131 131
132 132 /* local buffer management */
133 133 static void gem_nd_setup(struct gem_dev *dp);
134 134 static void gem_nd_cleanup(struct gem_dev *dp);
135 135 static int gem_alloc_memory(struct gem_dev *);
136 136 static void gem_free_memory(struct gem_dev *);
137 137 static void gem_init_rx_ring(struct gem_dev *);
138 138 static void gem_init_tx_ring(struct gem_dev *);
139 139 __INLINE__ static void gem_append_rxbuf(struct gem_dev *, struct rxbuf *);
140 140
141 141 static void gem_tx_timeout(struct gem_dev *);
142 142 static void gem_mii_link_watcher(struct gem_dev *dp);
143 143 static int gem_mac_init(struct gem_dev *dp);
144 144 static int gem_mac_start(struct gem_dev *dp);
145 145 static int gem_mac_stop(struct gem_dev *dp, uint_t flags);
146 146 static void gem_mac_ioctl(struct gem_dev *dp, queue_t *wq, mblk_t *mp);
147 147
148 148 static struct ether_addr gem_etherbroadcastaddr = {
149 149 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
150 150 };
151 151
152 152 int gem_speed_value[] = {10, 100, 1000};
153 153
154 154 /* ============================================================== */
155 155 /*
156 156 * Misc runtime routines
157 157 */
158 158 /* ============================================================== */
159 159 /*
160 160 * Ether CRC calculation according to 21143 data sheet
161 161 */
162 162 uint32_t
163 163 gem_ether_crc_le(const uint8_t *addr, int len)
164 164 {
165 165 uint32_t crc;
166 166
167 167 CRC32(crc, addr, ETHERADDRL, 0xffffffffU, crc32_table);
168 168 return (crc);
169 169 }
170 170
/*
 * gem_ether_crc_be: big endian CRC32 of an ethernet address, computed
 * bit-serially with the standard IEEE 802.3 polynomial (MSB-first).
 */
uint32_t
gem_ether_crc_be(const uint8_t *addr, int len)
{
	uint32_t crc = 0xffffffffU;
	int i;

	for (i = 0; i < len; i++) {
		uint32_t byte = addr[i];
		int b;

		/* fold each bit of the byte into the crc, LSB first */
		for (b = 0; b < 8; b++) {
			uint32_t feedback = ((crc >> 31) ^ byte) & 1;

			crc <<= 1;
			if (feedback) {
				crc ^= 0x04c11db7;
			}
			byte >>= 1;
		}
	}
	return (crc);
}
190 190
191 191 int
192 192 gem_prop_get_int(struct gem_dev *dp, char *prop_template, int def_val)
193 193 {
194 194 char propname[32];
195 195
196 196 (void) sprintf(propname, prop_template, dp->name);
197 197
198 198 return (ddi_prop_get_int(DDI_DEV_T_ANY, dp->dip,
199 199 DDI_PROP_DONTPASS, propname, def_val));
200 200 }
201 201
/*
 * gem_population: count the number of 1-bits in x (population count).
 * Used for multicast hash bookkeeping.
 *
 * BUG FIX: the original tested "x & (1 << i)"; for i == 31 the shift
 * overflows a signed int, which is undefined behavior in C.  Use an
 * unsigned constant instead.
 */
static int
gem_population(uint32_t x)
{
	int i;
	int cnt;

	cnt = 0;
	for (i = 0; i < 32; i++) {
		if (x & (1U << i)) {
			cnt++;
		}
	}
	return (cnt);
}
216 216
217 217 #ifdef GEM_DEBUG_LEVEL
218 218 #ifdef GEM_DEBUG_VLAN
/*
 * gem_dump_packet: debug-only helper that formats a one-line summary of
 * an mblk chain -- ethernet addresses, optional VLAN tag, ethertype,
 * fragment lengths, and (for IPv4 TCP/UDP) header fields plus an
 * optional checksum verification verdict -- and logs it via cmn_err.
 * Compiled only when both GEM_DEBUG_LEVEL and GEM_DEBUG_VLAN are set.
 */
static void
gem_dump_packet(struct gem_dev *dp, char *title, mblk_t *mp,
    boolean_t check_cksum)
{
	char msg[180];
	uint8_t buf[18+20+20];	/* room for ether+vtag, ip, and L4 headers */
	uint8_t *p;
	size_t offset;
	uint_t ethertype;
	uint_t proto;
	uint_t ipproto = 0;
	uint_t iplen;
	uint_t iphlen;
	uint_t tcplen;
	uint_t udplen;
	uint_t cksum;
	int rest;
	int len;
	char *bp;
	mblk_t *tp;
	extern uint_t ip_cksum(mblk_t *, int, uint32_t);

	msg[0] = 0;
	bp = msg;

	/* linearize the first sizeof (buf) bytes of the mblk chain */
	rest = sizeof (buf);
	offset = 0;
	for (tp = mp; tp; tp = tp->b_cont) {
		len = tp->b_wptr - tp->b_rptr;
		len = min(rest, len);
		bcopy(tp->b_rptr, &buf[offset], len);
		rest -= len;
		offset += len;
		if (rest == 0) {
			break;
		}
	}

	offset = 0;
	p = &buf[offset];

	/* ethernet address, printed as source -> destination */
	sprintf(bp,
	    "ether: %02x:%02x:%02x:%02x:%02x:%02x"
	    " -> %02x:%02x:%02x:%02x:%02x:%02x",
	    p[6], p[7], p[8], p[9], p[10], p[11],
	    p[0], p[1], p[2], p[3], p[4], p[5]);
	bp = &msg[strlen(msg)];

	/* VLAN tag and ethertype */
	ethertype = GET_ETHERTYPE(p);
	if (ethertype == VTAG_TPID) {
		/* tagged frame: print the tag, then skip over it */
		sprintf(bp, " vtag:0x%04x", GET_NET16(&p[14]));
		bp = &msg[strlen(msg)];

		offset += VTAG_SIZE;
		p = &buf[offset];
		ethertype = GET_ETHERTYPE(p);
	}
	sprintf(bp, " type:%04x", ethertype);
	bp = &msg[strlen(msg)];

	/* total packet length, with per-fragment breakdown when chained */
	sprintf(bp, " mblklen:%d", msgdsize(mp));
	bp = &msg[strlen(msg)];
	if (mp->b_cont) {
		sprintf(bp, "(");
		bp = &msg[strlen(msg)];
		for (tp = mp; tp; tp = tp->b_cont) {
			if (tp == mp) {
				sprintf(bp, "%d", tp->b_wptr - tp->b_rptr);
			} else {
				sprintf(bp, "+%d", tp->b_wptr - tp->b_rptr);
			}
			bp = &msg[strlen(msg)];
		}
		sprintf(bp, ")");
		bp = &msg[strlen(msg)];
	}

	/* only IPv4 is decoded further */
	if (ethertype != ETHERTYPE_IP) {
		goto x;
	}

	/* ip addresses, protocol and total length */
	offset += sizeof (struct ether_header);
	p = &buf[offset];
	ipproto = p[9];
	iplen = GET_NET16(&p[2]);
	sprintf(bp, ", ip: %d.%d.%d.%d -> %d.%d.%d.%d proto:%d iplen:%d",
	    p[12], p[13], p[14], p[15],
	    p[16], p[17], p[18], p[19],
	    ipproto, iplen);
	bp = (void *)&msg[strlen(msg)];

	/* ip header length in bytes, from the IHL field */
	iphlen = (p[0] & 0xf) * 4;

	/* partial cksum for the pseudo header (src, dst, protocol) */
	cksum = *(uint16_t *)&p[12];
	cksum += *(uint16_t *)&p[14];
	cksum += *(uint16_t *)&p[16];
	cksum += *(uint16_t *)&p[18];
	cksum += BE_16(ipproto);

	/* tcp or udp protocol header */
	offset += iphlen;
	p = &buf[offset];
	if (ipproto == IPPROTO_TCP) {
		tcplen = iplen - iphlen;
		sprintf(bp, ", tcp: len:%d cksum:%x",
		    tcplen, GET_NET16(&p[16]));
		bp = (void *)&msg[strlen(msg)];

		if (check_cksum) {
			/* complete the pseudo header sum and verify */
			cksum += BE_16(tcplen);
			cksum = (uint16_t)ip_cksum(mp, offset, cksum);
			sprintf(bp, " (%s)",
			    (cksum == 0 || cksum == 0xffff) ? "ok" : "ng");
			bp = (void *)&msg[strlen(msg)];
		}
	} else if (ipproto == IPPROTO_UDP) {
		udplen = GET_NET16(&p[4]);
		sprintf(bp, ", udp: len:%d cksum:%x",
		    udplen, GET_NET16(&p[6]));
		bp = (void *)&msg[strlen(msg)];

		/* a zero udp checksum means "not computed"; skip verify */
		if (GET_NET16(&p[6]) && check_cksum) {
			cksum += *(uint16_t *)&p[4];
			cksum = (uint16_t)ip_cksum(mp, offset, cksum);
			sprintf(bp, " (%s)",
			    (cksum == 0 || cksum == 0xffff) ? "ok" : "ng");
			bp = (void *)&msg[strlen(msg)];
		}
	}
x:
	cmn_err(CE_CONT, "!%s: %s: %s", dp->name, title, msg);
}
356 356 #endif /* GEM_DEBUG_VLAN */
357 357 #endif /* GEM_DEBUG_LEVEL */
358 358
359 359 /* ============================================================== */
360 360 /*
361 361 * IO cache flush
362 362 */
363 363 /* ============================================================== */
364 364 __INLINE__ void
365 365 gem_rx_desc_dma_sync(struct gem_dev *dp, int head, int nslot, int how)
366 366 {
367 367 int n;
368 368 int m;
369 369 int rx_desc_unit_shift = dp->gc.gc_rx_desc_unit_shift;
370 370
371 371 /* sync active descriptors */
372 372 if (rx_desc_unit_shift < 0 || nslot == 0) {
373 373 /* no rx descriptor ring */
374 374 return;
375 375 }
376 376
377 377 n = dp->gc.gc_rx_ring_size - head;
378 378 if ((m = nslot - n) > 0) {
379 379 (void) ddi_dma_sync(dp->desc_dma_handle,
380 380 (off_t)0,
381 381 (size_t)(m << rx_desc_unit_shift),
382 382 how);
383 383 nslot = n;
384 384 }
385 385
386 386 (void) ddi_dma_sync(dp->desc_dma_handle,
387 387 (off_t)(head << rx_desc_unit_shift),
388 388 (size_t)(nslot << rx_desc_unit_shift),
389 389 how);
390 390 }
391 391
392 392 __INLINE__ void
393 393 gem_tx_desc_dma_sync(struct gem_dev *dp, int head, int nslot, int how)
394 394 {
395 395 int n;
396 396 int m;
397 397 int tx_desc_unit_shift = dp->gc.gc_tx_desc_unit_shift;
398 398
399 399 /* sync active descriptors */
400 400 if (tx_desc_unit_shift < 0 || nslot == 0) {
401 401 /* no tx descriptor ring */
402 402 return;
403 403 }
404 404
405 405 n = dp->gc.gc_tx_ring_size - head;
406 406 if ((m = nslot - n) > 0) {
407 407 (void) ddi_dma_sync(dp->desc_dma_handle,
408 408 (off_t)(dp->tx_ring_dma - dp->rx_ring_dma),
409 409 (size_t)(m << tx_desc_unit_shift),
410 410 how);
411 411 nslot = n;
412 412 }
413 413
414 414 (void) ddi_dma_sync(dp->desc_dma_handle,
415 415 (off_t)((head << tx_desc_unit_shift)
416 416 + (dp->tx_ring_dma - dp->rx_ring_dma)),
417 417 (size_t)(nslot << tx_desc_unit_shift),
418 418 how);
419 419 }
420 420
421 421 static void
422 422 gem_rx_start_default(struct gem_dev *dp, int head, int nslot)
423 423 {
424 424 gem_rx_desc_dma_sync(dp,
425 425 SLOT(head, dp->gc.gc_rx_ring_size), nslot,
426 426 DDI_DMA_SYNC_FORDEV);
427 427 }
428 428
429 429 /* ============================================================== */
430 430 /*
431 431 * Buffer management
432 432 */
433 433 /* ============================================================== */
/*
 * gem_dump_txbuf: log a snapshot of all tx bookkeeping sequence numbers
 * for debugging.  Each region is printed as head/tail pairs in the form
 * "absolute[slot]" plus the distance "(+n)" between tail and head.
 */
static void
gem_dump_txbuf(struct gem_dev *dp, int level, const char *title)
{
	cmn_err(level,
	    "!%s: %s: tx_active: %d[%d] %d[%d] (+%d), "
	    "tx_softq: %d[%d] %d[%d] (+%d), "
	    "tx_free: %d[%d] %d[%d] (+%d), "
	    "tx_desc: %d[%d] %d[%d] (+%d), "
	    "intr: %d[%d] (+%d), ",
	    dp->name, title,
	    dp->tx_active_head,
	    SLOT(dp->tx_active_head, dp->gc.gc_tx_buf_size),
	    dp->tx_active_tail,
	    SLOT(dp->tx_active_tail, dp->gc.gc_tx_buf_size),
	    dp->tx_active_tail - dp->tx_active_head,
	    dp->tx_softq_head,
	    SLOT(dp->tx_softq_head, dp->gc.gc_tx_buf_size),
	    dp->tx_softq_tail,
	    SLOT(dp->tx_softq_tail, dp->gc.gc_tx_buf_size),
	    dp->tx_softq_tail - dp->tx_softq_head,
	    dp->tx_free_head,
	    SLOT(dp->tx_free_head, dp->gc.gc_tx_buf_size),
	    dp->tx_free_tail,
	    SLOT(dp->tx_free_tail, dp->gc.gc_tx_buf_size),
	    dp->tx_free_tail - dp->tx_free_head,
	    dp->tx_desc_head,
	    SLOT(dp->tx_desc_head, dp->gc.gc_tx_ring_size),
	    dp->tx_desc_tail,
	    SLOT(dp->tx_desc_tail, dp->gc.gc_tx_ring_size),
	    dp->tx_desc_tail - dp->tx_desc_head,
	    dp->tx_desc_intr,
	    SLOT(dp->tx_desc_intr, dp->gc.gc_tx_ring_size),
	    dp->tx_desc_intr - dp->tx_desc_head);
}
468 468
469 469 static void
470 470 gem_free_rxbuf(struct rxbuf *rbp)
471 471 {
472 472 struct gem_dev *dp;
473 473
474 474 dp = rbp->rxb_devp;
475 475 ASSERT(mutex_owned(&dp->intrlock));
476 476 rbp->rxb_next = dp->rx_buf_freelist;
477 477 dp->rx_buf_freelist = rbp;
478 478 dp->rx_buf_freecnt++;
479 479 }
480 480
481 481 /*
482 482 * gem_get_rxbuf: supply a receive buffer which have been mapped into
483 483 * DMA space.
484 484 */
485 485 struct rxbuf *
486 486 gem_get_rxbuf(struct gem_dev *dp, int cansleep)
487 487 {
488 488 struct rxbuf *rbp;
489 489 uint_t count = 0;
490 490 int i;
491 491 int err;
492 492
493 493 ASSERT(mutex_owned(&dp->intrlock));
494 494
495 495 DPRINTF(3, (CE_CONT, "!gem_get_rxbuf: called freecnt:%d",
496 496 dp->rx_buf_freecnt));
497 497 /*
498 498 * Get rx buffer management structure
499 499 */
500 500 rbp = dp->rx_buf_freelist;
501 501 if (rbp) {
502 502 /* get one from the recycle list */
503 503 ASSERT(dp->rx_buf_freecnt > 0);
504 504
505 505 dp->rx_buf_freelist = rbp->rxb_next;
506 506 dp->rx_buf_freecnt--;
507 507 rbp->rxb_next = NULL;
508 508 return (rbp);
509 509 }
510 510
511 511 /*
512 512 * Allocate a rx buffer management structure
513 513 */
514 514 rbp = kmem_zalloc(sizeof (*rbp), cansleep ? KM_SLEEP : KM_NOSLEEP);
515 515 if (rbp == NULL) {
516 516 /* no memory */
517 517 return (NULL);
518 518 }
519 519
520 520 /*
521 521 * Prepare a back pointer to the device structure which will be
522 522 * refered on freeing the buffer later.
523 523 */
524 524 rbp->rxb_devp = dp;
525 525
526 526 /* allocate a dma handle for rx data buffer */
527 527 if ((err = ddi_dma_alloc_handle(dp->dip,
528 528 &dp->gc.gc_dma_attr_rxbuf,
529 529 (cansleep ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT),
530 530 NULL, &rbp->rxb_dh)) != DDI_SUCCESS) {
531 531
532 532 cmn_err(CE_WARN,
533 533 "!%s: %s: ddi_dma_alloc_handle:1 failed, err=%d",
534 534 dp->name, __func__, err);
535 535
536 536 kmem_free(rbp, sizeof (struct rxbuf));
537 537 return (NULL);
538 538 }
539 539
540 540 /* allocate a bounce buffer for rx */
541 541 if ((err = ddi_dma_mem_alloc(rbp->rxb_dh,
542 542 ROUNDUP(dp->rx_buf_len, IOC_LINESIZE),
543 543 &dp->gc.gc_buf_attr,
544 544 /*
545 545 * if the nic requires a header at the top of receive buffers,
546 546 * it may access the rx buffer randomly.
547 547 */
548 548 (dp->gc.gc_rx_header_len > 0)
549 549 ? DDI_DMA_CONSISTENT : DDI_DMA_STREAMING,
550 550 cansleep ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT,
551 551 NULL,
552 552 &rbp->rxb_buf, &rbp->rxb_buf_len,
553 553 &rbp->rxb_bah)) != DDI_SUCCESS) {
554 554
555 555 cmn_err(CE_WARN,
556 556 "!%s: %s: ddi_dma_mem_alloc: failed, err=%d",
557 557 dp->name, __func__, err);
558 558
559 559 ddi_dma_free_handle(&rbp->rxb_dh);
560 560 kmem_free(rbp, sizeof (struct rxbuf));
561 561 return (NULL);
562 562 }
563 563
564 564 /* Mapin the bounce buffer into the DMA space */
565 565 if ((err = ddi_dma_addr_bind_handle(rbp->rxb_dh,
566 566 NULL, rbp->rxb_buf, dp->rx_buf_len,
567 567 ((dp->gc.gc_rx_header_len > 0)
568 568 ?(DDI_DMA_RDWR | DDI_DMA_CONSISTENT)
569 569 :(DDI_DMA_READ | DDI_DMA_STREAMING)),
570 570 cansleep ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT,
571 571 NULL,
572 572 rbp->rxb_dmacookie,
573 573 &count)) != DDI_DMA_MAPPED) {
574 574
575 575 ASSERT(err != DDI_DMA_INUSE);
576 576 DPRINTF(0, (CE_WARN,
577 577 "!%s: ddi_dma_addr_bind_handle: failed, err=%d",
578 578 dp->name, __func__, err));
579 579
580 580 /*
581 581 * we failed to allocate a dma resource
582 582 * for the rx bounce buffer.
583 583 */
584 584 ddi_dma_mem_free(&rbp->rxb_bah);
585 585 ddi_dma_free_handle(&rbp->rxb_dh);
586 586 kmem_free(rbp, sizeof (struct rxbuf));
587 587 return (NULL);
588 588 }
589 589
590 590 /* correct the rest of the DMA mapping */
591 591 for (i = 1; i < count; i++) {
592 592 ddi_dma_nextcookie(rbp->rxb_dh, &rbp->rxb_dmacookie[i]);
593 593 }
594 594 rbp->rxb_nfrags = count;
595 595
596 596 /* Now we successfully prepared an rx buffer */
597 597 dp->rx_buf_allocated++;
598 598
599 599 return (rbp);
600 600 }
601 601
602 602 /* ============================================================== */
603 603 /*
604 604 * memory resource management
605 605 */
606 606 /* ============================================================== */
607 607 static int
608 608 gem_alloc_memory(struct gem_dev *dp)
609 609 {
610 610 caddr_t ring;
611 611 caddr_t buf;
612 612 size_t req_size;
613 613 size_t ring_len;
614 614 size_t buf_len;
615 615 ddi_dma_cookie_t ring_cookie;
616 616 ddi_dma_cookie_t buf_cookie;
617 617 uint_t count;
618 618 int i;
619 619 int err;
620 620 struct txbuf *tbp;
621 621 int tx_buf_len;
622 622 ddi_dma_attr_t dma_attr_txbounce;
623 623
624 624 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
625 625
626 626 dp->desc_dma_handle = NULL;
627 627 req_size = dp->rx_desc_size + dp->tx_desc_size + dp->gc.gc_io_area_size;
628 628
629 629 if (req_size > 0) {
630 630 /*
631 631 * Alloc RX/TX descriptors and a io area.
632 632 */
633 633 if ((err = ddi_dma_alloc_handle(dp->dip,
634 634 &dp->gc.gc_dma_attr_desc,
635 635 DDI_DMA_SLEEP, NULL,
636 636 &dp->desc_dma_handle)) != DDI_SUCCESS) {
637 637 cmn_err(CE_WARN,
638 638 "!%s: %s: ddi_dma_alloc_handle failed: %d",
639 639 dp->name, __func__, err);
640 640 return (ENOMEM);
641 641 }
642 642
643 643 if ((err = ddi_dma_mem_alloc(dp->desc_dma_handle,
644 644 req_size, &dp->gc.gc_desc_attr,
645 645 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
646 646 &ring, &ring_len,
647 647 &dp->desc_acc_handle)) != DDI_SUCCESS) {
648 648 cmn_err(CE_WARN,
649 649 "!%s: %s: ddi_dma_mem_alloc failed: "
650 650 "ret %d, request size: %d",
651 651 dp->name, __func__, err, (int)req_size);
652 652 ddi_dma_free_handle(&dp->desc_dma_handle);
653 653 return (ENOMEM);
654 654 }
655 655
656 656 if ((err = ddi_dma_addr_bind_handle(dp->desc_dma_handle,
657 657 NULL, ring, ring_len,
658 658 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
659 659 DDI_DMA_SLEEP, NULL,
660 660 &ring_cookie, &count)) != DDI_SUCCESS) {
661 661 ASSERT(err != DDI_DMA_INUSE);
662 662 cmn_err(CE_WARN,
663 663 "!%s: %s: ddi_dma_addr_bind_handle failed: %d",
664 664 dp->name, __func__, err);
665 665 ddi_dma_mem_free(&dp->desc_acc_handle);
666 666 ddi_dma_free_handle(&dp->desc_dma_handle);
667 667 return (ENOMEM);
668 668 }
669 669 ASSERT(count == 1);
670 670
671 671 /* set base of rx descriptor ring */
672 672 dp->rx_ring = ring;
673 673 dp->rx_ring_dma = ring_cookie.dmac_laddress;
674 674
675 675 /* set base of tx descriptor ring */
676 676 dp->tx_ring = dp->rx_ring + dp->rx_desc_size;
677 677 dp->tx_ring_dma = dp->rx_ring_dma + dp->rx_desc_size;
678 678
679 679 /* set base of io area */
680 680 dp->io_area = dp->tx_ring + dp->tx_desc_size;
681 681 dp->io_area_dma = dp->tx_ring_dma + dp->tx_desc_size;
682 682 }
683 683
684 684 /*
685 685 * Prepare DMA resources for tx packets
686 686 */
687 687 ASSERT(dp->gc.gc_tx_buf_size > 0);
688 688
689 689 /* Special dma attribute for tx bounce buffers */
690 690 dma_attr_txbounce = dp->gc.gc_dma_attr_txbuf;
691 691 dma_attr_txbounce.dma_attr_sgllen = 1;
692 692 dma_attr_txbounce.dma_attr_align =
693 693 max(dma_attr_txbounce.dma_attr_align, IOC_LINESIZE);
694 694
695 695 /* Size for tx bounce buffers must be max tx packet size. */
696 696 tx_buf_len = MAXPKTBUF(dp);
697 697 tx_buf_len = ROUNDUP(tx_buf_len, IOC_LINESIZE);
698 698
699 699 ASSERT(tx_buf_len >= ETHERMAX+ETHERFCSL);
700 700
701 701 for (i = 0, tbp = dp->tx_buf;
702 702 i < dp->gc.gc_tx_buf_size; i++, tbp++) {
703 703
704 704 /* setup bounce buffers for tx packets */
705 705 if ((err = ddi_dma_alloc_handle(dp->dip,
706 706 &dma_attr_txbounce,
707 707 DDI_DMA_SLEEP, NULL,
708 708 &tbp->txb_bdh)) != DDI_SUCCESS) {
709 709
710 710 cmn_err(CE_WARN,
711 711 "!%s: %s ddi_dma_alloc_handle for bounce buffer failed:"
712 712 " err=%d, i=%d",
713 713 dp->name, __func__, err, i);
714 714 goto err_alloc_dh;
715 715 }
716 716
717 717 if ((err = ddi_dma_mem_alloc(tbp->txb_bdh,
718 718 tx_buf_len,
719 719 &dp->gc.gc_buf_attr,
720 720 DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
721 721 &buf, &buf_len,
722 722 &tbp->txb_bah)) != DDI_SUCCESS) {
723 723 cmn_err(CE_WARN,
724 724 "!%s: %s: ddi_dma_mem_alloc for bounce buffer failed"
725 725 "ret %d, request size %d",
726 726 dp->name, __func__, err, tx_buf_len);
727 727 ddi_dma_free_handle(&tbp->txb_bdh);
728 728 goto err_alloc_dh;
729 729 }
730 730
731 731 if ((err = ddi_dma_addr_bind_handle(tbp->txb_bdh,
732 732 NULL, buf, buf_len,
733 733 DDI_DMA_WRITE | DDI_DMA_STREAMING,
734 734 DDI_DMA_SLEEP, NULL,
735 735 &buf_cookie, &count)) != DDI_SUCCESS) {
736 736 ASSERT(err != DDI_DMA_INUSE);
737 737 cmn_err(CE_WARN,
738 738 "!%s: %s: ddi_dma_addr_bind_handle for bounce buffer failed: %d",
739 739 dp->name, __func__, err);
740 740 ddi_dma_mem_free(&tbp->txb_bah);
741 741 ddi_dma_free_handle(&tbp->txb_bdh);
742 742 goto err_alloc_dh;
743 743 }
744 744 ASSERT(count == 1);
745 745 tbp->txb_buf = buf;
746 746 tbp->txb_buf_dma = buf_cookie.dmac_laddress;
747 747 }
748 748
749 749 return (0);
750 750
751 751 err_alloc_dh:
752 752 if (dp->gc.gc_tx_buf_size > 0) {
753 753 while (i-- > 0) {
754 754 (void) ddi_dma_unbind_handle(dp->tx_buf[i].txb_bdh);
755 755 ddi_dma_mem_free(&dp->tx_buf[i].txb_bah);
756 756 ddi_dma_free_handle(&dp->tx_buf[i].txb_bdh);
757 757 }
758 758 }
759 759
760 760 if (dp->desc_dma_handle) {
761 761 (void) ddi_dma_unbind_handle(dp->desc_dma_handle);
762 762 ddi_dma_mem_free(&dp->desc_acc_handle);
763 763 ddi_dma_free_handle(&dp->desc_dma_handle);
764 764 dp->desc_dma_handle = NULL;
765 765 }
766 766
767 767 return (ENOMEM);
768 768 }
769 769
770 770 static void
771 771 gem_free_memory(struct gem_dev *dp)
772 772 {
773 773 int i;
774 774 struct rxbuf *rbp;
775 775 struct txbuf *tbp;
776 776
777 777 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
778 778
779 779 /* Free TX/RX descriptors and tx padding buffer */
780 780 if (dp->desc_dma_handle) {
781 781 (void) ddi_dma_unbind_handle(dp->desc_dma_handle);
782 782 ddi_dma_mem_free(&dp->desc_acc_handle);
783 783 ddi_dma_free_handle(&dp->desc_dma_handle);
784 784 dp->desc_dma_handle = NULL;
785 785 }
786 786
787 787 /* Free dma handles for Tx */
788 788 for (i = dp->gc.gc_tx_buf_size, tbp = dp->tx_buf; i--; tbp++) {
789 789 /* Free bounce buffer associated to each txbuf */
790 790 (void) ddi_dma_unbind_handle(tbp->txb_bdh);
791 791 ddi_dma_mem_free(&tbp->txb_bah);
792 792 ddi_dma_free_handle(&tbp->txb_bdh);
793 793 }
794 794
795 795 /* Free rx buffer */
796 796 while ((rbp = dp->rx_buf_freelist) != NULL) {
797 797
798 798 ASSERT(dp->rx_buf_freecnt > 0);
799 799
800 800 dp->rx_buf_freelist = rbp->rxb_next;
801 801 dp->rx_buf_freecnt--;
802 802
803 803 /* release DMA mapping */
804 804 ASSERT(rbp->rxb_dh != NULL);
805 805
806 806 /* free dma handles for rx bbuf */
807 807 /* it has dma mapping always */
808 808 ASSERT(rbp->rxb_nfrags > 0);
809 809 (void) ddi_dma_unbind_handle(rbp->rxb_dh);
810 810
811 811 /* free the associated bounce buffer and dma handle */
812 812 ASSERT(rbp->rxb_bah != NULL);
813 813 ddi_dma_mem_free(&rbp->rxb_bah);
814 814 /* free the associated dma handle */
815 815 ddi_dma_free_handle(&rbp->rxb_dh);
816 816
817 817 /* free the base memory of rx buffer management */
818 818 kmem_free(rbp, sizeof (struct rxbuf));
819 819 }
820 820 }
821 821
822 822 /* ============================================================== */
823 823 /*
824 824 * Rx/Tx descriptor slot management
825 825 */
826 826 /* ============================================================== */
827 827 /*
828 828 * Initialize an empty rx ring.
829 829 */
830 830 static void
831 831 gem_init_rx_ring(struct gem_dev *dp)
832 832 {
833 833 int i;
834 834 int rx_ring_size = dp->gc.gc_rx_ring_size;
835 835
836 836 DPRINTF(1, (CE_CONT, "!%s: %s ring_size:%d, buf_max:%d",
837 837 dp->name, __func__,
838 838 rx_ring_size, dp->gc.gc_rx_buf_max));
839 839
840 840 /* make a physical chain of rx descriptors */
841 841 for (i = 0; i < rx_ring_size; i++) {
842 842 (*dp->gc.gc_rx_desc_init)(dp, i);
843 843 }
844 844 gem_rx_desc_dma_sync(dp, 0, rx_ring_size, DDI_DMA_SYNC_FORDEV);
845 845
846 846 dp->rx_active_head = (seqnum_t)0;
847 847 dp->rx_active_tail = (seqnum_t)0;
848 848
849 849 ASSERT(dp->rx_buf_head == (struct rxbuf *)NULL);
850 850 ASSERT(dp->rx_buf_tail == (struct rxbuf *)NULL);
851 851 }
852 852
853 853 /*
854 854 * Prepare rx buffers and put them into the rx buffer/descriptor ring.
855 855 */
856 856 static void
857 857 gem_prepare_rx_buf(struct gem_dev *dp)
858 858 {
859 859 int i;
860 860 int nrbuf;
861 861 struct rxbuf *rbp;
862 862
863 863 ASSERT(mutex_owned(&dp->intrlock));
864 864
865 865 /* Now we have no active buffers in rx ring */
866 866
867 867 nrbuf = min(dp->gc.gc_rx_ring_size, dp->gc.gc_rx_buf_max);
868 868 for (i = 0; i < nrbuf; i++) {
869 869 if ((rbp = gem_get_rxbuf(dp, B_TRUE)) == NULL) {
870 870 break;
871 871 }
872 872 gem_append_rxbuf(dp, rbp);
873 873 }
874 874
875 875 gem_rx_desc_dma_sync(dp,
876 876 0, dp->gc.gc_rx_ring_size, DDI_DMA_SYNC_FORDEV);
877 877 }
878 878
879 879 /*
880 880 * Reclaim active rx buffers in rx buffer ring.
881 881 */
882 882 static void
883 883 gem_clean_rx_buf(struct gem_dev *dp)
884 884 {
885 885 int i;
886 886 struct rxbuf *rbp;
887 887 int rx_ring_size = dp->gc.gc_rx_ring_size;
888 888 #ifdef GEM_DEBUG_LEVEL
889 889 int total;
890 890 #endif
891 891 ASSERT(mutex_owned(&dp->intrlock));
892 892
893 893 DPRINTF(2, (CE_CONT, "!%s: %s: %d buffers are free",
894 894 dp->name, __func__, dp->rx_buf_freecnt));
895 895 /*
896 896 * clean up HW descriptors
897 897 */
898 898 for (i = 0; i < rx_ring_size; i++) {
899 899 (*dp->gc.gc_rx_desc_clean)(dp, i);
900 900 }
901 901 gem_rx_desc_dma_sync(dp, 0, rx_ring_size, DDI_DMA_SYNC_FORDEV);
902 902
903 903 #ifdef GEM_DEBUG_LEVEL
904 904 total = 0;
905 905 #endif
906 906 /*
907 907 * Reclaim allocated rx buffers
908 908 */
909 909 while ((rbp = dp->rx_buf_head) != NULL) {
910 910 #ifdef GEM_DEBUG_LEVEL
911 911 total++;
912 912 #endif
913 913 /* remove the first one from rx buffer list */
914 914 dp->rx_buf_head = rbp->rxb_next;
915 915
916 916 /* recycle the rxbuf */
917 917 gem_free_rxbuf(rbp);
918 918 }
919 919 dp->rx_buf_tail = (struct rxbuf *)NULL;
920 920
921 921 DPRINTF(2, (CE_CONT,
922 922 "!%s: %s: %d buffers freeed, total: %d free",
923 923 dp->name, __func__, total, dp->rx_buf_freecnt));
924 924 }
925 925
926 926 /*
927 927 * Initialize an empty transmit buffer/descriptor ring
928 928 */
/*
 * gem_init_tx_ring: initialize an empty transmit buffer/descriptor
 * ring.  May only be called while the mac is stopped.  Sequence numbers
 * are rebased so the software queue starts at zero; tx_slots_base keeps
 * the mapping from sequence numbers to physical buffer slots intact.
 */
static void
gem_init_tx_ring(struct gem_dev *dp)
{
	int i;
	int tx_buf_size = dp->gc.gc_tx_buf_size;
	int tx_ring_size = dp->gc.gc_tx_ring_size;

	DPRINTF(2, (CE_CONT, "!%s: %s: ring_size:%d, buf_size:%d",
	    dp->name, __func__,
	    dp->gc.gc_tx_ring_size, dp->gc.gc_tx_buf_size));

	ASSERT(!dp->mac_active);

	/* initialize active list and free list */
	/*
	 * Fold the current softq head into the slot base so that
	 * sequence number 0 maps to the same physical txbuf slot as the
	 * old softq head did, then shift the softq range down to zero.
	 */
	dp->tx_slots_base =
	    SLOT(dp->tx_slots_base + dp->tx_softq_head, tx_buf_size);
	dp->tx_softq_tail -= dp->tx_softq_head;
	dp->tx_softq_head = (seqnum_t)0;

	/* active region is empty and begins where the softq begins */
	dp->tx_active_head = dp->tx_softq_head;
	dp->tx_active_tail = dp->tx_softq_head;

	/* free region runs from the softq tail up to the buffer limit */
	dp->tx_free_head = dp->tx_softq_tail;
	dp->tx_free_tail = dp->gc.gc_tx_buf_limit;

	dp->tx_desc_head = (seqnum_t)0;
	dp->tx_desc_tail = (seqnum_t)0;
	dp->tx_desc_intr = (seqnum_t)0;

	/* reset all hardware descriptors and flush them to the device */
	for (i = 0; i < tx_ring_size; i++) {
		(*dp->gc.gc_tx_desc_init)(dp, i);
	}
	gem_tx_desc_dma_sync(dp, 0, tx_ring_size, DDI_DMA_SYNC_FORDEV);
}
963 963
964 964 __INLINE__
965 965 static void
966 966 gem_txbuf_free_dma_resources(struct txbuf *tbp)
967 967 {
968 968 if (tbp->txb_mp) {
969 969 freemsg(tbp->txb_mp);
970 970 tbp->txb_mp = NULL;
971 971 }
972 972 tbp->txb_nfrags = 0;
973 973 tbp->txb_flag = 0;
974 974 }
975 975 #pragma inline(gem_txbuf_free_dma_resources)
976 976
977 977 /*
978 978 * reclaim active tx buffers and reset positions in tx rings.
979 979 */
/*
 * gem_clean_tx_buf: reclaim all active tx buffers and reset positions
 * in the tx rings.  Called while the mac is stopped (e.g. on reset);
 * every pending packet is dropped and counted as an output error.
 */
static void
gem_clean_tx_buf(struct gem_dev *dp)
{
	int i;
	seqnum_t head;
	seqnum_t tail;
	seqnum_t sn;
	struct txbuf *tbp;
	int tx_ring_size = dp->gc.gc_tx_ring_size;
#ifdef GEM_DEBUG_LEVEL
	int err;
#endif

	ASSERT(!dp->mac_active);
	ASSERT(dp->tx_busy == 0);
	ASSERT(dp->tx_softq_tail == dp->tx_free_head);

	/*
	 * clean up all HW descriptors
	 */
	for (i = 0; i < tx_ring_size; i++) {
		(*dp->gc.gc_tx_desc_clean)(dp, i);
	}
	gem_tx_desc_dma_sync(dp, 0, tx_ring_size, DDI_DMA_SYNC_FORDEV);

	/* dequeue all active and loaded buffers */
	head = dp->tx_active_head;
	tail = dp->tx_softq_tail;

	ASSERT(dp->tx_free_head - head >= 0);
	tbp = GET_TXBUF(dp, head);
	for (sn = head; sn != tail; sn++) {
		/* drop the pending packet and count it as an error */
		gem_txbuf_free_dma_resources(tbp);
		ASSERT(tbp->txb_mp == NULL);
		dp->stats.errxmt++;
		tbp = tbp->txb_next;
	}

#ifdef GEM_DEBUG_LEVEL
	/* verify that no dma resources for tx remain in use now */
	err = 0;
	while (sn != head + dp->gc.gc_tx_buf_size) {
		if (tbp->txb_mp || tbp->txb_nfrags) {
			DPRINTF(0, (CE_CONT,
			    "%s: %s: sn:%d[%d] mp:%p nfrags:%d",
			    dp->name, __func__,
			    sn, SLOT(sn, dp->gc.gc_tx_buf_size),
			    tbp->txb_mp, tbp->txb_nfrags));
			err = 1;
		}
		sn++;
		tbp = tbp->txb_next;
	}

	if (err) {
		gem_dump_txbuf(dp, CE_WARN,
		    "gem_clean_tx_buf: tbp->txb_mp != NULL");
	}
#endif
	/* recycle buffers, now no active tx buffers in the ring */
	dp->tx_free_tail += tail - head;
	ASSERT(dp->tx_free_tail == dp->tx_free_head + dp->gc.gc_tx_buf_limit);

	/* fix positions in tx buffer rings */
	dp->tx_active_head = dp->tx_free_head;
	dp->tx_active_tail = dp->tx_free_head;
	dp->tx_softq_head = dp->tx_free_head;
	dp->tx_softq_tail = dp->tx_free_head;
}
1049 1049
1050 1050 /*
1051 1051 * Reclaim transmitted buffers from tx buffer/descriptor ring.
1052 1052 */
1053 1053 __INLINE__ int
1054 1054 gem_reclaim_txbuf(struct gem_dev *dp)
1055 1055 {
1056 1056 struct txbuf *tbp;
1057 1057 uint_t txstat;
1058 1058 int err = GEM_SUCCESS;
1059 1059 seqnum_t head;
1060 1060 seqnum_t tail;
1061 1061 seqnum_t sn;
1062 1062 seqnum_t desc_head;
1063 1063 int tx_ring_size = dp->gc.gc_tx_ring_size;
1064 1064 uint_t (*tx_desc_stat)(struct gem_dev *dp,
1065 1065 int slot, int ndesc) = dp->gc.gc_tx_desc_stat;
1066 1066 clock_t now;
1067 1067
1068 1068 now = ddi_get_lbolt();
1069 1069 if (now == (clock_t)0) {
1070 1070 /* make non-zero timestamp */
1071 1071 now--;
1072 1072 }
1073 1073
1074 1074 mutex_enter(&dp->xmitlock);
1075 1075
1076 1076 head = dp->tx_active_head;
1077 1077 tail = dp->tx_active_tail;
1078 1078
1079 1079 #if GEM_DEBUG_LEVEL > 2
1080 1080 if (head != tail) {
1081 1081 cmn_err(CE_CONT, "!%s: %s: "
1082 1082 "testing active_head:%d[%d], active_tail:%d[%d]",
1083 1083 dp->name, __func__,
1084 1084 head, SLOT(head, dp->gc.gc_tx_buf_size),
1085 1085 tail, SLOT(tail, dp->gc.gc_tx_buf_size));
1086 1086 }
1087 1087 #endif
1088 1088 #ifdef DEBUG
1089 1089 if (dp->tx_reclaim_busy == 0) {
1090 1090 /* check tx buffer management consistency */
1091 1091 ASSERT(dp->tx_free_tail - dp->tx_active_head
1092 1092 == dp->gc.gc_tx_buf_limit);
1093 1093 /* EMPTY */
1094 1094 }
1095 1095 #endif
1096 1096 dp->tx_reclaim_busy++;
1097 1097
1098 1098 /* sync all active HW descriptors */
1099 1099 gem_tx_desc_dma_sync(dp,
1100 1100 SLOT(dp->tx_desc_head, tx_ring_size),
1101 1101 dp->tx_desc_tail - dp->tx_desc_head,
1102 1102 DDI_DMA_SYNC_FORKERNEL);
1103 1103
1104 1104 tbp = GET_TXBUF(dp, head);
1105 1105 desc_head = dp->tx_desc_head;
1106 1106 for (sn = head; sn != tail;
1107 1107 dp->tx_active_head = (++sn), tbp = tbp->txb_next) {
1108 1108 int ndescs;
1109 1109
1110 1110 ASSERT(tbp->txb_desc == desc_head);
1111 1111
1112 1112 ndescs = tbp->txb_ndescs;
1113 1113 if (ndescs == 0) {
1114 1114 /* skip errored descriptors */
1115 1115 continue;
1116 1116 }
1117 1117 txstat = (*tx_desc_stat)(dp,
1118 1118 SLOT(tbp->txb_desc, tx_ring_size), ndescs);
1119 1119
1120 1120 if (txstat == 0) {
1121 1121 /* not transmitted yet */
1122 1122 break;
1123 1123 }
1124 1124
1125 1125 if (!dp->tx_blocked && (tbp->txb_flag & GEM_TXFLAG_INTR)) {
1126 1126 dp->tx_blocked = now;
1127 1127 }
1128 1128
1129 1129 ASSERT(txstat & (GEM_TX_DONE | GEM_TX_ERR));
1130 1130
1131 1131 if (txstat & GEM_TX_ERR) {
1132 1132 err = GEM_FAILURE;
1133 1133 cmn_err(CE_WARN, "!%s: tx error at desc %d[%d]",
1134 1134 dp->name, sn, SLOT(sn, tx_ring_size));
1135 1135 }
1136 1136 #if GEM_DEBUG_LEVEL > 4
1137 1137 if (now - tbp->txb_stime >= 50) {
1138 1138 cmn_err(CE_WARN, "!%s: tx delay while %d mS",
1139 1139 dp->name, (now - tbp->txb_stime)*10);
1140 1140 }
1141 1141 #endif
1142 1142 /* free transmitted descriptors */
1143 1143 desc_head += ndescs;
1144 1144 }
1145 1145
1146 1146 if (dp->tx_desc_head != desc_head) {
1147 1147 /* we have reclaimed one or more tx buffers */
1148 1148 dp->tx_desc_head = desc_head;
1149 1149
1150 1150 /* If we passed the next interrupt position, update it */
1151 1151 if (desc_head - dp->tx_desc_intr > 0) {
1152 1152 dp->tx_desc_intr = desc_head;
1153 1153 }
1154 1154 }
1155 1155 mutex_exit(&dp->xmitlock);
1156 1156
1157 1157 /* free dma mapping resources associated with transmitted tx buffers */
1158 1158 tbp = GET_TXBUF(dp, head);
1159 1159 tail = sn;
1160 1160 #if GEM_DEBUG_LEVEL > 2
1161 1161 if (head != tail) {
1162 1162 cmn_err(CE_CONT, "%s: freeing head:%d[%d], tail:%d[%d]",
1163 1163 __func__,
1164 1164 head, SLOT(head, dp->gc.gc_tx_buf_size),
1165 1165 tail, SLOT(tail, dp->gc.gc_tx_buf_size));
1166 1166 }
1167 1167 #endif
1168 1168 for (sn = head; sn != tail; sn++, tbp = tbp->txb_next) {
1169 1169 gem_txbuf_free_dma_resources(tbp);
1170 1170 }
1171 1171
1172 1172 /* recycle the tx buffers */
1173 1173 mutex_enter(&dp->xmitlock);
1174 1174 if (--dp->tx_reclaim_busy == 0) {
1175 1175 /* we are the last thread who can update free tail */
1176 1176 #if GEM_DEBUG_LEVEL > 4
1177 1177 /* check all resouces have been deallocated */
1178 1178 sn = dp->tx_free_tail;
1179 1179 tbp = GET_TXBUF(dp, new_tail);
1180 1180 while (sn != dp->tx_active_head + dp->gc.gc_tx_buf_limit) {
1181 1181 if (tbp->txb_nfrags) {
1182 1182 /* in use */
1183 1183 break;
1184 1184 }
1185 1185 ASSERT(tbp->txb_mp == NULL);
1186 1186 tbp = tbp->txb_next;
1187 1187 sn++;
1188 1188 }
1189 1189 ASSERT(dp->tx_active_head + dp->gc.gc_tx_buf_limit == sn);
1190 1190 #endif
1191 1191 dp->tx_free_tail =
1192 1192 dp->tx_active_head + dp->gc.gc_tx_buf_limit;
1193 1193 }
1194 1194 if (!dp->mac_active) {
1195 1195 /* someone may be waiting for me. */
1196 1196 cv_broadcast(&dp->tx_drain_cv);
1197 1197 }
1198 1198 #if GEM_DEBUG_LEVEL > 2
1199 1199 cmn_err(CE_CONT, "!%s: %s: called, "
1200 1200 "free_head:%d free_tail:%d(+%d) added:%d",
1201 1201 dp->name, __func__,
1202 1202 dp->tx_free_head, dp->tx_free_tail,
1203 1203 dp->tx_free_tail - dp->tx_free_head, tail - head);
1204 1204 #endif
1205 1205 mutex_exit(&dp->xmitlock);
1206 1206
1207 1207 return (err);
1208 1208 }
1209 1209 #pragma inline(gem_reclaim_txbuf)
1210 1210
1211 1211
1212 1212 /*
1213 1213 * Make tx descriptors in out-of-order manner
1214 1214 */
1215 1215 static void
1216 1216 gem_tx_load_descs_oo(struct gem_dev *dp,
1217 1217 seqnum_t start_slot, seqnum_t end_slot, uint64_t flags)
1218 1218 {
1219 1219 seqnum_t sn;
1220 1220 struct txbuf *tbp;
1221 1221 int tx_ring_size = dp->gc.gc_tx_ring_size;
1222 1222 int (*tx_desc_write)
1223 1223 (struct gem_dev *dp, int slot,
1224 1224 ddi_dma_cookie_t *dmacookie,
1225 1225 int frags, uint64_t flag) = dp->gc.gc_tx_desc_write;
1226 1226 clock_t now = ddi_get_lbolt();
1227 1227
1228 1228 sn = start_slot;
1229 1229 tbp = GET_TXBUF(dp, sn);
1230 1230 do {
1231 1231 #if GEM_DEBUG_LEVEL > 1
1232 1232 if (dp->tx_cnt < 100) {
1233 1233 dp->tx_cnt++;
1234 1234 flags |= GEM_TXFLAG_INTR;
1235 1235 }
1236 1236 #endif
1237 1237 /* write a tx descriptor */
1238 1238 tbp->txb_desc = sn;
1239 1239 tbp->txb_ndescs = (*tx_desc_write)(dp,
1240 1240 SLOT(sn, tx_ring_size),
1241 1241 tbp->txb_dmacookie,
1242 1242 tbp->txb_nfrags, flags | tbp->txb_flag);
1243 1243 tbp->txb_stime = now;
1244 1244 ASSERT(tbp->txb_ndescs == 1);
1245 1245
1246 1246 flags = 0;
1247 1247 sn++;
1248 1248 tbp = tbp->txb_next;
1249 1249 } while (sn != end_slot);
1250 1250 }
1251 1251
/*
 * Copy the whole mblk chain "mp" into the tx buffer's DMA bounce
 * buffer, pad to the minimum ethernet frame size when required, and
 * fill in the buffer's dma cookie(s).  Returns the number of bytes
 * loaded (the on-wire length before FCS).
 */
__INLINE__
static size_t
gem_setup_txbuf_copy(struct gem_dev *dp, mblk_t *mp, struct txbuf *tbp)
{
	size_t min_pkt;
	caddr_t bp;
	size_t off;
	mblk_t *tp;
	size_t len;
	uint64_t flag;

	ASSERT(tbp->txb_mp == NULL);

	/* we use bounce buffer for the packet */
	min_pkt = ETHERMIN;
	bp = tbp->txb_buf;
	off = 0;
	tp = mp;

	flag = tbp->txb_flag;
	if (flag & GEM_TXFLAG_SWVTAG) {
		/* need to increase min packet size */
		min_pkt += VTAG_SIZE;
		ASSERT((flag & GEM_TXFLAG_VTAG) == 0);
	}

	/* copy the rest */
	for (; tp; tp = tp->b_cont) {
		if ((len = (long)tp->b_wptr - (long)tp->b_rptr) > 0) {
			bcopy(tp->b_rptr, &bp[off], len);
			off += len;
		}
	}

	if (off < min_pkt &&
	    (min_pkt > ETHERMIN || !dp->gc.gc_tx_auto_pad)) {
		/*
		 * Extend the packet to minimum packet size explicitly.
		 * For software vlan packets, we shouldn't use tx autopad
		 * function because nics may not be aware of vlan.
		 * we must keep 46 octet of payload even if we use vlan.
		 */
		bzero(&bp[off], min_pkt - off);
		off = min_pkt;
	}

	/* hand the bounce buffer contents over to the device */
	(void) ddi_dma_sync(tbp->txb_bdh, (off_t)0, off, DDI_DMA_SYNC_FORDEV);

	tbp->txb_dmacookie[0].dmac_laddress = tbp->txb_buf_dma;
	tbp->txb_dmacookie[0].dmac_size = off;

	DPRINTF(2, (CE_CONT,
	    "!%s: %s: copy: addr:0x%llx len:0x%x, vtag:0x%04x, min_pkt:%d",
	    dp->name, __func__,
	    tbp->txb_dmacookie[0].dmac_laddress,
	    tbp->txb_dmacookie[0].dmac_size,
	    (flag & GEM_TXFLAG_VTAG) >> GEM_TXFLAG_VTAG_SHIFT,
	    min_pkt));

	/* save misc info */
	tbp->txb_mp = mp;
	tbp->txb_nfrags = 1;
#ifdef DEBUG_MULTIFRAGS
	/* debug aid: artificially split the cookie into three fragments */
	if (dp->gc.gc_tx_max_frags >= 3 &&
	    tbp->txb_dmacookie[0].dmac_size > 16*3) {
		tbp->txb_dmacookie[1].dmac_laddress =
		    tbp->txb_dmacookie[0].dmac_laddress + 16;
		tbp->txb_dmacookie[2].dmac_laddress =
		    tbp->txb_dmacookie[1].dmac_laddress + 16;

		tbp->txb_dmacookie[2].dmac_size =
		    tbp->txb_dmacookie[0].dmac_size - 16*2;
		tbp->txb_dmacookie[1].dmac_size = 16;
		tbp->txb_dmacookie[0].dmac_size = 16;
		tbp->txb_nfrags = 3;
	}
#endif
	return (off);
}
#pragma inline(gem_setup_txbuf_copy)
1332 1332
/*
 * Push the packets queued in the soft tx queue to the hardware:
 * advance tx_desc_tail over the queued descriptors and call the
 * hw-dependent gc_tx_start entry.  Must be called with xmitlock held
 * and a non-empty soft queue.
 */
__INLINE__
static void
gem_tx_start_unit(struct gem_dev *dp)
{
	seqnum_t head;
	seqnum_t tail;
	struct txbuf *tbp_head;
	struct txbuf *tbp_tail;

	/* update HW descriptors from soft queue */
	ASSERT(mutex_owned(&dp->xmitlock));
	ASSERT(dp->tx_softq_head == dp->tx_active_tail);

	head = dp->tx_softq_head;
	tail = dp->tx_softq_tail;

	DPRINTF(1, (CE_CONT,
	    "%s: %s: called, softq %d %d[+%d], desc %d %d[+%d]",
	    dp->name, __func__, head, tail, tail - head,
	    dp->tx_desc_head, dp->tx_desc_tail,
	    dp->tx_desc_tail - dp->tx_desc_head));

	ASSERT(tail - head > 0);

	dp->tx_desc_tail = tail;

	tbp_head = GET_TXBUF(dp, head);
	tbp_tail = GET_TXBUF(dp, tail - 1);

	ASSERT(tbp_tail->txb_desc + tbp_tail->txb_ndescs == dp->tx_desc_tail);

	/* kick the hardware with the first slot and descriptor count */
	dp->gc.gc_tx_start(dp,
	    SLOT(tbp_head->txb_desc, dp->gc.gc_tx_ring_size),
	    tbp_tail->txb_desc + tbp_tail->txb_ndescs - tbp_head->txb_desc);

	/* advance softq head and active tail */
	dp->tx_softq_head = dp->tx_active_tail = tail;
}
#pragma inline(gem_tx_start_unit)
1372 1372
#ifdef GEM_DEBUG_LEVEL
/* debug histogram: [0] counts calls, [1..9] counts calls by mblk chain len */
static int gem_send_cnt[10];
#endif
/* minimum contiguous header length needed for protocol/vlan analysis */
#define PKT_MIN_SIZE (sizeof (struct ether_header) + 10 + VTAG_SIZE)
/* ethernet header length */
#define EHLEN (sizeof (struct ether_header))
1378 1378 /*
1379 1379 * check ether packet type and ip protocol
1380 1380 */
1381 1381 static uint64_t
1382 1382 gem_txbuf_options(struct gem_dev *dp, mblk_t *mp, uint8_t *bp)
1383 1383 {
1384 1384 mblk_t *tp;
1385 1385 ssize_t len;
1386 1386 uint_t vtag;
1387 1387 int off;
1388 1388 uint64_t flag;
1389 1389
1390 1390 flag = 0ULL;
1391 1391
1392 1392 /*
1393 1393 * prepare continuous header of the packet for protocol analysis
1394 1394 */
1395 1395 if ((long)mp->b_wptr - (long)mp->b_rptr < PKT_MIN_SIZE) {
1396 1396 /* we use work buffer to copy mblk */
1397 1397 for (tp = mp, off = 0;
1398 1398 tp && (off < PKT_MIN_SIZE);
1399 1399 tp = tp->b_cont, off += len) {
1400 1400 len = (long)tp->b_wptr - (long)tp->b_rptr;
1401 1401 len = min(len, PKT_MIN_SIZE - off);
1402 1402 bcopy(tp->b_rptr, &bp[off], len);
1403 1403 }
1404 1404 } else {
1405 1405 /* we can use mblk without copy */
1406 1406 bp = mp->b_rptr;
1407 1407 }
1408 1408
1409 1409 /* process vlan tag for GLD v3 */
1410 1410 if (GET_NET16(&bp[VTAG_OFF]) == VTAG_TPID) {
1411 1411 if (dp->misc_flag & GEM_VLAN_HARD) {
1412 1412 vtag = GET_NET16(&bp[VTAG_OFF + 2]);
1413 1413 ASSERT(vtag);
1414 1414 flag |= vtag << GEM_TXFLAG_VTAG_SHIFT;
1415 1415 } else {
1416 1416 flag |= GEM_TXFLAG_SWVTAG;
1417 1417 }
1418 1418 }
1419 1419 return (flag);
1420 1420 }
1421 1421 #undef EHLEN
1422 1422 #undef PKT_MIN_SIZE
1423 1423 /*
1424 1424 * gem_send_common is an exported function because hw depend routines may
1425 1425 * use it for sending control frames like setup frames for 2114x chipset.
1426 1426 */
1427 1427 mblk_t *
1428 1428 gem_send_common(struct gem_dev *dp, mblk_t *mp_head, uint32_t flags)
1429 1429 {
1430 1430 int nmblk;
1431 1431 int avail;
1432 1432 mblk_t *tp;
1433 1433 mblk_t *mp;
1434 1434 int i;
1435 1435 struct txbuf *tbp;
1436 1436 seqnum_t head;
1437 1437 uint64_t load_flags;
1438 1438 uint64_t len_total = 0;
1439 1439 uint32_t bcast = 0;
1440 1440 uint32_t mcast = 0;
1441 1441
1442 1442 ASSERT(mp_head != NULL);
1443 1443
1444 1444 mp = mp_head;
1445 1445 nmblk = 1;
1446 1446 while ((mp = mp->b_next) != NULL) {
1447 1447 nmblk++;
1448 1448 }
1449 1449 #ifdef GEM_DEBUG_LEVEL
1450 1450 gem_send_cnt[0]++;
1451 1451 gem_send_cnt[min(nmblk, 9)]++;
1452 1452 #endif
1453 1453 /*
1454 1454 * Aquire resources
1455 1455 */
1456 1456 mutex_enter(&dp->xmitlock);
1457 1457 if (dp->mac_suspended) {
1458 1458 mutex_exit(&dp->xmitlock);
1459 1459 mp = mp_head;
1460 1460 while (mp) {
1461 1461 tp = mp->b_next;
1462 1462 freemsg(mp);
1463 1463 mp = tp;
1464 1464 }
1465 1465 return (NULL);
1466 1466 }
1467 1467
1468 1468 if (!dp->mac_active && (flags & GEM_SEND_CTRL) == 0) {
1469 1469 /* don't send data packets while mac isn't active */
1470 1470 /* XXX - should we discard packets? */
1471 1471 mutex_exit(&dp->xmitlock);
1472 1472 return (mp_head);
1473 1473 }
1474 1474
1475 1475 /* allocate free slots */
1476 1476 head = dp->tx_free_head;
1477 1477 avail = dp->tx_free_tail - head;
1478 1478
1479 1479 DPRINTF(2, (CE_CONT,
1480 1480 "!%s: %s: called, free_head:%d free_tail:%d(+%d) req:%d",
1481 1481 dp->name, __func__,
1482 1482 dp->tx_free_head, dp->tx_free_tail, avail, nmblk));
1483 1483
1484 1484 avail = min(avail, dp->tx_max_packets);
1485 1485
1486 1486 if (nmblk > avail) {
1487 1487 if (avail == 0) {
1488 1488 /* no resources; short cut */
1489 1489 DPRINTF(2, (CE_CONT, "!%s: no resources", __func__));
1490 1490 dp->tx_max_packets = max(dp->tx_max_packets - 1, 1);
1491 1491 goto done;
1492 1492 }
1493 1493 nmblk = avail;
1494 1494 }
1495 1495
1496 1496 dp->tx_free_head = head + nmblk;
1497 1497 load_flags = ((dp->tx_busy++) == 0) ? GEM_TXFLAG_HEAD : 0;
1498 1498
1499 1499 /* update last interrupt position if tx buffers exhaust. */
1500 1500 if (nmblk == avail) {
1501 1501 tbp = GET_TXBUF(dp, head + avail - 1);
1502 1502 tbp->txb_flag = GEM_TXFLAG_INTR;
1503 1503 dp->tx_desc_intr = head + avail;
1504 1504 }
1505 1505 mutex_exit(&dp->xmitlock);
1506 1506
1507 1507 tbp = GET_TXBUF(dp, head);
1508 1508
1509 1509 for (i = nmblk; i > 0; i--, tbp = tbp->txb_next) {
1510 1510 uint8_t *bp;
1511 1511 uint64_t txflag;
1512 1512
1513 1513 /* remove one from the mblk list */
1514 1514 ASSERT(mp_head != NULL);
1515 1515 mp = mp_head;
1516 1516 mp_head = mp_head->b_next;
1517 1517 mp->b_next = NULL;
1518 1518
1519 1519 /* statistics for non-unicast packets */
1520 1520 bp = mp->b_rptr;
1521 1521 if ((bp[0] & 1) && (flags & GEM_SEND_CTRL) == 0) {
1522 1522 if (bcmp(bp, gem_etherbroadcastaddr.ether_addr_octet,
1523 1523 ETHERADDRL) == 0) {
1524 1524 bcast++;
1525 1525 } else {
1526 1526 mcast++;
1527 1527 }
1528 1528 }
1529 1529
1530 1530 /* save misc info */
1531 1531 txflag = tbp->txb_flag;
1532 1532 txflag |= (flags & GEM_SEND_CTRL) << GEM_TXFLAG_PRIVATE_SHIFT;
1533 1533 txflag |= gem_txbuf_options(dp, mp, (uint8_t *)tbp->txb_buf);
1534 1534 tbp->txb_flag = txflag;
1535 1535
1536 1536 len_total += gem_setup_txbuf_copy(dp, mp, tbp);
1537 1537 }
1538 1538
1539 1539 (void) gem_tx_load_descs_oo(dp, head, head + nmblk, load_flags);
1540 1540
1541 1541 /* Append the tbp at the tail of the active tx buffer list */
1542 1542 mutex_enter(&dp->xmitlock);
1543 1543
1544 1544 if ((--dp->tx_busy) == 0) {
1545 1545 /* extend the tail of softq, as new packets have been ready. */
1546 1546 dp->tx_softq_tail = dp->tx_free_head;
1547 1547
1548 1548 if (!dp->mac_active && (flags & GEM_SEND_CTRL) == 0) {
1549 1549 /*
1550 1550 * The device status has changed while we are
1551 1551 * preparing tx buf.
1552 1552 * As we are the last one that make tx non-busy.
1553 1553 * wake up someone who may wait for us.
1554 1554 */
1555 1555 cv_broadcast(&dp->tx_drain_cv);
1556 1556 } else {
1557 1557 ASSERT(dp->tx_softq_tail - dp->tx_softq_head > 0);
1558 1558 gem_tx_start_unit(dp);
1559 1559 }
1560 1560 }
1561 1561 dp->stats.obytes += len_total;
1562 1562 dp->stats.opackets += nmblk;
1563 1563 dp->stats.obcast += bcast;
1564 1564 dp->stats.omcast += mcast;
1565 1565 done:
1566 1566 mutex_exit(&dp->xmitlock);
1567 1567
1568 1568 return (mp_head);
1569 1569 }
1570 1570
1571 1571 /* ========================================================== */
1572 1572 /*
1573 1573 * error detection and restart routines
1574 1574 */
1575 1575 /* ========================================================== */
1576 1576 int
1577 1577 gem_restart_nic(struct gem_dev *dp, uint_t flags)
1578 1578 {
1579 1579 ASSERT(mutex_owned(&dp->intrlock));
1580 1580
1581 1581 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
1582 1582 #ifdef GEM_DEBUG_LEVEL
1583 1583 #if GEM_DEBUG_LEVEL > 1
1584 1584 gem_dump_txbuf(dp, CE_CONT, "gem_restart_nic");
1585 1585 #endif
1586 1586 #endif
1587 1587
1588 1588 if (dp->mac_suspended) {
1589 1589 /* should we return GEM_FAILURE ? */
1590 1590 return (GEM_FAILURE);
1591 1591 }
1592 1592
1593 1593 /*
1594 1594 * We should avoid calling any routines except xxx_chip_reset
1595 1595 * when we are resuming the system.
1596 1596 */
1597 1597 if (dp->mac_active) {
1598 1598 if (flags & GEM_RESTART_KEEP_BUF) {
1599 1599 /* stop rx gracefully */
1600 1600 dp->rxmode &= ~RXMODE_ENABLE;
1601 1601 (void) (*dp->gc.gc_set_rx_filter)(dp);
1602 1602 }
1603 1603 (void) gem_mac_stop(dp, flags);
1604 1604 }
1605 1605
1606 1606 /* reset the chip. */
1607 1607 if ((*dp->gc.gc_reset_chip)(dp) != GEM_SUCCESS) {
1608 1608 cmn_err(CE_WARN, "%s: %s: failed to reset chip",
1609 1609 dp->name, __func__);
1610 1610 goto err;
1611 1611 }
1612 1612
1613 1613 if (gem_mac_init(dp) != GEM_SUCCESS) {
1614 1614 goto err;
1615 1615 }
1616 1616
1617 1617 /* setup media mode if the link have been up */
1618 1618 if (dp->mii_state == MII_STATE_LINKUP) {
1619 1619 if ((dp->gc.gc_set_media)(dp) != GEM_SUCCESS) {
1620 1620 goto err;
1621 1621 }
1622 1622 }
1623 1623
1624 1624 /* setup mac address and enable rx filter */
1625 1625 dp->rxmode |= RXMODE_ENABLE;
1626 1626 if ((*dp->gc.gc_set_rx_filter)(dp) != GEM_SUCCESS) {
1627 1627 goto err;
1628 1628 }
1629 1629
1630 1630 /*
1631 1631 * XXX - a panic happened because of linkdown.
1632 1632 * We must check mii_state here, because the link can be down just
1633 1633 * before the restart event happen. If the link is down now,
1634 1634 * gem_mac_start() will be called from gem_mii_link_check() when
1635 1635 * the link become up later.
1636 1636 */
1637 1637 if (dp->mii_state == MII_STATE_LINKUP) {
1638 1638 /* restart the nic */
1639 1639 ASSERT(!dp->mac_active);
1640 1640 (void) gem_mac_start(dp);
1641 1641 }
1642 1642 return (GEM_SUCCESS);
1643 1643 err:
1644 1644 return (GEM_FAILURE);
1645 1645 }
1646 1646
1647 1647
1648 1648 static void
1649 1649 gem_tx_timeout(struct gem_dev *dp)
1650 1650 {
1651 1651 clock_t now;
1652 1652 boolean_t tx_sched;
1653 1653 struct txbuf *tbp;
1654 1654
1655 1655 mutex_enter(&dp->intrlock);
1656 1656
1657 1657 tx_sched = B_FALSE;
1658 1658 now = ddi_get_lbolt();
1659 1659
1660 1660 mutex_enter(&dp->xmitlock);
1661 1661 if (!dp->mac_active || dp->mii_state != MII_STATE_LINKUP) {
1662 1662 mutex_exit(&dp->xmitlock);
1663 1663 goto schedule_next;
1664 1664 }
1665 1665 mutex_exit(&dp->xmitlock);
1666 1666
1667 1667 /* reclaim transmitted buffers to check the trasmitter hangs or not. */
1668 1668 if (gem_reclaim_txbuf(dp) != GEM_SUCCESS) {
1669 1669 /* tx error happened, reset transmitter in the chip */
1670 1670 (void) gem_restart_nic(dp, 0);
1671 1671 tx_sched = B_TRUE;
1672 1672 dp->tx_blocked = (clock_t)0;
1673 1673
1674 1674 goto schedule_next;
1675 1675 }
1676 1676
1677 1677 mutex_enter(&dp->xmitlock);
1678 1678 /* check if the transmitter thread is stuck */
1679 1679 if (dp->tx_active_head == dp->tx_active_tail) {
1680 1680 /* no tx buffer is loaded to the nic */
1681 1681 if (dp->tx_blocked &&
1682 1682 now - dp->tx_blocked > dp->gc.gc_tx_timeout_interval) {
1683 1683 gem_dump_txbuf(dp, CE_WARN,
1684 1684 "gem_tx_timeout: tx blocked");
1685 1685 tx_sched = B_TRUE;
1686 1686 dp->tx_blocked = (clock_t)0;
1687 1687 }
1688 1688 mutex_exit(&dp->xmitlock);
1689 1689 goto schedule_next;
1690 1690 }
1691 1691
1692 1692 tbp = GET_TXBUF(dp, dp->tx_active_head);
1693 1693 if (now - tbp->txb_stime < dp->gc.gc_tx_timeout) {
1694 1694 mutex_exit(&dp->xmitlock);
1695 1695 goto schedule_next;
1696 1696 }
1697 1697 mutex_exit(&dp->xmitlock);
1698 1698
1699 1699 gem_dump_txbuf(dp, CE_WARN, "gem_tx_timeout: tx timeout");
1700 1700
1701 1701 /* discard untransmitted packet and restart tx. */
1702 1702 (void) gem_restart_nic(dp, GEM_RESTART_NOWAIT);
1703 1703 tx_sched = B_TRUE;
1704 1704 dp->tx_blocked = (clock_t)0;
1705 1705
1706 1706 schedule_next:
1707 1707 mutex_exit(&dp->intrlock);
1708 1708
1709 1709 /* restart the downstream if needed */
1710 1710 if (tx_sched) {
1711 1711 mac_tx_update(dp->mh);
1712 1712 }
1713 1713
1714 1714 DPRINTF(4, (CE_CONT,
1715 1715 "!%s: blocked:%d active_head:%d active_tail:%d desc_intr:%d",
1716 1716 dp->name, BOOLEAN(dp->tx_blocked),
1717 1717 dp->tx_active_head, dp->tx_active_tail, dp->tx_desc_intr));
1718 1718 dp->timeout_id =
1719 1719 timeout((void (*)(void *))gem_tx_timeout,
1720 1720 (void *)dp, dp->gc.gc_tx_timeout_interval);
1721 1721 }
1722 1722
1723 1723 /* ================================================================== */
1724 1724 /*
1725 1725 * Interrupt handler
1726 1726 */
1727 1727 /* ================================================================== */
1728 1728 __INLINE__
1729 1729 static void
1730 1730 gem_append_rxbuf(struct gem_dev *dp, struct rxbuf *rbp_head)
1731 1731 {
1732 1732 struct rxbuf *rbp;
1733 1733 seqnum_t tail;
1734 1734 int rx_ring_size = dp->gc.gc_rx_ring_size;
1735 1735
1736 1736 ASSERT(rbp_head != NULL);
1737 1737 ASSERT(mutex_owned(&dp->intrlock));
1738 1738
1739 1739 DPRINTF(3, (CE_CONT, "!%s: %s: slot_head:%d, slot_tail:%d",
1740 1740 dp->name, __func__, dp->rx_active_head, dp->rx_active_tail));
1741 1741
1742 1742 /*
1743 1743 * Add new buffers into active rx buffer list
1744 1744 */
1745 1745 if (dp->rx_buf_head == NULL) {
1746 1746 dp->rx_buf_head = rbp_head;
1747 1747 ASSERT(dp->rx_buf_tail == NULL);
1748 1748 } else {
1749 1749 dp->rx_buf_tail->rxb_next = rbp_head;
1750 1750 }
1751 1751
1752 1752 tail = dp->rx_active_tail;
1753 1753 for (rbp = rbp_head; rbp; rbp = rbp->rxb_next) {
1754 1754 /* need to notify the tail for the lower layer */
1755 1755 dp->rx_buf_tail = rbp;
1756 1756
1757 1757 dp->gc.gc_rx_desc_write(dp,
1758 1758 SLOT(tail, rx_ring_size),
1759 1759 rbp->rxb_dmacookie,
1760 1760 rbp->rxb_nfrags);
1761 1761
1762 1762 dp->rx_active_tail = tail = tail + 1;
1763 1763 }
1764 1764 }
1765 1765 #pragma inline(gem_append_rxbuf)
1766 1766
1767 1767 mblk_t *
1768 1768 gem_get_packet_default(struct gem_dev *dp, struct rxbuf *rbp, size_t len)
1769 1769 {
1770 1770 int rx_header_len = dp->gc.gc_rx_header_len;
1771 1771 uint8_t *bp;
1772 1772 mblk_t *mp;
1773 1773
1774 1774 /* allocate a new mblk */
1775 1775 if (mp = allocb(len + VTAG_SIZE, BPRI_MED)) {
1776 1776 ASSERT(mp->b_next == NULL);
1777 1777 ASSERT(mp->b_cont == NULL);
1778 1778
1779 1779 mp->b_rptr += VTAG_SIZE;
1780 1780 bp = mp->b_rptr;
1781 1781 mp->b_wptr = bp + len;
1782 1782
1783 1783 /*
1784 1784 * flush the range of the entire buffer to invalidate
1785 1785 * all of corresponding dirty entries in iocache.
1786 1786 */
1787 1787 (void) ddi_dma_sync(rbp->rxb_dh, rx_header_len,
1788 1788 0, DDI_DMA_SYNC_FORKERNEL);
1789 1789
1790 1790 bcopy(rbp->rxb_buf + rx_header_len, bp, len);
1791 1791 }
1792 1792 return (mp);
1793 1793 }
1794 1794
#ifdef GEM_DEBUG_LEVEL
/* debug histogram of packets handled per gem_receive() call (0..16+) */
uint_t gem_rx_pkts[17];
#endif
1798 1798
1799 1799
/*
 * Harvest completed rx descriptors: for each received frame, copy it
 * into a fresh mblk (via gc_get_packet), validate its length against
 * ethernet min/max (allowing VTAG_SIZE extra for tagged frames),
 * recycle the rx buffer back onto the ring, then pass the collected
 * mblk chain to mac_rx() with intrlock temporarily dropped.
 * Returns the number of descriptors processed.
 */
int
gem_receive(struct gem_dev *dp)
{
	uint64_t len_total = 0;
	struct rxbuf *rbp;
	mblk_t *mp;
	int cnt = 0;
	uint64_t rxstat;
	struct rxbuf *newbufs;		/* buffers to recycle to the ring */
	struct rxbuf **newbufs_tailp;
	mblk_t *rx_head;		/* packets to send upstream */
	mblk_t **rx_tailp;
	int rx_ring_size = dp->gc.gc_rx_ring_size;
	seqnum_t active_head;
	uint64_t (*rx_desc_stat)(struct gem_dev *dp,
	    int slot, int ndesc);
	int ethermin = ETHERMIN;
	int ethermax = dp->mtu + sizeof (struct ether_header);
	int rx_header_len = dp->gc.gc_rx_header_len;

	ASSERT(mutex_owned(&dp->intrlock));

	DPRINTF(3, (CE_CONT, "!%s: gem_receive: rx_buf_head:%p",
	    dp->name, dp->rx_buf_head));

	rx_desc_stat = dp->gc.gc_rx_desc_stat;
	newbufs_tailp = &newbufs;
	rx_tailp = &rx_head;
	for (active_head = dp->rx_active_head;
	    (rbp = dp->rx_buf_head) != NULL; active_head++) {
		int len;
		if (cnt == 0) {
			/* sync a batch of descriptors at a time */
			cnt = max(dp->poll_pkt_delay*2, 10);
			cnt = min(cnt,
			    dp->rx_active_tail - active_head);
			gem_rx_desc_dma_sync(dp,
			    SLOT(active_head, rx_ring_size),
			    cnt,
			    DDI_DMA_SYNC_FORKERNEL);
		}

		if (rx_header_len > 0) {
			(void) ddi_dma_sync(rbp->rxb_dh, 0,
			    rx_header_len, DDI_DMA_SYNC_FORKERNEL);
		}

		if (((rxstat = (*rx_desc_stat)(dp,
		    SLOT(active_head, rx_ring_size),
		    rbp->rxb_nfrags))
		    & (GEM_RX_DONE | GEM_RX_ERR)) == 0) {
			/* not received yet */
			break;
		}

		/* Remove the head of the rx buffer list */
		dp->rx_buf_head = rbp->rxb_next;
		cnt--;


		if (rxstat & GEM_RX_ERR) {
			/* hw rx error: drop frame, recycle buffer */
			goto next;
		}

		len = rxstat & GEM_RX_LEN;
		DPRINTF(3, (CE_CONT, "!%s: %s: rxstat:0x%llx, len:0x%x",
		    dp->name, __func__, rxstat, len));

		/*
		 * Copy the packet
		 */
		if ((mp = dp->gc.gc_get_packet(dp, rbp, len)) == NULL) {
			/* no memory, discard the packet */
			dp->stats.norcvbuf++;
			goto next;
		}

		/*
		 * Process VLAN tag
		 */
		ethermin = ETHERMIN;
		ethermax = dp->mtu + sizeof (struct ether_header);
		if (GET_NET16(mp->b_rptr + VTAG_OFF) == VTAG_TPID) {
			/* tagged frame: allow VTAG_SIZE extra octets */
			ethermax += VTAG_SIZE;
		}

		/* check packet size */
		if (len < ethermin) {
			dp->stats.errrcv++;
			dp->stats.runt++;
			freemsg(mp);
			goto next;
		}

		if (len > ethermax) {
			dp->stats.errrcv++;
			dp->stats.frame_too_long++;
			freemsg(mp);
			goto next;
		}

		len_total += len;

#ifdef GEM_DEBUG_VLAN
		if (GET_ETHERTYPE(mp->b_rptr) == VTAG_TPID) {
			gem_dump_packet(dp, (char *)__func__, mp, B_TRUE);
		}
#endif
		/* append received packet to temporary rx buffer list */
		*rx_tailp = mp;
		rx_tailp = &mp->b_next;

		/* multicast/broadcast statistics */
		if (mp->b_rptr[0] & 1) {
			if (bcmp(mp->b_rptr,
			    gem_etherbroadcastaddr.ether_addr_octet,
			    ETHERADDRL) == 0) {
				dp->stats.rbcast++;
			} else {
				dp->stats.rmcast++;
			}
		}
next:
		ASSERT(rbp != NULL);

		/* append new one to temporal new buffer list */
		*newbufs_tailp = rbp;
		newbufs_tailp = &rbp->rxb_next;
	}

	/* advance rx_active_head */
	if ((cnt = active_head - dp->rx_active_head) > 0) {
		dp->stats.rbytes += len_total;
		dp->stats.rpackets += cnt;
	}
	dp->rx_active_head = active_head;

	/* terminate the working list */
	*newbufs_tailp = NULL;
	*rx_tailp = NULL;

	if (dp->rx_buf_head == NULL) {
		dp->rx_buf_tail = NULL;
	}

	DPRINTF(4, (CE_CONT, "%s: %s: cnt:%d, rx_head:%p",
	    dp->name, __func__, cnt, rx_head));

	if (newbufs) {
		/*
		 * fillfull rx list with new buffers
		 */
		seqnum_t head;

		/* save current tail */
		head = dp->rx_active_tail;
		gem_append_rxbuf(dp, newbufs);

		/* call hw depend start routine if we have. */
		dp->gc.gc_rx_start(dp,
		    SLOT(head, rx_ring_size), dp->rx_active_tail - head);
	}

	if (rx_head) {
		/*
		 * send up received packets
		 */
		mutex_exit(&dp->intrlock);
		mac_rx(dp->mh, NULL, rx_head);
		mutex_enter(&dp->intrlock);
	}

#ifdef GEM_DEBUG_LEVEL
	gem_rx_pkts[min(cnt, sizeof (gem_rx_pkts)/sizeof (uint_t)-1)]++;
#endif
	return (cnt);
}
1975 1975
/*
 * Tx-done interrupt service: reclaim completed tx buffers, restart
 * the nic on tx error, and decide whether the blocked upstream should
 * be rescheduled.  Returns B_TRUE when the caller must invoke
 * mac_tx_update().
 */
boolean_t
gem_tx_done(struct gem_dev *dp)
{
	boolean_t tx_sched = B_FALSE;

	if (gem_reclaim_txbuf(dp) != GEM_SUCCESS) {
		/* tx error: restart keeping buffered packets */
		(void) gem_restart_nic(dp, GEM_RESTART_KEEP_BUF);
		DPRINTF(2, (CE_CONT, "!%s: gem_tx_done: tx_desc: %d %d",
		    dp->name, dp->tx_active_head, dp->tx_active_tail));
		tx_sched = B_TRUE;
		goto x;
	}

	mutex_enter(&dp->xmitlock);

	/* XXX - we must not have any packets in soft queue */
	ASSERT(dp->tx_softq_head == dp->tx_softq_tail);
	/*
	 * If we won't have chance to get more free tx buffers, and blocked,
	 * it is worth to reschedule the downstream i.e. tx side.
	 */
	ASSERT(dp->tx_desc_intr - dp->tx_desc_head >= 0);
	if (dp->tx_blocked && dp->tx_desc_intr == dp->tx_desc_head) {
		/*
		 * As no further tx-done interrupts are scheduled, this
		 * is the last chance to kick tx side, which may be
		 * blocked now, otherwise the tx side never works again.
		 */
		tx_sched = B_TRUE;
		dp->tx_blocked = (clock_t)0;
		/* widen the burst throttle again (see gem_send_common) */
		dp->tx_max_packets =
		    min(dp->tx_max_packets + 2, dp->gc.gc_tx_buf_limit);
	}

	mutex_exit(&dp->xmitlock);

	DPRINTF(3, (CE_CONT, "!%s: %s: ret: blocked:%d",
	    dp->name, __func__, BOOLEAN(dp->tx_blocked)));
x:
	return (tx_sched);
}
2017 2017
/*
 * gem_intr: common interrupt handler.
 *
 * Dispatches to the hardware dependent interrupt routine under
 * intrlock and returns its DDI_INTR_* claim status.  When the
 * hw routine sets INTR_RESTART_TX in its return value, the blocked
 * tx stream is restarted after intrlock has been dropped.
 */
static uint_t
gem_intr(struct gem_dev *dp)
{
	uint_t ret;

	mutex_enter(&dp->intrlock);
	/* a suspended device cannot have raised this interrupt */
	if (dp->mac_suspended) {
		mutex_exit(&dp->intrlock);
		return (DDI_INTR_UNCLAIMED);
	}
	dp->intr_busy = B_TRUE;

	ret = (*dp->gc.gc_interrupt)(dp);

	if (ret == DDI_INTR_UNCLAIMED) {
		dp->intr_busy = B_FALSE;
		mutex_exit(&dp->intrlock);
		return (ret);
	}

	/* wake up a thread draining tx in gem_mac_stop, if any */
	if (!dp->mac_active) {
		cv_broadcast(&dp->tx_drain_cv);
	}


	dp->stats.intr++;
	dp->intr_busy = B_FALSE;

	mutex_exit(&dp->intrlock);

	/* restart tx outside of intrlock to avoid recursive entry */
	if (ret & INTR_RESTART_TX) {
		DPRINTF(4, (CE_CONT, "!%s: calling mac_tx_update", dp->name));
		mac_tx_update(dp->mh);
		ret &= ~INTR_RESTART_TX;
	}
	return (ret);
}
2055 2055
2056 2056 static void
2057 2057 gem_intr_watcher(struct gem_dev *dp)
2058 2058 {
2059 2059 (void) gem_intr(dp);
2060 2060
2061 2061 /* schedule next call of tu_intr_watcher */
2062 2062 dp->intr_watcher_id =
2063 2063 timeout((void (*)(void *))gem_intr_watcher, (void *)dp, 1);
2064 2064 }
2065 2065
2066 2066 /* ======================================================================== */
2067 2067 /*
2068 2068 * MII support routines
2069 2069 */
2070 2070 /* ======================================================================== */
2071 2071 static void
2072 2072 gem_choose_forcedmode(struct gem_dev *dp)
2073 2073 {
2074 2074 /* choose media mode */
2075 2075 if (dp->anadv_1000fdx || dp->anadv_1000hdx) {
2076 2076 dp->speed = GEM_SPD_1000;
2077 2077 dp->full_duplex = dp->anadv_1000fdx;
2078 2078 } else if (dp->anadv_100fdx || dp->anadv_100t4) {
2079 2079 dp->speed = GEM_SPD_100;
2080 2080 dp->full_duplex = B_TRUE;
2081 2081 } else if (dp->anadv_100hdx) {
2082 2082 dp->speed = GEM_SPD_100;
2083 2083 dp->full_duplex = B_FALSE;
2084 2084 } else {
2085 2085 dp->speed = GEM_SPD_10;
2086 2086 dp->full_duplex = dp->anadv_10fdx;
2087 2087 }
2088 2088 }
2089 2089
2090 2090 uint16_t
2091 2091 gem_mii_read(struct gem_dev *dp, uint_t reg)
2092 2092 {
2093 2093 if ((dp->mii_status & MII_STATUS_MFPRMBLSUPR) == 0) {
2094 2094 (*dp->gc.gc_mii_sync)(dp);
2095 2095 }
2096 2096 return ((*dp->gc.gc_mii_read)(dp, reg));
2097 2097 }
2098 2098
2099 2099 void
2100 2100 gem_mii_write(struct gem_dev *dp, uint_t reg, uint16_t val)
2101 2101 {
2102 2102 if ((dp->mii_status & MII_STATUS_MFPRMBLSUPR) == 0) {
2103 2103 (*dp->gc.gc_mii_sync)(dp);
2104 2104 }
2105 2105 (*dp->gc.gc_mii_write)(dp, reg, val);
2106 2106 }
2107 2107
/*
 * fc_cap_decode: condense the MII_AN_ADVERT/MII_AN_LPABLE pause bits
 * into a 2-bit flow control capability index (bit 0: symmetric pause,
 * bit 1: asymmetric pause), suitable for indexing gem_fc_result[][].
 */
#define	fc_cap_decode(x)	\
	((((x) & MII_ABILITY_PAUSE) ? 1 : 0) |	\
	(((x) & MII_ABILITY_ASMPAUSE) ? 2 : 0))
2111 2111
/*
 * gem_mii_config_default: default PHY configuration routine.
 *
 * Programs the auto-negotiation advertisement registers (MII_AN_ADVERT,
 * and MII_1000TC for PHYs with extended status) according to the
 * anadv_* flags and the requested flow control mode in the softstate.
 * Returns GEM_SUCCESS, or GEM_FAILURE when the PHY reports no
 * technology ability bits at all.
 */
int
gem_mii_config_default(struct gem_dev *dp)
{
	uint16_t mii_stat;
	uint16_t val;
	/* map dp->anadv_flow_control (0-3) to MII_AN_ADVERT pause bits */
	static uint16_t fc_cap_encode[4] = {
		0,				/* none */
		MII_ABILITY_PAUSE,		/* symmetric */
		MII_ABILITY_ASMPAUSE,		/* tx */
		MII_ABILITY_PAUSE | MII_ABILITY_ASMPAUSE, /* rx-symmetric */
	};

	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));

	/*
	 * Configure bits in advertisement register
	 */
	mii_stat = dp->mii_status;

	DPRINTF(1, (CE_CONT, "!%s: %s: MII_STATUS reg:%b",
	    dp->name, __func__, mii_stat, MII_STATUS_BITS));

	if ((mii_stat & MII_STATUS_ABILITY_TECH) == 0) {
		/* it's funny */
		cmn_err(CE_WARN, "!%s: wrong ability bits: mii_status:%b",
		    dp->name, mii_stat, MII_STATUS_BITS);
		return (GEM_FAILURE);
	}

	/* Do not change the rest of the ability bits in the advert reg */
	val = gem_mii_read(dp, MII_AN_ADVERT) & ~MII_ABILITY_ALL;

	DPRINTF(0, (CE_CONT,
	    "!%s: %s: 100T4:%d 100F:%d 100H:%d 10F:%d 10H:%d",
	    dp->name, __func__,
	    dp->anadv_100t4, dp->anadv_100fdx, dp->anadv_100hdx,
	    dp->anadv_10fdx, dp->anadv_10hdx));

	/* fold each enabled advertisement flag into the ability word */
	if (dp->anadv_100t4) {
		val |= MII_ABILITY_100BASE_T4;
	}
	if (dp->anadv_100fdx) {
		val |= MII_ABILITY_100BASE_TX_FD;
	}
	if (dp->anadv_100hdx) {
		val |= MII_ABILITY_100BASE_TX;
	}
	if (dp->anadv_10fdx) {
		val |= MII_ABILITY_10BASE_T_FD;
	}
	if (dp->anadv_10hdx) {
		val |= MII_ABILITY_10BASE_T;
	}

	/* set flow control capability */
	val |= fc_cap_encode[dp->anadv_flow_control];

	DPRINTF(0, (CE_CONT,
	    "!%s: %s: setting MII_AN_ADVERT reg:%b, mii_mode:%d, fc:%d",
	    dp->name, __func__, val, MII_ABILITY_BITS, dp->gc.gc_mii_mode,
	    dp->anadv_flow_control));

	gem_mii_write(dp, MII_AN_ADVERT, val);

	if (mii_stat & MII_STATUS_XSTATUS) {
		/*
		 * 1000Base-T GMII support
		 */
		if (!dp->anadv_autoneg) {
			/* enable manual configuration */
			val = MII_1000TC_CFG_EN;
		} else {
			val = 0;
			if (dp->anadv_1000fdx) {
				val |= MII_1000TC_ADV_FULL;
			}
			if (dp->anadv_1000hdx) {
				val |= MII_1000TC_ADV_HALF;
			}
		}
		DPRINTF(0, (CE_CONT,
		    "!%s: %s: setting MII_1000TC reg:%b",
		    dp->name, __func__, val, MII_1000TC_BITS));

		gem_mii_write(dp, MII_1000TC, val);
	}

	return (GEM_SUCCESS);
}
2201 2201
/* notify the mac layer of link state transitions */
#define	GEM_LINKUP(dp)		mac_link_update((dp)->mh, LINK_STATE_UP)
#define	GEM_LINKDOWN(dp)	mac_link_update((dp)->mh, LINK_STATE_DOWN)

/*
 * Flow control resolution matrix.  Rows are fc_cap_decode() of our
 * advertisement, columns are fc_cap_decode() of the link partner's
 * ability: 0 none, 1 symmetric, 2 asymmetric(tx), 3 rx/symmetric.
 */
static uint8_t gem_fc_result[4 /* my cap */ ][4 /* lp cap */] = {
/*	 none	   symm	     tx	       rx/symm */
/* none */
	{FLOW_CONTROL_NONE,
		FLOW_CONTROL_NONE,
			FLOW_CONTROL_NONE,
				FLOW_CONTROL_NONE},
/* sym */
	{FLOW_CONTROL_NONE,
		FLOW_CONTROL_SYMMETRIC,
			FLOW_CONTROL_NONE,
				FLOW_CONTROL_SYMMETRIC},
/* tx */
	{FLOW_CONTROL_NONE,
		FLOW_CONTROL_NONE,
			FLOW_CONTROL_NONE,
				FLOW_CONTROL_TX_PAUSE},
/* rx/symm */
	{FLOW_CONTROL_NONE,
		FLOW_CONTROL_SYMMETRIC,
			FLOW_CONTROL_RX_PAUSE,
				FLOW_CONTROL_SYMMETRIC},
};

/* human readable flow control names, indexed by FLOW_CONTROL_* */
static char *gem_fc_type[] = {
	"without",
	"with symmetric",
	"with tx",
	"with rx",
};
2235 2235
/*
 * gem_mii_link_check: the MII link state machine.
 *
 * Called with intrlock held from the link watcher timer.  Advances the
 * PHY through reset, auto-negotiation, media setup and link
 * supervision states, schedules its own next invocation via timeout(),
 * and reports link transitions to the mac layer with GEM_LINKUP /
 * GEM_LINKDOWN.  Returns B_TRUE when the caller should reschedule the
 * blocked tx stream (mac_tx_update).
 */
boolean_t
gem_mii_link_check(struct gem_dev *dp)
{
	uint16_t old_mii_state;
	boolean_t tx_sched = B_FALSE;
	uint16_t status;
	uint16_t advert;
	uint16_t lpable;
	uint16_t exp;
	uint16_t ctl1000;
	uint16_t stat1000;
	uint16_t val;
	clock_t now;
	clock_t diff;
	int linkdown_action;
	boolean_t fix_phy = B_FALSE;

	now = ddi_get_lbolt();
	old_mii_state = dp->mii_state;

	DPRINTF(3, (CE_CONT, "!%s: %s: time:%d state:%d",
	    dp->name, __func__, now, dp->mii_state));

	/* ticks elapsed since the previous call; drives all state timers */
	diff = now - dp->mii_last_check;
	dp->mii_last_check = now;

	/*
	 * For NWAM, don't show linkdown state right
	 * after the system boots
	 */
	if (dp->linkup_delay > 0) {
		if (dp->linkup_delay > diff) {
			dp->linkup_delay -= diff;
		} else {
			/* link up timeout */
			dp->linkup_delay = -1;
		}
	}

next_nowait:
	switch (dp->mii_state) {
	case MII_STATE_UNKNOWN:
		/* power-up, DP83840 requires 32 sync bits */
		(*dp->gc.gc_mii_sync)(dp);
		goto reset_phy;

	case MII_STATE_RESETTING:
		dp->mii_timer -= diff;
		if (dp->mii_timer > 0) {
			/* don't read phy registers in resetting */
			dp->mii_interval = WATCH_INTERVAL_FAST;
			goto next;
		}

		/* Timer expired, ensure reset bit is not set */

		if (dp->mii_status & MII_STATUS_MFPRMBLSUPR) {
			/* some phys need sync bits after reset */
			(*dp->gc.gc_mii_sync)(dp);
		}
		val = gem_mii_read(dp, MII_CONTROL);
		if (val & MII_CONTROL_RESET) {
			cmn_err(CE_NOTE,
			    "!%s: time:%ld resetting phy not complete."
			    " mii_control:0x%b",
			    dp->name, ddi_get_lbolt(),
			    val, MII_CONTROL_BITS);
		}

		/* ensure neither isolated nor pwrdown nor auto-nego mode */
		/* XXX -- this operation is required for NS DP83840A. */
		gem_mii_write(dp, MII_CONTROL, 0);

		/* As resetting PHY has completed, configure PHY registers */
		if ((*dp->gc.gc_mii_config)(dp) != GEM_SUCCESS) {
			/* we failed to configure PHY. */
			goto reset_phy;
		}

		/* mii_config may disable autonegatiation */
		gem_choose_forcedmode(dp);

		/* forget everything negotiated before the reset */
		dp->mii_lpable = 0;
		dp->mii_advert = 0;
		dp->mii_exp = 0;
		dp->mii_ctl1000 = 0;
		dp->mii_stat1000 = 0;
		dp->flow_control = FLOW_CONTROL_NONE;

		if (!dp->anadv_autoneg) {
			/* skip auto-negotiation phase */
			dp->mii_state = MII_STATE_MEDIA_SETUP;
			dp->mii_timer = 0;
			dp->mii_interval = 0;
			goto next_nowait;
		}

		/* Issue auto-negotiation command */
		goto autonego;

	case MII_STATE_AUTONEGOTIATING:
		/*
		 * Autonegotiation is in progress
		 */
		dp->mii_timer -= diff;
		if (dp->mii_timer -
		    (dp->gc.gc_mii_an_timeout
		    - dp->gc.gc_mii_an_wait) > 0) {
			/*
			 * wait for a while, typically autonegotiation
			 * completes in 2.3 - 2.5 sec.
			 */
			dp->mii_interval = WATCH_INTERVAL_FAST;
			goto next;
		}

		/* read PHY status */
		status = gem_mii_read(dp, MII_STATUS);
		DPRINTF(4, (CE_CONT,
		    "!%s: %s: called: mii_state:%d MII_STATUS reg:%b",
		    dp->name, __func__, dp->mii_state,
		    status, MII_STATUS_BITS));

		if (status & MII_STATUS_REMFAULT) {
			/*
			 * The link partner told me something wrong happened.
			 * What do we do ?  Just restart auto-negotiation.
			 */
			cmn_err(CE_CONT,
			    "!%s: auto-negotiation failed: remote fault",
			    dp->name);
			goto autonego;
		}

		if ((status & MII_STATUS_ANDONE) == 0) {
			if (dp->mii_timer <= 0) {
				/*
				 * Auto-negotiation was timed out,
				 * try again w/o resetting phy.
				 */
				if (!dp->mii_supress_msg) {
					cmn_err(CE_WARN,
					"!%s: auto-negotiation failed: timeout",
					    dp->name);
					dp->mii_supress_msg = B_TRUE;
				}
				goto autonego;
			}
			/*
			 * Auto-negotiation is in progress. Wait.
			 */
			dp->mii_interval = dp->gc.gc_mii_an_watch_interval;
			goto next;
		}

		/*
		 * Auto-negotiation have completed.
		 * Assume linkdown and fall through.
		 */
		dp->mii_supress_msg = B_FALSE;
		dp->mii_state = MII_STATE_AN_DONE;
		DPRINTF(0, (CE_CONT,
		    "!%s: auto-negotiation completed, MII_STATUS:%b",
		    dp->name, status, MII_STATUS_BITS));

		if (dp->gc.gc_mii_an_delay > 0) {
			/* the hw wants a settle time before reading results */
			dp->mii_timer = dp->gc.gc_mii_an_delay;
			dp->mii_interval = drv_usectohz(20*1000);
			goto next;
		}

		dp->mii_timer = 0;
		diff = 0;
		goto next_nowait;

	case MII_STATE_AN_DONE:
		/*
		 * Auto-negotiation have done. Now we can set up media.
		 */
		dp->mii_timer -= diff;
		if (dp->mii_timer > 0) {
			/* wait for a while */
			dp->mii_interval = WATCH_INTERVAL_FAST;
			goto next;
		}

		/*
		 * set up the result of auto negotiation
		 */

		/*
		 * Read registers required to determin current
		 * duplex mode and media speed.
		 */
		if (dp->gc.gc_mii_an_delay > 0) {
			/*
			 * As the link watcher context has been suspended,
			 * 'status' is invalid. We must re-read the status
			 * register here.
			 */
			status = gem_mii_read(dp, MII_STATUS);
		}
		advert = gem_mii_read(dp, MII_AN_ADVERT);
		lpable = gem_mii_read(dp, MII_AN_LPABLE);
		exp = gem_mii_read(dp, MII_AN_EXPANSION);
		if (exp == 0xffff) {
			/* some phys don't have exp register */
			exp = 0;
		}
		ctl1000 = 0;
		stat1000 = 0;
		if (dp->mii_status & MII_STATUS_XSTATUS) {
			ctl1000 = gem_mii_read(dp, MII_1000TC);
			stat1000 = gem_mii_read(dp, MII_1000TS);
		}
		/* cache the negotiation result for kstats/props */
		dp->mii_lpable = lpable;
		dp->mii_advert = advert;
		dp->mii_exp = exp;
		dp->mii_ctl1000 = ctl1000;
		dp->mii_stat1000 = stat1000;

		cmn_err(CE_CONT,
		"!%s: auto-negotiation done, advert:%b, lpable:%b, exp:%b",
		    dp->name,
		    advert, MII_ABILITY_BITS,
		    lpable, MII_ABILITY_BITS,
		    exp, MII_AN_EXP_BITS);

		if (dp->mii_status & MII_STATUS_XSTATUS) {
			cmn_err(CE_CONT,
			    "! MII_1000TC:%b, MII_1000TS:%b",
			    ctl1000, MII_1000TC_BITS,
			    stat1000, MII_1000TS_BITS);
		}

		/*
		 * At most one ability bit and no expansion info means
		 * the partner likely used parallel detection rather
		 * than real auto-negotiation.
		 */
		if (gem_population(lpable) <= 1 &&
		    (exp & MII_AN_EXP_LPCANAN) == 0) {
			if ((advert & MII_ABILITY_TECH) != lpable) {
				cmn_err(CE_WARN,
				    "!%s: but the link partnar doesn't seem"
				    " to have auto-negotiation capability."
				    " please check the link configuration.",
				    dp->name);
			}
			/*
			 * it should be result of parallel detection, which
			 * cannot detect duplex mode.
			 */
			if (lpable & MII_ABILITY_100BASE_TX) {
				/*
				 * we prefer full duplex mode for 100Mbps
				 * connection, if we can.
				 */
				lpable |= advert & MII_ABILITY_100BASE_TX_FD;
			}

			if ((advert & lpable) == 0 &&
			    lpable & MII_ABILITY_10BASE_T) {
				lpable |= advert & MII_ABILITY_10BASE_T_FD;
			}
			/*
			 * as the link partnar isn't auto-negotiatable, use
			 * fixed mode temporally.
			 */
			fix_phy = B_TRUE;
		} else if (lpable == 0) {
			cmn_err(CE_WARN, "!%s: wrong lpable.", dp->name);
			goto reset_phy;
		}
		/*
		 * configure current link mode according to AN priority.
		 */
		val = advert & lpable;
		if ((ctl1000 & MII_1000TC_ADV_FULL) &&
		    (stat1000 & MII_1000TS_LP_FULL)) {
			/* 1000BaseT & full duplex */
			dp->speed	 = GEM_SPD_1000;
			dp->full_duplex  = B_TRUE;
		} else if ((ctl1000 & MII_1000TC_ADV_HALF) &&
		    (stat1000 & MII_1000TS_LP_HALF)) {
			/* 1000BaseT & half duplex */
			dp->speed = GEM_SPD_1000;
			dp->full_duplex = B_FALSE;
		} else if (val & MII_ABILITY_100BASE_TX_FD) {
			/* 100BaseTx & full duplex */
			dp->speed = GEM_SPD_100;
			dp->full_duplex = B_TRUE;
		} else if (val & MII_ABILITY_100BASE_T4) {
			/*
			 * 100BaseT4 & full duplex
			 * NOTE(review): IEEE 802.3 defines 100Base-T4 as
			 * half-duplex only; confirm the full_duplex choice
			 * here is intentional for this framework.
			 */
			dp->speed = GEM_SPD_100;
			dp->full_duplex = B_TRUE;
		} else if (val & MII_ABILITY_100BASE_TX) {
			/* 100BaseTx & half duplex */
			dp->speed = GEM_SPD_100;
			dp->full_duplex = B_FALSE;
		} else if (val & MII_ABILITY_10BASE_T_FD) {
			/* 10BaseT & full duplex */
			dp->speed = GEM_SPD_10;
			dp->full_duplex = B_TRUE;
		} else if (val & MII_ABILITY_10BASE_T) {
			/* 10BaseT & half duplex */
			dp->speed = GEM_SPD_10;
			dp->full_duplex = B_FALSE;
		} else {
			/*
			 * It seems that the link partnar doesn't have
			 * auto-negotiation capability and our PHY
			 * could not report the correct current mode.
			 * We guess current mode by mii_control register.
			 */
			val = gem_mii_read(dp, MII_CONTROL);

			/* select 100m full or 10m half */
			dp->speed = (val & MII_CONTROL_100MB) ?
			    GEM_SPD_100 : GEM_SPD_10;
			dp->full_duplex = dp->speed != GEM_SPD_10;
			fix_phy = B_TRUE;

			cmn_err(CE_NOTE,
			    "!%s: auto-negotiation done but "
			    "common ability not found.\n"
			    "PHY state: control:%b advert:%b lpable:%b\n"
			    "guessing %d Mbps %s duplex mode",
			    dp->name,
			    val, MII_CONTROL_BITS,
			    advert, MII_ABILITY_BITS,
			    lpable, MII_ABILITY_BITS,
			    gem_speed_value[dp->speed],
			    dp->full_duplex ? "full" : "half");
		}

		/* flow control only makes sense in full duplex mode */
		if (dp->full_duplex) {
			dp->flow_control =
			    gem_fc_result[fc_cap_decode(advert)]
			    [fc_cap_decode(lpable)];
		} else {
			dp->flow_control = FLOW_CONTROL_NONE;
		}
		dp->mii_state = MII_STATE_MEDIA_SETUP;
		/* FALLTHROUGH */

	case MII_STATE_MEDIA_SETUP:
		dp->mii_state = MII_STATE_LINKDOWN;
		dp->mii_timer = dp->gc.gc_mii_linkdown_timeout;
		DPRINTF(2, (CE_CONT, "!%s: setup midia mode done", dp->name));
		dp->mii_supress_msg = B_FALSE;

		/* use short interval */
		dp->mii_interval = WATCH_INTERVAL_FAST;

		if ((!dp->anadv_autoneg) ||
		    dp->gc.gc_mii_an_oneshot || fix_phy) {

			/*
			 * write specified mode to phy.
			 */
			val = gem_mii_read(dp, MII_CONTROL);
			val &= ~(MII_CONTROL_SPEED | MII_CONTROL_FDUPLEX |
			    MII_CONTROL_ANE | MII_CONTROL_RSAN);

			if (dp->full_duplex) {
				val |= MII_CONTROL_FDUPLEX;
			}

			switch (dp->speed) {
			case GEM_SPD_1000:
				val |= MII_CONTROL_1000MB;
				break;

			case GEM_SPD_100:
				val |= MII_CONTROL_100MB;
				break;

			default:
				cmn_err(CE_WARN, "%s: unknown speed:%d",
				    dp->name, dp->speed);
				/* FALLTHROUGH */
			case GEM_SPD_10:
				/* for GEM_SPD_10, do nothing */
				break;
			}

			if (dp->mii_status & MII_STATUS_XSTATUS) {
				gem_mii_write(dp,
				    MII_1000TC, MII_1000TC_CFG_EN);
			}
			gem_mii_write(dp, MII_CONTROL, val);
		}

		if (dp->nic_state >= NIC_STATE_INITIALIZED) {
			/* notify the result of auto-negotiation to mac */
			(*dp->gc.gc_set_media)(dp);
		}

		if ((void *)dp->gc.gc_mii_tune_phy) {
			/* for built-in sis900 */
			/* XXX - this code should be removed. */
			(*dp->gc.gc_mii_tune_phy)(dp);
		}

		goto next_nowait;

	case MII_STATE_LINKDOWN:
		status = gem_mii_read(dp, MII_STATUS);
		if (status & MII_STATUS_LINKUP) {
			/*
			 * Link going up
			 */
			dp->mii_state = MII_STATE_LINKUP;
			dp->mii_supress_msg = B_FALSE;

			DPRINTF(0, (CE_CONT,
			    "!%s: link up detected: mii_stat:%b",
			    dp->name, status, MII_STATUS_BITS));

			/*
			 * MII_CONTROL_100MB and  MII_CONTROL_FDUPLEX are
			 * ignored when MII_CONTROL_ANE is set.
			 */
			cmn_err(CE_CONT,
			    "!%s: Link up: %d Mbps %s duplex %s flow control",
			    dp->name,
			    gem_speed_value[dp->speed],
			    dp->full_duplex ? "full" : "half",
			    gem_fc_type[dp->flow_control]);

			dp->mii_interval = dp->gc.gc_mii_link_watch_interval;

			/* XXX - we need other timer to watch statictics */
			if (dp->gc.gc_mii_hw_link_detection &&
			    dp->nic_state == NIC_STATE_ONLINE) {
				dp->mii_interval = 0;
			}

			if (dp->nic_state == NIC_STATE_ONLINE) {
				if (!dp->mac_active) {
					(void) gem_mac_start(dp);
				}
				tx_sched = B_TRUE;
			}
			goto next;
		}

		dp->mii_supress_msg = B_TRUE;
		if (dp->anadv_autoneg) {
			dp->mii_timer -= diff;
			if (dp->mii_timer <= 0) {
				/*
				 * link down timer expired.
				 * need to restart auto-negotiation.
				 */
				linkdown_action =
				    dp->gc.gc_mii_linkdown_timeout_action;
				goto restart_autonego;
			}
		}
		/* don't change mii_state */
		break;

	case MII_STATE_LINKUP:
		status = gem_mii_read(dp, MII_STATUS);
		if ((status & MII_STATUS_LINKUP) == 0) {
			/*
			 * Link going down
			 */
			cmn_err(CE_NOTE,
			    "!%s: link down detected: mii_stat:%b",
			    dp->name, status, MII_STATUS_BITS);

			if (dp->nic_state == NIC_STATE_ONLINE &&
			    dp->mac_active &&
			    dp->gc.gc_mii_stop_mac_on_linkdown) {
				(void) gem_mac_stop(dp, 0);

				if (dp->tx_blocked) {
					/* drain tx */
					tx_sched = B_TRUE;
				}
			}

			if (dp->anadv_autoneg) {
				/* need to restart auto-negotiation */
				linkdown_action = dp->gc.gc_mii_linkdown_action;
				goto restart_autonego;
			}

			dp->mii_state = MII_STATE_LINKDOWN;
			dp->mii_timer = dp->gc.gc_mii_linkdown_timeout;

			if ((void *)dp->gc.gc_mii_tune_phy) {
				/* for built-in sis900 */
				(*dp->gc.gc_mii_tune_phy)(dp);
			}
			dp->mii_interval = dp->gc.gc_mii_link_watch_interval;
			goto next;
		}

		/* don't change mii_state */
		if (dp->gc.gc_mii_hw_link_detection &&
		    dp->nic_state == NIC_STATE_ONLINE) {
			dp->mii_interval = 0;
			goto next;
		}
		break;
	}
	dp->mii_interval = dp->gc.gc_mii_link_watch_interval;
	goto next;

	/* Actions on the end of state routine */

restart_autonego:
	switch (linkdown_action) {
	case MII_ACTION_RESET:
		if (!dp->mii_supress_msg) {
			cmn_err(CE_CONT, "!%s: resetting PHY", dp->name);
		}
		dp->mii_supress_msg = B_TRUE;
		goto reset_phy;

	case MII_ACTION_NONE:
		dp->mii_supress_msg = B_TRUE;
		if (dp->gc.gc_mii_an_oneshot) {
			goto autonego;
		}
		/* PHY will restart autonego automatically */
		dp->mii_state = MII_STATE_AUTONEGOTIATING;
		dp->mii_timer = dp->gc.gc_mii_an_timeout;
		dp->mii_interval = dp->gc.gc_mii_an_watch_interval;
		goto next;

	case MII_ACTION_RSA:
		if (!dp->mii_supress_msg) {
			cmn_err(CE_CONT, "!%s: restarting auto-negotiation",
			    dp->name);
		}
		dp->mii_supress_msg = B_TRUE;
		goto autonego;

	default:
		cmn_err(CE_WARN, "!%s: unknowm linkdown action: %d",
		    dp->name, dp->gc.gc_mii_linkdown_action);
		dp->mii_supress_msg = B_TRUE;
	}
	/* NOTREACHED */

reset_phy:
	if (!dp->mii_supress_msg) {
		cmn_err(CE_CONT, "!%s: resetting PHY", dp->name);
	}
	dp->mii_state = MII_STATE_RESETTING;
	dp->mii_timer = dp->gc.gc_mii_reset_timeout;
	if (!dp->gc.gc_mii_dont_reset) {
		gem_mii_write(dp, MII_CONTROL, MII_CONTROL_RESET);
	}
	dp->mii_interval = WATCH_INTERVAL_FAST;
	goto next;

autonego:
	if (!dp->mii_supress_msg) {
		cmn_err(CE_CONT, "!%s: auto-negotiation started", dp->name);
	}
	dp->mii_state = MII_STATE_AUTONEGOTIATING;
	dp->mii_timer = dp->gc.gc_mii_an_timeout;

	/* start/restart auto nego */
	val = gem_mii_read(dp, MII_CONTROL) &
	    ~(MII_CONTROL_ISOLATE | MII_CONTROL_PWRDN | MII_CONTROL_RESET);

	gem_mii_write(dp, MII_CONTROL,
	    val | MII_CONTROL_RSAN | MII_CONTROL_ANE);

	dp->mii_interval = dp->gc.gc_mii_an_watch_interval;

next:
	if (dp->link_watcher_id == 0 && dp->mii_interval) {
		/* we must schedule next mii_watcher */
		dp->link_watcher_id =
		    timeout((void (*)(void *))&gem_mii_link_watcher,
		    (void *)dp, dp->mii_interval);
	}

	if (old_mii_state != dp->mii_state) {
		/* notify new mii link state */
		if (dp->mii_state == MII_STATE_LINKUP) {
			dp->linkup_delay = 0;
			GEM_LINKUP(dp);
		} else if (dp->linkup_delay <= 0) {
			GEM_LINKDOWN(dp);
		}
	} else if (dp->linkup_delay < 0) {
		/* first linkup timeout */
		dp->linkup_delay = 0;
		GEM_LINKDOWN(dp);
	}

	return (tx_sched);
}
2832 2832
/*
 * gem_mii_link_watcher: timer entry point for link supervision.
 *
 * Clears its own timeout id, runs the link state machine under
 * intrlock (which may re-arm the timer), then kicks the mac tx
 * stream if the state machine requested it.
 */
static void
gem_mii_link_watcher(struct gem_dev *dp)
{
	boolean_t tx_sched;

	mutex_enter(&dp->intrlock);

	/* clear our id first; gem_mii_link_check() may schedule a new one */
	dp->link_watcher_id = 0;
	tx_sched = gem_mii_link_check(dp);
#if GEM_DEBUG_LEVEL > 2
	if (dp->link_watcher_id == 0) {
		cmn_err(CE_CONT, "%s: link watcher stopped", dp->name);
	}
#endif
	mutex_exit(&dp->intrlock);

	if (tx_sched) {
		/* kick potentially stopped downstream */
		mac_tx_update(dp->mh);
	}
}
2854 2854
/*
 * gem_mii_probe_default: locate the MII PHY and record its abilities.
 *
 * Tries the configured phy address first (a negative address denotes
 * an internal/non-MII PHY), then scans all 32 MII addresses twice —
 * the second pass writes MII_CONTROL first for PHYs that need to be
 * woken before they respond.  On success, caches status/id registers
 * and trims gc_flow_control to what the PHY can actually advertise.
 * Returns GEM_SUCCESS or GEM_FAILURE.
 */
int
gem_mii_probe_default(struct gem_dev *dp)
{
	int8_t phy;
	uint16_t status;
	uint16_t adv;
	uint16_t adv_org;

	DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));

	/*
	 * Scan PHY
	 */
	/* ensure to send sync bits */
	dp->mii_status = 0;

	/* Try default phy first */
	if (dp->mii_phy_addr) {
		/* 0x0000/0xffff mean no PHY responded at this address */
		status = gem_mii_read(dp, MII_STATUS);
		if (status != 0xffff && status != 0) {
			gem_mii_write(dp, MII_CONTROL, 0);
			goto PHY_found;
		}

		if (dp->mii_phy_addr < 0) {
			cmn_err(CE_NOTE,
	    "!%s: failed to probe default internal and/or non-MII PHY",
			    dp->name);
			return (GEM_FAILURE);
		}

		cmn_err(CE_NOTE,
		    "!%s: failed to probe default MII PHY at %d",
		    dp->name, dp->mii_phy_addr);
	}

	/* Try all possible address */
	for (phy = dp->gc.gc_mii_addr_min; phy < 32; phy++) {
		dp->mii_phy_addr = phy;
		status = gem_mii_read(dp, MII_STATUS);

		if (status != 0xffff && status != 0) {
			gem_mii_write(dp, MII_CONTROL, 0);
			goto PHY_found;
		}
	}

	/* second pass: poke MII_CONTROL first, then re-read the status */
	for (phy = dp->gc.gc_mii_addr_min; phy < 32; phy++) {
		dp->mii_phy_addr = phy;
		gem_mii_write(dp, MII_CONTROL, 0);
		status = gem_mii_read(dp, MII_STATUS);

		if (status != 0xffff && status != 0) {
			goto PHY_found;
		}
	}

	cmn_err(CE_NOTE, "!%s: no MII PHY found", dp->name);
	dp->mii_phy_addr = -1;

	return (GEM_FAILURE);

PHY_found:
	dp->mii_status = status;
	/* cache the 32-bit PHY identifier (OUI/model/revision) */
	dp->mii_phy_id  = (gem_mii_read(dp, MII_PHYIDH) << 16) |
	    gem_mii_read(dp, MII_PHYIDL);

	if (dp->mii_phy_addr < 0) {
		cmn_err(CE_CONT, "!%s: using internal/non-MII PHY(0x%08x)",
		    dp->name, dp->mii_phy_id);
	} else {
		cmn_err(CE_CONT, "!%s: MII PHY (0x%08x) found at %d",
		    dp->name, dp->mii_phy_id, dp->mii_phy_addr);
	}

	cmn_err(CE_CONT, "!%s: PHY control:%b, status:%b, advert:%b, lpar:%b",
	    dp->name,
	    gem_mii_read(dp, MII_CONTROL), MII_CONTROL_BITS,
	    status, MII_STATUS_BITS,
	    gem_mii_read(dp, MII_AN_ADVERT), MII_ABILITY_BITS,
	    gem_mii_read(dp, MII_AN_LPABLE), MII_ABILITY_BITS);

	dp->mii_xstatus = 0;
	if (status & MII_STATUS_XSTATUS) {
		dp->mii_xstatus = gem_mii_read(dp, MII_XSTATUS);

		cmn_err(CE_CONT, "!%s: xstatus:%b",
		    dp->name, dp->mii_xstatus, MII_XSTATUS_BITS);
	}

	/* check if the phy can advertize pause abilities */
	adv_org = gem_mii_read(dp, MII_AN_ADVERT);

	gem_mii_write(dp, MII_AN_ADVERT,
	    MII_ABILITY_PAUSE | MII_ABILITY_ASMPAUSE);

	adv = gem_mii_read(dp, MII_AN_ADVERT);

	/* drop flow control modes the PHY silently refused to latch */
	if ((adv & MII_ABILITY_PAUSE) == 0) {
		dp->gc.gc_flow_control &= ~1;
	}

	if ((adv & MII_ABILITY_ASMPAUSE) == 0) {
		dp->gc.gc_flow_control &= ~2;
	}

	/* restore the original advertisement register */
	gem_mii_write(dp, MII_AN_ADVERT, adv_org);

	return (GEM_SUCCESS);
}
2965 2965
2966 2966 static void
2967 2967 gem_mii_start(struct gem_dev *dp)
2968 2968 {
2969 2969 DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));
2970 2970
2971 2971 /* make a first call of check link */
2972 2972 dp->mii_state = MII_STATE_UNKNOWN;
2973 2973 dp->mii_last_check = ddi_get_lbolt();
2974 2974 dp->linkup_delay = dp->gc.gc_mii_linkdown_timeout;
2975 2975 (void) gem_mii_link_watcher(dp);
2976 2976 }
2977 2977
/*
 * gem_mii_stop: stop link supervision.
 * Cancels any pending link watcher timeout under intrlock.
 */
static void
gem_mii_stop(struct gem_dev *dp)
{
	DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));

	/* Ensure timer routine stopped */
	mutex_enter(&dp->intrlock);
	if (dp->link_watcher_id) {
		/*
		 * NOTE(review): retry while untimeout() returns -1;
		 * confirm this matches untimeout(9F) semantics for a
		 * callback that is currently executing.
		 */
		while (untimeout(dp->link_watcher_id) == -1)
			;
		dp->link_watcher_id = 0;
	}
	mutex_exit(&dp->intrlock);
}
2992 2992
2993 2993 boolean_t
2994 2994 gem_get_mac_addr_conf(struct gem_dev *dp)
2995 2995 {
2996 2996 char propname[32];
2997 2997 char *valstr;
2998 2998 uint8_t mac[ETHERADDRL];
2999 2999 char *cp;
3000 3000 int c;
3001 3001 int i;
3002 3002 int j;
3003 3003 uint8_t v;
3004 3004 uint8_t d;
3005 3005 uint8_t ored;
3006 3006
3007 3007 DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3008 3008 /*
3009 3009 * Get ethernet address from .conf file
3010 3010 */
3011 3011 (void) sprintf(propname, "mac-addr");
3012 3012 if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, dp->dip,
3013 3013 DDI_PROP_DONTPASS, propname, &valstr)) !=
3014 3014 DDI_PROP_SUCCESS) {
3015 3015 return (B_FALSE);
3016 3016 }
3017 3017
3018 3018 if (strlen(valstr) != ETHERADDRL*3-1) {
3019 3019 goto syntax_err;
3020 3020 }
3021 3021
3022 3022 cp = valstr;
3023 3023 j = 0;
3024 3024 ored = 0;
3025 3025 for (;;) {
3026 3026 v = 0;
3027 3027 for (i = 0; i < 2; i++) {
3028 3028 c = *cp++;
3029 3029
3030 3030 if (c >= 'a' && c <= 'f') {
3031 3031 d = c - 'a' + 10;
3032 3032 } else if (c >= 'A' && c <= 'F') {
3033 3033 d = c - 'A' + 10;
3034 3034 } else if (c >= '0' && c <= '9') {
3035 3035 d = c - '0';
3036 3036 } else {
3037 3037 goto syntax_err;
3038 3038 }
3039 3039 v = (v << 4) | d;
3040 3040 }
3041 3041
3042 3042 mac[j++] = v;
3043 3043 ored |= v;
3044 3044 if (j == ETHERADDRL) {
3045 3045 /* done */
3046 3046 break;
3047 3047 }
3048 3048
3049 3049 c = *cp++;
3050 3050 if (c != ':') {
3051 3051 goto syntax_err;
3052 3052 }
3053 3053 }
3054 3054
3055 3055 if (ored == 0) {
3056 3056 goto err;
3057 3057 }
3058 3058 for (i = 0; i < ETHERADDRL; i++) {
3059 3059 dp->dev_addr.ether_addr_octet[i] = mac[i];
3060 3060 }
3061 3061 ddi_prop_free(valstr);
3062 3062 return (B_TRUE);
3063 3063
3064 3064 syntax_err:
3065 3065 cmn_err(CE_CONT,
3066 3066 "!%s: read mac addr: trying .conf: syntax err %s",
3067 3067 dp->name, valstr);
3068 3068 err:
3069 3069 ddi_prop_free(valstr);
3070 3070
3071 3071 return (B_FALSE);
3072 3072 }
3073 3073
3074 3074
3075 3075 /* ============================================================== */
3076 3076 /*
3077 3077 * internal start/stop interface
3078 3078 */
3079 3079 /* ============================================================== */
/*
 * gem_mac_set_rx_filter: push the current rx filter state (unicast
 * address, multicast list, rxmode flags in *dp) to the hardware via
 * the chip-specific method.  Returns the chip driver's status
 * (compared against GEM_SUCCESS by callers).
 */
static int
gem_mac_set_rx_filter(struct gem_dev *dp)
{
	return ((*dp->gc.gc_set_rx_filter)(dp));
}
3085 3085
3086 3086 /*
3087 3087 * gem_mac_init: cold start
3088 3088 */
3089 3089 static int
3090 3090 gem_mac_init(struct gem_dev *dp)
3091 3091 {
3092 3092 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3093 3093
3094 3094 if (dp->mac_suspended) {
3095 3095 return (GEM_FAILURE);
3096 3096 }
3097 3097
3098 3098 dp->mac_active = B_FALSE;
3099 3099
3100 3100 gem_init_rx_ring(dp);
3101 3101 gem_init_tx_ring(dp);
3102 3102
3103 3103 /* reset transmitter state */
3104 3104 dp->tx_blocked = (clock_t)0;
3105 3105 dp->tx_busy = 0;
3106 3106 dp->tx_reclaim_busy = 0;
3107 3107 dp->tx_max_packets = dp->gc.gc_tx_buf_limit;
3108 3108
3109 3109 if ((*dp->gc.gc_init_chip)(dp) != GEM_SUCCESS) {
3110 3110 return (GEM_FAILURE);
3111 3111 }
3112 3112
3113 3113 gem_prepare_rx_buf(dp);
3114 3114
3115 3115 return (GEM_SUCCESS);
3116 3116 }
3117 3117 /*
3118 3118 * gem_mac_start: warm start
3119 3119 */
3120 3120 static int
3121 3121 gem_mac_start(struct gem_dev *dp)
3122 3122 {
3123 3123 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3124 3124
3125 3125 ASSERT(mutex_owned(&dp->intrlock));
3126 3126 ASSERT(dp->nic_state == NIC_STATE_ONLINE);
3127 3127 ASSERT(dp->mii_state == MII_STATE_LINKUP);
3128 3128
3129 3129 /* enable tx and rx */
3130 3130 mutex_enter(&dp->xmitlock);
3131 3131 if (dp->mac_suspended) {
3132 3132 mutex_exit(&dp->xmitlock);
3133 3133 return (GEM_FAILURE);
3134 3134 }
3135 3135 dp->mac_active = B_TRUE;
3136 3136 mutex_exit(&dp->xmitlock);
3137 3137
3138 3138 /* setup rx buffers */
3139 3139 (*dp->gc.gc_rx_start)(dp,
3140 3140 SLOT(dp->rx_active_head, dp->gc.gc_rx_ring_size),
3141 3141 dp->rx_active_tail - dp->rx_active_head);
3142 3142
3143 3143 if ((*dp->gc.gc_start_chip)(dp) != GEM_SUCCESS) {
3144 3144 cmn_err(CE_WARN, "%s: %s: start_chip: failed",
3145 3145 dp->name, __func__);
3146 3146 return (GEM_FAILURE);
3147 3147 }
3148 3148
3149 3149 mutex_enter(&dp->xmitlock);
3150 3150
3151 3151 /* load untranmitted packets to the nic */
3152 3152 ASSERT(dp->tx_softq_tail - dp->tx_softq_head >= 0);
3153 3153 if (dp->tx_softq_tail - dp->tx_softq_head > 0) {
3154 3154 gem_tx_load_descs_oo(dp,
3155 3155 dp->tx_softq_head, dp->tx_softq_tail,
3156 3156 GEM_TXFLAG_HEAD);
3157 3157 /* issue preloaded tx buffers */
3158 3158 gem_tx_start_unit(dp);
3159 3159 }
3160 3160
3161 3161 mutex_exit(&dp->xmitlock);
3162 3162
3163 3163 return (GEM_SUCCESS);
3164 3164 }
3165 3165
/*
 * gem_mac_stop: stop the nic.
 *
 * flags:
 *	GEM_RESTART_NOWAIT   - don't wait for queued tx packets to drain
 *	GEM_RESTART_KEEP_BUF - keep tx/rx buffers for a later warm restart
 *
 * Blocks new transmits, optionally drains the tx ring, stops (or as a
 * last resort resets) the chip, harvests final statistics and cleans
 * or preserves the buffers according to "flags".  Caller must hold
 * intrlock but not xmitlock.  Always returns GEM_SUCCESS as written.
 */
static int
gem_mac_stop(struct gem_dev *dp, uint_t flags)
{
	int i;
	int wait_time; /* in uS */
#ifdef GEM_DEBUG_LEVEL
	clock_t now;
#endif
	int ret = GEM_SUCCESS;

	DPRINTF(1, (CE_CONT, "!%s: %s: called, rx_buf_free:%d",
	    dp->name, __func__, dp->rx_buf_freecnt));

	ASSERT(mutex_owned(&dp->intrlock));
	ASSERT(!mutex_owned(&dp->xmitlock));

	/*
	 * Block transmits
	 */
	mutex_enter(&dp->xmitlock);
	if (dp->mac_suspended) {
		mutex_exit(&dp->xmitlock);
		return (GEM_SUCCESS);
	}
	dp->mac_active = B_FALSE;

	/* wait for threads currently inside the tx path to leave */
	while (dp->tx_busy > 0) {
		cv_wait(&dp->tx_drain_cv, &dp->xmitlock);
	}
	mutex_exit(&dp->xmitlock);

	if ((flags & GEM_RESTART_NOWAIT) == 0) {
		/*
		 * Wait for all tx buffers sent.
		 * Budget is about twice the wire time of the pending
		 * descriptors: 8 bits/byte * max packet size / speed
		 * -- assumes gem_speed_value[] is in Mbps so the
		 * result is in uS; TODO confirm.
		 */
		wait_time =
		    2 * (8 * MAXPKTBUF(dp) / gem_speed_value[dp->speed]) *
		    (dp->tx_active_tail - dp->tx_active_head);

		DPRINTF(0, (CE_CONT, "%s: %s: max drain time: %d uS",
		    dp->name, __func__, wait_time));
		i = 0;
#ifdef GEM_DEBUG_LEVEL
		now = ddi_get_lbolt();
#endif
		/* poll in 100uS steps until the active tx ring empties */
		while (dp->tx_active_tail != dp->tx_active_head) {
			if (i > wait_time) {
				/* timeout */
				cmn_err(CE_NOTE, "%s: %s timeout: tx drain",
				    dp->name, __func__);
				break;
			}
			(void) gem_reclaim_txbuf(dp);
			drv_usecwait(100);
			i += 100;
		}
		/* NOTE(review): the "10*" below assumes a 100Hz tick */
		DPRINTF(0, (CE_NOTE,
		    "!%s: %s: the nic have drained in %d uS, real %d mS",
		    dp->name, __func__, i,
		    10*((int)(ddi_get_lbolt() - now))));
	}

	/*
	 * Now we can stop the nic safely.
	 */
	if ((*dp->gc.gc_stop_chip)(dp) != GEM_SUCCESS) {
		cmn_err(CE_NOTE, "%s: %s: resetting the chip to stop it",
		    dp->name, __func__);
		if ((*dp->gc.gc_reset_chip)(dp) != GEM_SUCCESS) {
			cmn_err(CE_WARN, "%s: %s: failed to reset chip",
			    dp->name, __func__);
		}
	}

	/*
	 * Clear all rx buffers
	 */
	if (flags & GEM_RESTART_KEEP_BUF) {
		/* pick up frames already received before discarding */
		(void) gem_receive(dp);
	}
	gem_clean_rx_buf(dp);

	/*
	 * Update final statistics
	 */
	(*dp->gc.gc_get_stats)(dp);

	/*
	 * Clear all pended tx packets
	 */
	ASSERT(dp->tx_active_tail == dp->tx_softq_head);
	ASSERT(dp->tx_softq_tail == dp->tx_free_head);
	if (flags & GEM_RESTART_KEEP_BUF) {
		/* restore active tx buffers */
		dp->tx_active_tail = dp->tx_active_head;
		dp->tx_softq_head = dp->tx_active_head;
	} else {
		gem_clean_tx_buf(dp);
	}

	return (ret);
}
3268 3268
3269 3269 static int
3270 3270 gem_add_multicast(struct gem_dev *dp, const uint8_t *ep)
3271 3271 {
3272 3272 int cnt;
3273 3273 int err;
3274 3274
3275 3275 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3276 3276
3277 3277 mutex_enter(&dp->intrlock);
3278 3278 if (dp->mac_suspended) {
3279 3279 mutex_exit(&dp->intrlock);
3280 3280 return (GEM_FAILURE);
3281 3281 }
3282 3282
3283 3283 if (dp->mc_count_req++ < GEM_MAXMC) {
3284 3284 /* append the new address at the end of the mclist */
3285 3285 cnt = dp->mc_count;
3286 3286 bcopy(ep, dp->mc_list[cnt].addr.ether_addr_octet,
3287 3287 ETHERADDRL);
3288 3288 if (dp->gc.gc_multicast_hash) {
3289 3289 dp->mc_list[cnt].hash =
3290 3290 (*dp->gc.gc_multicast_hash)(dp, (uint8_t *)ep);
3291 3291 }
3292 3292 dp->mc_count = cnt + 1;
3293 3293 }
3294 3294
3295 3295 if (dp->mc_count_req != dp->mc_count) {
3296 3296 /* multicast address list overflow */
3297 3297 dp->rxmode |= RXMODE_MULTI_OVF;
3298 3298 } else {
3299 3299 dp->rxmode &= ~RXMODE_MULTI_OVF;
3300 3300 }
3301 3301
3302 3302 /* tell new multicast list to the hardware */
3303 3303 err = gem_mac_set_rx_filter(dp);
3304 3304
3305 3305 mutex_exit(&dp->intrlock);
3306 3306
3307 3307 return (err);
3308 3308 }
3309 3309
/*
 * gem_remove_multicast: remove a multicast address from the software
 * list and reprogram the hardware rx filter.
 *
 * If the address is found, the entries behind it are shifted down by
 * one.  mc_count_req is decremented unconditionally so a previously
 * overflowed list can drop back below GEM_MAXMC and leave
 * accept-all-multicast mode.
 */
static int
gem_remove_multicast(struct gem_dev *dp, const uint8_t *ep)
{
	size_t len;
	int i;
	int cnt;
	int err;

	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));

	mutex_enter(&dp->intrlock);
	if (dp->mac_suspended) {
		mutex_exit(&dp->intrlock);
		return (GEM_FAILURE);
	}

	dp->mc_count_req--;
	cnt = dp->mc_count;
	for (i = 0; i < cnt; i++) {
		if (bcmp(ep, &dp->mc_list[i].addr, ETHERADDRL)) {
			continue;
		}
		/* shrink the mclist by copying forward */
		len = (cnt - (i + 1)) * sizeof (*dp->mc_list);
		if (len > 0) {
			bcopy(&dp->mc_list[i+1], &dp->mc_list[i], len);
		}
		dp->mc_count--;
		break;
	}

	if (dp->mc_count_req != dp->mc_count) {
		/* multicast address list overflow */
		dp->rxmode |= RXMODE_MULTI_OVF;
	} else {
		dp->rxmode &= ~RXMODE_MULTI_OVF;
	}
	/* In gem v2, don't hold xmitlock on calling set_rx_filter */
	err = gem_mac_set_rx_filter(dp);

	mutex_exit(&dp->intrlock);

	return (err);
}
3354 3354
3355 3355 /* ============================================================== */
3356 3356 /*
3357 3357 * ND interface
3358 3358 */
3359 3359 /* ============================================================== */
/*
 * Indices of the ndd parameters registered in gem_nd_setup().
 * Each value indexes the per-parameter gem_nd_arg array.
 */
enum {
	/* read-only: capabilities of the local PHY */
	PARAM_AUTONEG_CAP,
	PARAM_PAUSE_CAP,
	PARAM_ASYM_PAUSE_CAP,
	PARAM_1000FDX_CAP,
	PARAM_1000HDX_CAP,
	PARAM_100T4_CAP,
	PARAM_100FDX_CAP,
	PARAM_100HDX_CAP,
	PARAM_10FDX_CAP,
	PARAM_10HDX_CAP,

	/* read-write: capabilities we advertise (anadv_* fields) */
	PARAM_ADV_AUTONEG_CAP,
	PARAM_ADV_PAUSE_CAP,
	PARAM_ADV_ASYM_PAUSE_CAP,
	PARAM_ADV_1000FDX_CAP,
	PARAM_ADV_1000HDX_CAP,
	PARAM_ADV_100T4_CAP,
	PARAM_ADV_100FDX_CAP,
	PARAM_ADV_100HDX_CAP,
	PARAM_ADV_10FDX_CAP,
	PARAM_ADV_10HDX_CAP,

	/* read-only: capabilities advertised by the link partner */
	PARAM_LP_AUTONEG_CAP,
	PARAM_LP_PAUSE_CAP,
	PARAM_LP_ASYM_PAUSE_CAP,
	PARAM_LP_1000FDX_CAP,
	PARAM_LP_1000HDX_CAP,
	PARAM_LP_100T4_CAP,
	PARAM_LP_100FDX_CAP,
	PARAM_LP_100HDX_CAP,
	PARAM_LP_10FDX_CAP,
	PARAM_LP_10HDX_CAP,

	/* read-only: current resolved link state */
	PARAM_LINK_STATUS,
	PARAM_LINK_SPEED,
	PARAM_LINK_DUPLEX,

	PARAM_LINK_AUTONEG,
	PARAM_LINK_RX_PAUSE,
	PARAM_LINK_TX_PAUSE,

	PARAM_LOOP_MODE,
	PARAM_MSI_CNT,

#ifdef DEBUG_RESUME
	PARAM_RESUME_TEST,
#endif
	/* must be last: number of parameters / size of gem_nd_arg array */
	PARAM_COUNT
};
3410 3410
/*
 * Result codes returned by gem_nd_ioctl() telling gem_mac_ioctl()
 * how to complete the STREAMS ioctl message.
 */
enum ioc_reply {
	IOC_INVAL = -1, /* bad, NAK with EINVAL */
	IOC_DONE, /* OK, reply sent */
	IOC_ACK, /* OK, just send ACK */
	IOC_REPLY, /* OK, just send reply */
	IOC_RESTART_ACK, /* OK, restart & ACK */
	IOC_RESTART_REPLY /* OK, restart & reply */
};
3419 3419
/*
 * Per-parameter context handed to gem_param_get/gem_param_set
 * through nd_load(); one instance per PARAM_* index.
 */
struct gem_nd_arg {
	struct gem_dev *dp;	/* owning device */
	int item;	/* PARAM_* index this entry serves */
};
3424 3424
3425 3425 static int
3426 3426 gem_param_get(queue_t *q, mblk_t *mp, caddr_t arg, cred_t *credp)
3427 3427 {
3428 3428 struct gem_dev *dp = ((struct gem_nd_arg *)(void *)arg)->dp;
3429 3429 int item = ((struct gem_nd_arg *)(void *)arg)->item;
3430 3430 long val;
3431 3431
3432 3432 DPRINTF(0, (CE_CONT, "!%s: %s: called, item:%d",
3433 3433 dp->name, __func__, item));
3434 3434
3435 3435 switch (item) {
3436 3436 case PARAM_AUTONEG_CAP:
3437 3437 val = BOOLEAN(dp->mii_status & MII_STATUS_CANAUTONEG);
3438 3438 DPRINTF(0, (CE_CONT, "autoneg_cap:%d", val));
3439 3439 break;
3440 3440
3441 3441 case PARAM_PAUSE_CAP:
3442 3442 val = BOOLEAN(dp->gc.gc_flow_control & 1);
3443 3443 break;
3444 3444
3445 3445 case PARAM_ASYM_PAUSE_CAP:
3446 3446 val = BOOLEAN(dp->gc.gc_flow_control & 2);
3447 3447 break;
3448 3448
3449 3449 case PARAM_1000FDX_CAP:
3450 3450 val = (dp->mii_xstatus & MII_XSTATUS_1000BASET_FD) ||
3451 3451 (dp->mii_xstatus & MII_XSTATUS_1000BASEX_FD);
3452 3452 break;
3453 3453
3454 3454 case PARAM_1000HDX_CAP:
3455 3455 val = (dp->mii_xstatus & MII_XSTATUS_1000BASET) ||
3456 3456 (dp->mii_xstatus & MII_XSTATUS_1000BASEX);
3457 3457 break;
3458 3458
3459 3459 case PARAM_100T4_CAP:
3460 3460 val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASE_T4);
3461 3461 break;
3462 3462
3463 3463 case PARAM_100FDX_CAP:
3464 3464 val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX_FD);
3465 3465 break;
3466 3466
3467 3467 case PARAM_100HDX_CAP:
3468 3468 val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX);
3469 3469 break;
3470 3470
3471 3471 case PARAM_10FDX_CAP:
3472 3472 val = BOOLEAN(dp->mii_status & MII_STATUS_10_FD);
3473 3473 break;
3474 3474
3475 3475 case PARAM_10HDX_CAP:
3476 3476 val = BOOLEAN(dp->mii_status & MII_STATUS_10);
3477 3477 break;
3478 3478
3479 3479 case PARAM_ADV_AUTONEG_CAP:
3480 3480 val = dp->anadv_autoneg;
3481 3481 break;
3482 3482
3483 3483 case PARAM_ADV_PAUSE_CAP:
3484 3484 val = BOOLEAN(dp->anadv_flow_control & 1);
3485 3485 break;
3486 3486
3487 3487 case PARAM_ADV_ASYM_PAUSE_CAP:
3488 3488 val = BOOLEAN(dp->anadv_flow_control & 2);
3489 3489 break;
3490 3490
3491 3491 case PARAM_ADV_1000FDX_CAP:
3492 3492 val = dp->anadv_1000fdx;
3493 3493 break;
3494 3494
3495 3495 case PARAM_ADV_1000HDX_CAP:
3496 3496 val = dp->anadv_1000hdx;
3497 3497 break;
3498 3498
3499 3499 case PARAM_ADV_100T4_CAP:
3500 3500 val = dp->anadv_100t4;
3501 3501 break;
3502 3502
3503 3503 case PARAM_ADV_100FDX_CAP:
3504 3504 val = dp->anadv_100fdx;
3505 3505 break;
3506 3506
3507 3507 case PARAM_ADV_100HDX_CAP:
3508 3508 val = dp->anadv_100hdx;
3509 3509 break;
3510 3510
3511 3511 case PARAM_ADV_10FDX_CAP:
3512 3512 val = dp->anadv_10fdx;
3513 3513 break;
3514 3514
3515 3515 case PARAM_ADV_10HDX_CAP:
3516 3516 val = dp->anadv_10hdx;
3517 3517 break;
3518 3518
3519 3519 case PARAM_LP_AUTONEG_CAP:
3520 3520 val = BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
3521 3521 break;
3522 3522
3523 3523 case PARAM_LP_PAUSE_CAP:
3524 3524 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_PAUSE);
3525 3525 break;
3526 3526
3527 3527 case PARAM_LP_ASYM_PAUSE_CAP:
3528 3528 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_ASMPAUSE);
3529 3529 break;
3530 3530
3531 3531 case PARAM_LP_1000FDX_CAP:
3532 3532 val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_FULL);
3533 3533 break;
3534 3534
3535 3535 case PARAM_LP_1000HDX_CAP:
3536 3536 val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_HALF);
3537 3537 break;
3538 3538
3539 3539 case PARAM_LP_100T4_CAP:
3540 3540 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_T4);
3541 3541 break;
3542 3542
3543 3543 case PARAM_LP_100FDX_CAP:
3544 3544 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX_FD);
3545 3545 break;
3546 3546
3547 3547 case PARAM_LP_100HDX_CAP:
3548 3548 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX);
3549 3549 break;
3550 3550
3551 3551 case PARAM_LP_10FDX_CAP:
3552 3552 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T_FD);
3553 3553 break;
3554 3554
3555 3555 case PARAM_LP_10HDX_CAP:
3556 3556 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T);
3557 3557 break;
3558 3558
3559 3559 case PARAM_LINK_STATUS:
3560 3560 val = (dp->mii_state == MII_STATE_LINKUP);
3561 3561 break;
3562 3562
3563 3563 case PARAM_LINK_SPEED:
3564 3564 val = gem_speed_value[dp->speed];
3565 3565 break;
3566 3566
3567 3567 case PARAM_LINK_DUPLEX:
3568 3568 val = 0;
3569 3569 if (dp->mii_state == MII_STATE_LINKUP) {
3570 3570 val = dp->full_duplex ? 2 : 1;
3571 3571 }
3572 3572 break;
3573 3573
3574 3574 case PARAM_LINK_AUTONEG:
3575 3575 val = BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
3576 3576 break;
3577 3577
3578 3578 case PARAM_LINK_RX_PAUSE:
3579 3579 val = (dp->flow_control == FLOW_CONTROL_SYMMETRIC) ||
3580 3580 (dp->flow_control == FLOW_CONTROL_RX_PAUSE);
3581 3581 break;
3582 3582
3583 3583 case PARAM_LINK_TX_PAUSE:
3584 3584 val = (dp->flow_control == FLOW_CONTROL_SYMMETRIC) ||
3585 3585 (dp->flow_control == FLOW_CONTROL_TX_PAUSE);
3586 3586 break;
3587 3587
3588 3588 #ifdef DEBUG_RESUME
3589 3589 case PARAM_RESUME_TEST:
3590 3590 val = 0;
3591 3591 break;
3592 3592 #endif
3593 3593 default:
3594 3594 cmn_err(CE_WARN, "%s: unimplemented ndd control (%d)",
3595 3595 dp->name, item);
3596 3596 break;
3597 3597 }
3598 3598
3599 3599 (void) mi_mpprintf(mp, "%ld", val);
3600 3600
3601 3601 return (0);
3602 3602 }
3603 3603
/*
 * gem_param_set: ndd set handler for the writable (adv_*) parameters.
 *
 * Parses a decimal value from "value", validates that it is 0 or 1
 * and that the PHY actually supports the capability being enabled,
 * then stores it into the matching anadv_* field and re-syncs the
 * link state machine.  Returns 0 on success, EINVAL on bad input.
 * Items outside the ADV_* group are silently accepted (no default
 * case); only ADV_* entries are registered with this setter.
 * "q", "mp" and "credp" are unused (nd_getset callback signature).
 */
static int
gem_param_set(queue_t *q, mblk_t *mp, char *value, caddr_t arg, cred_t *credp)
{
	struct gem_dev *dp = ((struct gem_nd_arg *)(void *)arg)->dp;
	int item = ((struct gem_nd_arg *)(void *)arg)->item;
	long val;
	char *end;

	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
	if (ddi_strtol(value, &end, 10, &val)) {
		return (EINVAL);
	}
	if (end == value) {
		/* no digits were consumed */
		return (EINVAL);
	}

	switch (item) {
	case PARAM_ADV_AUTONEG_CAP:
		if (val != 0 && val != 1) {
			goto err;
		}
		/* can't enable autoneg if the PHY can't do it */
		if (val && (dp->mii_status & MII_STATUS_CANAUTONEG) == 0) {
			goto err;
		}
		dp->anadv_autoneg = (int)val;
		break;

	case PARAM_ADV_PAUSE_CAP:
		if (val != 0 && val != 1) {
			goto err;
		}
		if (val) {
			dp->anadv_flow_control |= 1;
		} else {
			dp->anadv_flow_control &= ~1;
		}
		break;

	case PARAM_ADV_ASYM_PAUSE_CAP:
		if (val != 0 && val != 1) {
			goto err;
		}
		if (val) {
			dp->anadv_flow_control |= 2;
		} else {
			dp->anadv_flow_control &= ~2;
		}
		break;

	case PARAM_ADV_1000FDX_CAP:
		if (val != 0 && val != 1) {
			goto err;
		}
		if (val && (dp->mii_xstatus &
		    (MII_XSTATUS_1000BASET_FD |
		    MII_XSTATUS_1000BASEX_FD)) == 0) {
			goto err;
		}
		dp->anadv_1000fdx = (int)val;
		break;

	case PARAM_ADV_1000HDX_CAP:
		if (val != 0 && val != 1) {
			goto err;
		}
		if (val && (dp->mii_xstatus &
		    (MII_XSTATUS_1000BASET | MII_XSTATUS_1000BASEX)) == 0) {
			goto err;
		}
		dp->anadv_1000hdx = (int)val;
		break;

	case PARAM_ADV_100T4_CAP:
		if (val != 0 && val != 1) {
			goto err;
		}
		if (val && (dp->mii_status & MII_STATUS_100_BASE_T4) == 0) {
			goto err;
		}
		dp->anadv_100t4 = (int)val;
		break;

	case PARAM_ADV_100FDX_CAP:
		if (val != 0 && val != 1) {
			goto err;
		}
		if (val && (dp->mii_status & MII_STATUS_100_BASEX_FD) == 0) {
			goto err;
		}
		dp->anadv_100fdx = (int)val;
		break;

	case PARAM_ADV_100HDX_CAP:
		if (val != 0 && val != 1) {
			goto err;
		}
		if (val && (dp->mii_status & MII_STATUS_100_BASEX) == 0) {
			goto err;
		}
		dp->anadv_100hdx = (int)val;
		break;

	case PARAM_ADV_10FDX_CAP:
		if (val != 0 && val != 1) {
			goto err;
		}
		if (val && (dp->mii_status & MII_STATUS_10_FD) == 0) {
			goto err;
		}
		dp->anadv_10fdx = (int)val;
		break;

	case PARAM_ADV_10HDX_CAP:
		if (val != 0 && val != 1) {
			goto err;
		}
		if (val && (dp->mii_status & MII_STATUS_10) == 0) {
			goto err;
		}
		dp->anadv_10hdx = (int)val;
		break;
	}

	/* sync with PHY */
	gem_choose_forcedmode(dp);

	/* force the link watcher to renegotiate with the new settings */
	dp->mii_state = MII_STATE_UNKNOWN;
	if (dp->gc.gc_mii_hw_link_detection && dp->link_watcher_id == 0) {
		/* XXX - Can we ignore the return code ? */
		(void) gem_mii_link_check(dp);
	}

	return (0);
err:
	return (EINVAL);
}
3740 3740
3741 3741 static void
3742 3742 gem_nd_load(struct gem_dev *dp, char *name, ndgetf_t gf, ndsetf_t sf, int item)
3743 3743 {
3744 3744 struct gem_nd_arg *arg;
3745 3745
3746 3746 ASSERT(item >= 0);
3747 3747 ASSERT(item < PARAM_COUNT);
3748 3748
3749 3749 arg = &((struct gem_nd_arg *)(void *)dp->nd_arg_p)[item];
3750 3750 arg->dp = dp;
3751 3751 arg->item = item;
3752 3752
3753 3753 DPRINTF(2, (CE_CONT, "!%s: %s: name:%s, item:%d",
3754 3754 dp->name, __func__, name, item));
3755 3755 (void) nd_load(&dp->nd_data_p, name, gf, sf, (caddr_t)arg);
3756 3756 }
3757 3757
/*
 * gem_nd_setup: allocate the ndd parameter table and register every
 * supported parameter.
 *
 * Read-only parameters get a NULL setter; adv_* parameters are
 * writable only when the PHY reports the capability (and, for the
 * speed/duplex bits, when the advertisement register is writable,
 * i.e. !mii_advert_ro).
 */
static void
gem_nd_setup(struct gem_dev *dp)
{
	DPRINTF(0, (CE_CONT, "!%s: %s: called, mii_status:0x%b",
	    dp->name, __func__, dp->mii_status, MII_STATUS_BITS));

	ASSERT(dp->nd_arg_p == NULL);

	dp->nd_arg_p =
	    kmem_zalloc(sizeof (struct gem_nd_arg) * PARAM_COUNT, KM_SLEEP);

	/* pass gem_param_set only when the capability is present */
#define	SETFUNC(x)	((x) ? gem_param_set : NULL)

	gem_nd_load(dp, "autoneg_cap",
	    gem_param_get, NULL, PARAM_AUTONEG_CAP);
	gem_nd_load(dp, "pause_cap",
	    gem_param_get, NULL, PARAM_PAUSE_CAP);
	gem_nd_load(dp, "asym_pause_cap",
	    gem_param_get, NULL, PARAM_ASYM_PAUSE_CAP);
	gem_nd_load(dp, "1000fdx_cap",
	    gem_param_get, NULL, PARAM_1000FDX_CAP);
	gem_nd_load(dp, "1000hdx_cap",
	    gem_param_get, NULL, PARAM_1000HDX_CAP);
	gem_nd_load(dp, "100T4_cap",
	    gem_param_get, NULL, PARAM_100T4_CAP);
	gem_nd_load(dp, "100fdx_cap",
	    gem_param_get, NULL, PARAM_100FDX_CAP);
	gem_nd_load(dp, "100hdx_cap",
	    gem_param_get, NULL, PARAM_100HDX_CAP);
	gem_nd_load(dp, "10fdx_cap",
	    gem_param_get, NULL, PARAM_10FDX_CAP);
	gem_nd_load(dp, "10hdx_cap",
	    gem_param_get, NULL, PARAM_10HDX_CAP);

	/* Our advertised capabilities */
	gem_nd_load(dp, "adv_autoneg_cap", gem_param_get,
	    SETFUNC(dp->mii_status & MII_STATUS_CANAUTONEG),
	    PARAM_ADV_AUTONEG_CAP);
	gem_nd_load(dp, "adv_pause_cap", gem_param_get,
	    SETFUNC(dp->gc.gc_flow_control & 1),
	    PARAM_ADV_PAUSE_CAP);
	gem_nd_load(dp, "adv_asym_pause_cap", gem_param_get,
	    SETFUNC(dp->gc.gc_flow_control & 2),
	    PARAM_ADV_ASYM_PAUSE_CAP);
	gem_nd_load(dp, "adv_1000fdx_cap", gem_param_get,
	    SETFUNC(dp->mii_xstatus &
	    (MII_XSTATUS_1000BASEX_FD | MII_XSTATUS_1000BASET_FD)),
	    PARAM_ADV_1000FDX_CAP);
	gem_nd_load(dp, "adv_1000hdx_cap", gem_param_get,
	    SETFUNC(dp->mii_xstatus &
	    (MII_XSTATUS_1000BASEX | MII_XSTATUS_1000BASET)),
	    PARAM_ADV_1000HDX_CAP);
	gem_nd_load(dp, "adv_100T4_cap", gem_param_get,
	    SETFUNC((dp->mii_status & MII_STATUS_100_BASE_T4) &&
	    !dp->mii_advert_ro),
	    PARAM_ADV_100T4_CAP);
	gem_nd_load(dp, "adv_100fdx_cap", gem_param_get,
	    SETFUNC((dp->mii_status & MII_STATUS_100_BASEX_FD) &&
	    !dp->mii_advert_ro),
	    PARAM_ADV_100FDX_CAP);
	gem_nd_load(dp, "adv_100hdx_cap", gem_param_get,
	    SETFUNC((dp->mii_status & MII_STATUS_100_BASEX) &&
	    !dp->mii_advert_ro),
	    PARAM_ADV_100HDX_CAP);
	gem_nd_load(dp, "adv_10fdx_cap", gem_param_get,
	    SETFUNC((dp->mii_status & MII_STATUS_10_FD) &&
	    !dp->mii_advert_ro),
	    PARAM_ADV_10FDX_CAP);
	gem_nd_load(dp, "adv_10hdx_cap", gem_param_get,
	    SETFUNC((dp->mii_status & MII_STATUS_10) &&
	    !dp->mii_advert_ro),
	    PARAM_ADV_10HDX_CAP);

	/* Partner's advertised capabilities */
	gem_nd_load(dp, "lp_autoneg_cap",
	    gem_param_get, NULL, PARAM_LP_AUTONEG_CAP);
	gem_nd_load(dp, "lp_pause_cap",
	    gem_param_get, NULL, PARAM_LP_PAUSE_CAP);
	gem_nd_load(dp, "lp_asym_pause_cap",
	    gem_param_get, NULL, PARAM_LP_ASYM_PAUSE_CAP);
	gem_nd_load(dp, "lp_1000fdx_cap",
	    gem_param_get, NULL, PARAM_LP_1000FDX_CAP);
	gem_nd_load(dp, "lp_1000hdx_cap",
	    gem_param_get, NULL, PARAM_LP_1000HDX_CAP);
	gem_nd_load(dp, "lp_100T4_cap",
	    gem_param_get, NULL, PARAM_LP_100T4_CAP);
	gem_nd_load(dp, "lp_100fdx_cap",
	    gem_param_get, NULL, PARAM_LP_100FDX_CAP);
	gem_nd_load(dp, "lp_100hdx_cap",
	    gem_param_get, NULL, PARAM_LP_100HDX_CAP);
	gem_nd_load(dp, "lp_10fdx_cap",
	    gem_param_get, NULL, PARAM_LP_10FDX_CAP);
	gem_nd_load(dp, "lp_10hdx_cap",
	    gem_param_get, NULL, PARAM_LP_10HDX_CAP);

	/* Current operating modes */
	gem_nd_load(dp, "link_status",
	    gem_param_get, NULL, PARAM_LINK_STATUS);
	gem_nd_load(dp, "link_speed",
	    gem_param_get, NULL, PARAM_LINK_SPEED);
	gem_nd_load(dp, "link_duplex",
	    gem_param_get, NULL, PARAM_LINK_DUPLEX);
	gem_nd_load(dp, "link_autoneg",
	    gem_param_get, NULL, PARAM_LINK_AUTONEG);
	gem_nd_load(dp, "link_rx_pause",
	    gem_param_get, NULL, PARAM_LINK_RX_PAUSE);
	gem_nd_load(dp, "link_tx_pause",
	    gem_param_get, NULL, PARAM_LINK_TX_PAUSE);
#ifdef DEBUG_RESUME
	gem_nd_load(dp, "resume_test",
	    gem_param_get, NULL, PARAM_RESUME_TEST);
#endif
#undef	SETFUNC
}
3872 3872
3873 3873 static
3874 3874 enum ioc_reply
3875 3875 gem_nd_ioctl(struct gem_dev *dp, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
3876 3876 {
3877 3877 boolean_t ok;
3878 3878
3879 3879 ASSERT(mutex_owned(&dp->intrlock));
3880 3880
3881 3881 DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3882 3882
3883 3883 switch (iocp->ioc_cmd) {
3884 3884 case ND_GET:
3885 3885 ok = nd_getset(wq, dp->nd_data_p, mp);
3886 3886 DPRINTF(0, (CE_CONT,
3887 3887 "%s: get %s", dp->name, ok ? "OK" : "FAIL"));
3888 3888 return (ok ? IOC_REPLY : IOC_INVAL);
3889 3889
3890 3890 case ND_SET:
3891 3891 ok = nd_getset(wq, dp->nd_data_p, mp);
3892 3892
3893 3893 DPRINTF(0, (CE_CONT, "%s: set %s err %d",
3894 3894 dp->name, ok ? "OK" : "FAIL", iocp->ioc_error));
3895 3895
3896 3896 if (!ok) {
3897 3897 return (IOC_INVAL);
3898 3898 }
3899 3899
3900 3900 if (iocp->ioc_error) {
3901 3901 return (IOC_REPLY);
3902 3902 }
3903 3903
3904 3904 return (IOC_RESTART_REPLY);
3905 3905 }
3906 3906
3907 3907 cmn_err(CE_WARN, "%s: invalid cmd 0x%x", dp->name, iocp->ioc_cmd);
3908 3908
3909 3909 return (IOC_INVAL);
3910 3910 }
3911 3911
/*
 * gem_nd_cleanup: undo gem_nd_setup(); release the nd registration
 * and the parameter-context array.
 */
static void
gem_nd_cleanup(struct gem_dev *dp)
{
	ASSERT(dp->nd_data_p != NULL);
	ASSERT(dp->nd_arg_p != NULL);

	nd_free(&dp->nd_data_p);

	kmem_free(dp->nd_arg_p, sizeof (struct gem_nd_arg) * PARAM_COUNT);
	dp->nd_arg_p = NULL;
}
3923 3923
/*
 * gem_mac_ioctl: GLDv3 ioctl entry; handles the ND_GET/ND_SET
 * (ndd) commands and completes the STREAMS message according to the
 * ioc_reply code from gem_nd_ioctl().
 */
static void
gem_mac_ioctl(struct gem_dev *dp, queue_t *wq, mblk_t *mp)
{
	struct iocblk *iocp;
	enum ioc_reply status;
	int cmd;

	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));

	/*
	 * Validate the command before bothering with the mutex ...
	 */
	iocp = (void *)mp->b_rptr;
	iocp->ioc_error = 0;
	cmd = iocp->ioc_cmd;

	DPRINTF(0, (CE_CONT, "%s: %s cmd:0x%x", dp->name, __func__, cmd));

	mutex_enter(&dp->intrlock);
	mutex_enter(&dp->xmitlock);

	switch (cmd) {
	default:
		/* mac layer only passes ND_GET/ND_SET down to us */
		_NOTE(NOTREACHED)
		status = IOC_INVAL;
		break;

	case ND_GET:
	case ND_SET:
		status = gem_nd_ioctl(dp, wq, mp, iocp);
		break;
	}

	mutex_exit(&dp->xmitlock);
	mutex_exit(&dp->intrlock);

#ifdef DEBUG_RESUME
	/* exercise the suspend/resume path on every ndd get */
	if (cmd == ND_GET) {
		gem_suspend(dp->dip);
		gem_resume(dp->dip);
	}
#endif
	/*
	 * Finally, decide how to reply
	 */
	switch (status) {
	default:
	case IOC_INVAL:
		/*
		 * Error, reply with a NAK and EINVAL or the specified error
		 */
		miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
		    EINVAL : iocp->ioc_error);
		break;

	case IOC_DONE:
		/*
		 * OK, reply already sent
		 */
		break;

	case IOC_RESTART_ACK:
	case IOC_ACK:
		/*
		 * OK, reply with an ACK
		 */
		miocack(wq, mp, 0, 0);
		break;

	case IOC_RESTART_REPLY:
	case IOC_REPLY:
		/*
		 * OK, send prepared reply as ACK or NAK
		 */
		mp->b_datap->db_type =
		    iocp->ioc_error == 0 ? M_IOCACK : M_IOCNAK;
		qreply(wq, mp);
		break;
	}
}
4004 4004
/*
 * Transceiver type codes returned by gem_mac_xcvr_inuse().
 * Presumably these mirror the XCVR_* values from <sys/mac.h>; they
 * are defined locally only when that header was not included.
 */
#ifndef SYS_MAC_H
#define XCVR_UNDEFINED 0
#define XCVR_NONE 1
#define XCVR_10 2
#define XCVR_100T4 3
#define XCVR_100X 4
#define XCVR_100T2 5
#define XCVR_1000X 6
#define XCVR_1000T 7
#endif
4015 4015 static int
4016 4016 gem_mac_xcvr_inuse(struct gem_dev *dp)
4017 4017 {
4018 4018 int val = XCVR_UNDEFINED;
4019 4019
4020 4020 if ((dp->mii_status & MII_STATUS_XSTATUS) == 0) {
4021 4021 if (dp->mii_status & MII_STATUS_100_BASE_T4) {
4022 4022 val = XCVR_100T4;
4023 4023 } else if (dp->mii_status &
4024 4024 (MII_STATUS_100_BASEX_FD |
4025 4025 MII_STATUS_100_BASEX)) {
4026 4026 val = XCVR_100X;
4027 4027 } else if (dp->mii_status &
4028 4028 (MII_STATUS_100_BASE_T2_FD |
4029 4029 MII_STATUS_100_BASE_T2)) {
4030 4030 val = XCVR_100T2;
4031 4031 } else if (dp->mii_status &
4032 4032 (MII_STATUS_10_FD | MII_STATUS_10)) {
4033 4033 val = XCVR_10;
4034 4034 }
4035 4035 } else if (dp->mii_xstatus &
4036 4036 (MII_XSTATUS_1000BASET_FD | MII_XSTATUS_1000BASET)) {
4037 4037 val = XCVR_1000T;
4038 4038 } else if (dp->mii_xstatus &
4039 4039 (MII_XSTATUS_1000BASEX_FD | MII_XSTATUS_1000BASEX)) {
4040 4040 val = XCVR_1000X;
4041 4041 }
4042 4042
4043 4043 return (val);
4044 4044 }
4045 4045
4046 4046 /* ============================================================== */
4047 4047 /*
4048 4048 * GLDv3 interface
4049 4049 */
4050 4050 /* ============================================================== */
/* forward declarations of the GLDv3 mac(9E) entry points */
static int gem_m_getstat(void *, uint_t, uint64_t *);
static int gem_m_start(void *);
static void gem_m_stop(void *);
static int gem_m_setpromisc(void *, boolean_t);
static int gem_m_multicst(void *, boolean_t, const uint8_t *);
static int gem_m_unicst(void *, const uint8_t *);
static mblk_t *gem_m_tx(void *, mblk_t *);
static void gem_m_ioctl(void *, queue_t *, mblk_t *);
static boolean_t gem_m_getcapab(void *, mac_capab_t, void *);

/* optional callbacks we provide: ioctl and getcapab */
#define GEM_M_CALLBACK_FLAGS (MC_IOCTL | MC_GETCAPAB)

/*
 * Callback vector registered with the mac layer.
 * NOTE(review): positional initializers; the NULL slot is an unused
 * optional callback -- verify against the mac_callbacks_t layout of
 * the target release when modifying.
 */
static mac_callbacks_t gem_m_callbacks = {
	GEM_M_CALLBACK_FLAGS,
	gem_m_getstat,
	gem_m_start,
	gem_m_stop,
	gem_m_setpromisc,
	gem_m_multicst,
	gem_m_unicst,
	gem_m_tx,
	NULL,
	gem_m_ioctl,
	gem_m_getcapab,
};
4076 4076
/*
 * GLDv3 mc_start entry point: bring the NIC online.
 *
 * Initializes the hardware, resets the rx filter state, programs the
 * unicast address and enables reception, starts tx/rx if the link is
 * already up, and arms the periodic tx timeout watcher.
 *
 * Returns 0 on success, EIO if the device is suspended or any hardware
 * step fails (in which case nic_state is left at NIC_STATE_STOPPED).
 */
static int
gem_m_start(void *arg)
{
	int err = 0;
	struct gem_dev *dp = arg;

	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));

	mutex_enter(&dp->intrlock);
	if (dp->mac_suspended) {
		err = EIO;
		goto x;
	}
	if (gem_mac_init(dp) != GEM_SUCCESS) {
		err = EIO;
		goto x;
	}
	dp->nic_state = NIC_STATE_INITIALIZED;

	/* reset rx filter state */
	dp->mc_count = 0;
	dp->mc_count_req = 0;

	/* setup media mode if the link have been up */
	if (dp->mii_state == MII_STATE_LINKUP) {
		(dp->gc.gc_set_media)(dp);
	}

	/* setup initial rx filter */
	/* start from the factory address; gem_m_unicst may change it later */
	bcopy(dp->dev_addr.ether_addr_octet,
	    dp->cur_addr.ether_addr_octet, ETHERADDRL);
	dp->rxmode |= RXMODE_ENABLE;

	if (gem_mac_set_rx_filter(dp) != GEM_SUCCESS) {
		err = EIO;
		goto x;
	}

	dp->nic_state = NIC_STATE_ONLINE;
	if (dp->mii_state == MII_STATE_LINKUP) {
		if (gem_mac_start(dp) != GEM_SUCCESS) {
			err = EIO;
			goto x;
		}
	}

	/* arm the tx stall watcher; gem_m_stop cancels it */
	dp->timeout_id = timeout((void (*)(void *))gem_tx_timeout,
	    (void *)dp, dp->gc.gc_tx_timeout_interval);
	mutex_exit(&dp->intrlock);

	return (0);
x:
	/* any failure leaves the interface administratively stopped */
	dp->nic_state = NIC_STATE_STOPPED;
	mutex_exit(&dp->intrlock);
	return (err);
}
4133 4133
/*
 * GLDv3 mc_stop entry point: take the NIC offline.
 *
 * Sequence: disable reception, cancel the tx timeout watcher, mark the
 * device inactive, wait for any in-flight interrupt handler to drain,
 * then stop the mac.  The intrlock is deliberately released before
 * untimeout() — presumably so a concurrently running gem_tx_timeout
 * can complete instead of deadlocking (NOTE(review): confirm against
 * the timeout handler's locking).  Because the lock is dropped and
 * reacquired, mac_suspended is re-checked after each acquisition.
 */
static void
gem_m_stop(void *arg)
{
	struct gem_dev *dp = arg;

	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));

	/* stop rx */
	mutex_enter(&dp->intrlock);
	if (dp->mac_suspended) {
		mutex_exit(&dp->intrlock);
		return;
	}
	dp->rxmode &= ~RXMODE_ENABLE;
	(void) gem_mac_set_rx_filter(dp);
	mutex_exit(&dp->intrlock);

	/* stop tx timeout watcher */
	if (dp->timeout_id) {
		/* untimeout() returns -1 while the handler is running */
		while (untimeout(dp->timeout_id) == -1)
			;
		dp->timeout_id = 0;
	}

	/* make the nic state inactive */
	mutex_enter(&dp->intrlock);
	if (dp->mac_suspended) {
		mutex_exit(&dp->intrlock);
		return;
	}
	dp->nic_state = NIC_STATE_STOPPED;

	/* we need deassert mac_active due to block interrupt handler */
	mutex_enter(&dp->xmitlock);
	dp->mac_active = B_FALSE;
	mutex_exit(&dp->xmitlock);

	/* block interrupts */
	/* wait for a running interrupt handler to finish before stopping */
	while (dp->intr_busy) {
		cv_wait(&dp->tx_drain_cv, &dp->intrlock);
	}
	(void) gem_mac_stop(dp, 0);
	mutex_exit(&dp->intrlock);
}
4178 4178
4179 4179 static int
4180 4180 gem_m_multicst(void *arg, boolean_t add, const uint8_t *ep)
4181 4181 {
4182 4182 int err;
4183 4183 int ret;
4184 4184 struct gem_dev *dp = arg;
4185 4185
4186 4186 DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4187 4187
4188 4188 if (add) {
4189 4189 ret = gem_add_multicast(dp, ep);
4190 4190 } else {
4191 4191 ret = gem_remove_multicast(dp, ep);
4192 4192 }
4193 4193
4194 4194 err = 0;
4195 4195 if (ret != GEM_SUCCESS) {
4196 4196 err = EIO;
4197 4197 }
4198 4198
4199 4199 return (err);
4200 4200 }
4201 4201
4202 4202 static int
4203 4203 gem_m_setpromisc(void *arg, boolean_t on)
4204 4204 {
4205 4205 int err = 0; /* no error */
4206 4206 struct gem_dev *dp = arg;
4207 4207
4208 4208 DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4209 4209
4210 4210 mutex_enter(&dp->intrlock);
4211 4211 if (dp->mac_suspended) {
4212 4212 mutex_exit(&dp->intrlock);
4213 4213 return (EIO);
4214 4214 }
4215 4215 if (on) {
4216 4216 dp->rxmode |= RXMODE_PROMISC;
4217 4217 } else {
4218 4218 dp->rxmode &= ~RXMODE_PROMISC;
4219 4219 }
4220 4220
4221 4221 if (gem_mac_set_rx_filter(dp) != GEM_SUCCESS) {
4222 4222 err = EIO;
4223 4223 }
4224 4224 mutex_exit(&dp->intrlock);
4225 4225
4226 4226 return (err);
4227 4227 }
4228 4228
/*
 * GLDv3 mc_getstat entry point: report one MAC/Ether statistic.
 *
 * Refreshes the hardware counters via the chip's gc_get_stats method,
 * then maps the requested stat id onto the accumulated counters, the
 * current MII link state, or the advertised/link-partner capability
 * bits.  Returns 0 with *valp set, EIO if the device is suspended or
 * the counter refresh failed, ENOTSUP for unknown stat ids.
 */
int
gem_m_getstat(void *arg, uint_t stat, uint64_t *valp)
{
	struct gem_dev *dp = arg;
	struct gem_stats *gstp = &dp->stats;
	uint64_t val = 0;

	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));

	/*
	 * NOTE(review): this only samples mac_suspended; the lock is
	 * released again before gc_get_stats runs, so the check is
	 * advisory rather than a guarantee the device stays resumed.
	 */
	if (mutex_owned(&dp->intrlock)) {
		if (dp->mac_suspended) {
			return (EIO);
		}
	} else {
		mutex_enter(&dp->intrlock);
		if (dp->mac_suspended) {
			mutex_exit(&dp->intrlock);
			return (EIO);
		}
		mutex_exit(&dp->intrlock);
	}

	/* pull the latest counters out of the hardware */
	if ((*dp->gc.gc_get_stats)(dp) != GEM_SUCCESS) {
		return (EIO);
	}

	switch (stat) {
	case MAC_STAT_IFSPEED:
		/* table converts GEM_SPD_* index to Mbps; report in bps */
		val = gem_speed_value[dp->speed] *1000000ull;
		break;

	case MAC_STAT_MULTIRCV:
		val = gstp->rmcast;
		break;

	case MAC_STAT_BRDCSTRCV:
		val = gstp->rbcast;
		break;

	case MAC_STAT_MULTIXMT:
		val = gstp->omcast;
		break;

	case MAC_STAT_BRDCSTXMT:
		val = gstp->obcast;
		break;

	case MAC_STAT_NORCVBUF:
		val = gstp->norcvbuf + gstp->missed;
		break;

	case MAC_STAT_IERRORS:
		val = gstp->errrcv;
		break;

	case MAC_STAT_NOXMTBUF:
		val = gstp->noxmtbuf;
		break;

	case MAC_STAT_OERRORS:
		val = gstp->errxmt;
		break;

	case MAC_STAT_COLLISIONS:
		val = gstp->collisions;
		break;

	case MAC_STAT_RBYTES:
		val = gstp->rbytes;
		break;

	case MAC_STAT_IPACKETS:
		val = gstp->rpackets;
		break;

	case MAC_STAT_OBYTES:
		val = gstp->obytes;
		break;

	case MAC_STAT_OPACKETS:
		val = gstp->opackets;
		break;

	case MAC_STAT_UNDERFLOWS:
		val = gstp->underflow;
		break;

	case MAC_STAT_OVERFLOWS:
		val = gstp->overflow;
		break;

	case ETHER_STAT_ALIGN_ERRORS:
		val = gstp->frame;
		break;

	case ETHER_STAT_FCS_ERRORS:
		val = gstp->crc;
		break;

	case ETHER_STAT_FIRST_COLLISIONS:
		val = gstp->first_coll;
		break;

	case ETHER_STAT_MULTI_COLLISIONS:
		val = gstp->multi_coll;
		break;

	case ETHER_STAT_SQE_ERRORS:
		val = gstp->sqe;
		break;

	case ETHER_STAT_DEFER_XMTS:
		val = gstp->defer;
		break;

	case ETHER_STAT_TX_LATE_COLLISIONS:
		val = gstp->xmtlatecoll;
		break;

	case ETHER_STAT_EX_COLLISIONS:
		val = gstp->excoll;
		break;

	case ETHER_STAT_MACXMT_ERRORS:
		val = gstp->xmit_internal_err;
		break;

	case ETHER_STAT_CARRIER_ERRORS:
		val = gstp->nocarrier;
		break;

	case ETHER_STAT_TOOLONG_ERRORS:
		val = gstp->frame_too_long;
		break;

	case ETHER_STAT_MACRCV_ERRORS:
		val = gstp->rcv_internal_err;
		break;

	case ETHER_STAT_XCVR_ADDR:
		val = dp->mii_phy_addr;
		break;

	case ETHER_STAT_XCVR_ID:
		val = dp->mii_phy_id;
		break;

	case ETHER_STAT_XCVR_INUSE:
		val = gem_mac_xcvr_inuse(dp);
		break;

	/* device capabilities, from the PHY status registers */
	case ETHER_STAT_CAP_1000FDX:
		val = (dp->mii_xstatus & MII_XSTATUS_1000BASET_FD) ||
		    (dp->mii_xstatus & MII_XSTATUS_1000BASEX_FD);
		break;

	case ETHER_STAT_CAP_1000HDX:
		val = (dp->mii_xstatus & MII_XSTATUS_1000BASET) ||
		    (dp->mii_xstatus & MII_XSTATUS_1000BASEX);
		break;

	case ETHER_STAT_CAP_100FDX:
		val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX_FD);
		break;

	case ETHER_STAT_CAP_100HDX:
		val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX);
		break;

	case ETHER_STAT_CAP_10FDX:
		val = BOOLEAN(dp->mii_status & MII_STATUS_10_FD);
		break;

	case ETHER_STAT_CAP_10HDX:
		val = BOOLEAN(dp->mii_status & MII_STATUS_10);
		break;

	case ETHER_STAT_CAP_ASMPAUSE:
		val = BOOLEAN(dp->gc.gc_flow_control & 2);
		break;

	case ETHER_STAT_CAP_PAUSE:
		val = BOOLEAN(dp->gc.gc_flow_control & 1);
		break;

	case ETHER_STAT_CAP_AUTONEG:
		val = BOOLEAN(dp->mii_status & MII_STATUS_CANAUTONEG);
		break;

	/* capabilities we advertise for autonegotiation */
	case ETHER_STAT_ADV_CAP_1000FDX:
		val = dp->anadv_1000fdx;
		break;

	case ETHER_STAT_ADV_CAP_1000HDX:
		val = dp->anadv_1000hdx;
		break;

	case ETHER_STAT_ADV_CAP_100FDX:
		val = dp->anadv_100fdx;
		break;

	case ETHER_STAT_ADV_CAP_100HDX:
		val = dp->anadv_100hdx;
		break;

	case ETHER_STAT_ADV_CAP_10FDX:
		val = dp->anadv_10fdx;
		break;

	case ETHER_STAT_ADV_CAP_10HDX:
		val = dp->anadv_10hdx;
		break;

	case ETHER_STAT_ADV_CAP_ASMPAUSE:
		val = BOOLEAN(dp->anadv_flow_control & 2);
		break;

	case ETHER_STAT_ADV_CAP_PAUSE:
		val = BOOLEAN(dp->anadv_flow_control & 1);
		break;

	case ETHER_STAT_ADV_CAP_AUTONEG:
		val = dp->anadv_autoneg;
		break;

	/* link partner capabilities, from the autonegotiation results */
	case ETHER_STAT_LP_CAP_1000FDX:
		val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_FULL);
		break;

	case ETHER_STAT_LP_CAP_1000HDX:
		val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_HALF);
		break;

	case ETHER_STAT_LP_CAP_100FDX:
		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX_FD);
		break;

	case ETHER_STAT_LP_CAP_100HDX:
		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX);
		break;

	case ETHER_STAT_LP_CAP_10FDX:
		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T_FD);
		break;

	case ETHER_STAT_LP_CAP_10HDX:
		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T);
		break;

	case ETHER_STAT_LP_CAP_ASMPAUSE:
		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_ASMPAUSE);
		break;

	case ETHER_STAT_LP_CAP_PAUSE:
		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_PAUSE);
		break;

	case ETHER_STAT_LP_CAP_AUTONEG:
		val = BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
		break;

	/* negotiated link state */
	case ETHER_STAT_LINK_ASMPAUSE:
		val = BOOLEAN(dp->flow_control & 2);
		break;

	case ETHER_STAT_LINK_PAUSE:
		val = BOOLEAN(dp->flow_control & 1);
		break;

	case ETHER_STAT_LINK_AUTONEG:
		val = dp->anadv_autoneg &&
		    BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
		break;

	case ETHER_STAT_LINK_DUPLEX:
		/* 0: link down, 1: half duplex, 2: full duplex */
		val = (dp->mii_state == MII_STATE_LINKUP) ?
		    (dp->full_duplex ? 2 : 1) : 0;
		break;

	case ETHER_STAT_TOOSHORT_ERRORS:
		val = gstp->runt;
		break;
	case ETHER_STAT_LP_REMFAULT:
		val = BOOLEAN(dp->mii_lpable & MII_AN_ADVERT_REMFAULT);
		break;

	case ETHER_STAT_JABBER_ERRORS:
		val = gstp->jabber;
		break;

	case ETHER_STAT_CAP_100T4:
		val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASE_T4);
		break;

	case ETHER_STAT_ADV_CAP_100T4:
		val = dp->anadv_100t4;
		break;

	case ETHER_STAT_LP_CAP_100T4:
		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_T4);
		break;

	default:
#if GEM_DEBUG_LEVEL > 2
		cmn_err(CE_WARN,
		    "%s: unrecognized parameter value = %d",
		    __func__, stat);
#endif
		return (ENOTSUP);
	}

	*valp = val;

	return (0);
}
4544 4544
4545 4545 static int
4546 4546 gem_m_unicst(void *arg, const uint8_t *mac)
4547 4547 {
4548 4548 int err = 0;
4549 4549 struct gem_dev *dp = arg;
4550 4550
4551 4551 DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4552 4552
4553 4553 mutex_enter(&dp->intrlock);
4554 4554 if (dp->mac_suspended) {
4555 4555 mutex_exit(&dp->intrlock);
4556 4556 return (EIO);
4557 4557 }
4558 4558 bcopy(mac, dp->cur_addr.ether_addr_octet, ETHERADDRL);
4559 4559 dp->rxmode |= RXMODE_ENABLE;
4560 4560
4561 4561 if (gem_mac_set_rx_filter(dp) != GEM_SUCCESS) {
4562 4562 err = EIO;
4563 4563 }
4564 4564 mutex_exit(&dp->intrlock);
4565 4565
4566 4566 return (err);
4567 4567 }
4568 4568
4569 4569 /*
4570 4570 * gem_m_tx is used only for sending data packets into ethernet wire.
4571 4571 */
4572 4572 static mblk_t *
4573 4573 gem_m_tx(void *arg, mblk_t *mp)
4574 4574 {
4575 4575 uint32_t flags = 0;
4576 4576 struct gem_dev *dp = arg;
4577 4577 mblk_t *tp;
4578 4578
4579 4579 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4580 4580
4581 4581 ASSERT(dp->nic_state == NIC_STATE_ONLINE);
4582 4582 if (dp->mii_state != MII_STATE_LINKUP) {
4583 4583 /* Some nics hate to send packets when the link is down. */
4584 4584 while (mp) {
4585 4585 tp = mp->b_next;
4586 4586 mp->b_next = NULL;
4587 4587 freemsg(mp);
4588 4588 mp = tp;
4589 4589 }
4590 4590 return (NULL);
4591 4591 }
4592 4592
4593 4593 return (gem_send_common(dp, mp, flags));
4594 4594 }
4595 4595
4596 4596 static void
4597 4597 gem_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
4598 4598 {
4599 4599 DPRINTF(0, (CE_CONT, "!%s: %s: called",
4600 4600 ((struct gem_dev *)arg)->name, __func__));
4601 4601
4602 4602 gem_mac_ioctl((struct gem_dev *)arg, wq, mp);
4603 4603 }
4604 4604
/*
 * GLDv3 mc_getcapab entry point: no optional MAC capabilities
 * (e.g. checksum offload) are advertised by this framework.
 */
/* ARGSUSED */
static boolean_t
gem_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
	return (B_FALSE);
}
4611 4611
4612 4612 static void
4613 4613 gem_gld3_init(struct gem_dev *dp, mac_register_t *macp)
4614 4614 {
4615 4615 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
4616 4616 macp->m_driver = dp;
4617 4617 macp->m_dip = dp->dip;
4618 4618 macp->m_src_addr = dp->dev_addr.ether_addr_octet;
4619 4619 macp->m_callbacks = &gem_m_callbacks;
4620 4620 macp->m_min_sdu = 0;
4621 4621 macp->m_max_sdu = dp->mtu;
4622 4622
4623 4623 if (dp->misc_flag & GEM_VLAN) {
4624 4624 macp->m_margin = VTAG_SIZE;
4625 4625 }
4626 4626 }
4627 4627
4628 4628 /* ======================================================================== */
4629 4629 /*
4630 4630 * attach/detatch support
4631 4631 */
4632 4632 /* ======================================================================== */
4633 4633 static void
4634 4634 gem_read_conf(struct gem_dev *dp)
4635 4635 {
4636 4636 int val;
4637 4637
4638 4638 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4639 4639
4640 4640 /*
4641 4641 * Get media mode infomation from .conf file
4642 4642 */
4643 4643 dp->anadv_autoneg = gem_prop_get_int(dp, "adv_autoneg_cap", 1) != 0;
4644 4644 dp->anadv_1000fdx = gem_prop_get_int(dp, "adv_1000fdx_cap", 1) != 0;
4645 4645 dp->anadv_1000hdx = gem_prop_get_int(dp, "adv_1000hdx_cap", 1) != 0;
4646 4646 dp->anadv_100t4 = gem_prop_get_int(dp, "adv_100T4_cap", 1) != 0;
4647 4647 dp->anadv_100fdx = gem_prop_get_int(dp, "adv_100fdx_cap", 1) != 0;
4648 4648 dp->anadv_100hdx = gem_prop_get_int(dp, "adv_100hdx_cap", 1) != 0;
4649 4649 dp->anadv_10fdx = gem_prop_get_int(dp, "adv_10fdx_cap", 1) != 0;
4650 4650 dp->anadv_10hdx = gem_prop_get_int(dp, "adv_10hdx_cap", 1) != 0;
4651 4651
4652 4652 if ((ddi_prop_exists(DDI_DEV_T_ANY, dp->dip,
4653 4653 DDI_PROP_DONTPASS, "full-duplex"))) {
4654 4654 dp->full_duplex = gem_prop_get_int(dp, "full-duplex", 1) != 0;
4655 4655 dp->anadv_autoneg = B_FALSE;
4656 4656 if (dp->full_duplex) {
4657 4657 dp->anadv_1000hdx = B_FALSE;
4658 4658 dp->anadv_100hdx = B_FALSE;
4659 4659 dp->anadv_10hdx = B_FALSE;
4660 4660 } else {
4661 4661 dp->anadv_1000fdx = B_FALSE;
4662 4662 dp->anadv_100fdx = B_FALSE;
4663 4663 dp->anadv_10fdx = B_FALSE;
4664 4664 }
4665 4665 }
4666 4666
4667 4667 if ((val = gem_prop_get_int(dp, "speed", 0)) > 0) {
4668 4668 dp->anadv_autoneg = B_FALSE;
4669 4669 switch (val) {
4670 4670 case 1000:
4671 4671 dp->speed = GEM_SPD_1000;
4672 4672 dp->anadv_100t4 = B_FALSE;
4673 4673 dp->anadv_100fdx = B_FALSE;
4674 4674 dp->anadv_100hdx = B_FALSE;
4675 4675 dp->anadv_10fdx = B_FALSE;
4676 4676 dp->anadv_10hdx = B_FALSE;
4677 4677 break;
4678 4678 case 100:
4679 4679 dp->speed = GEM_SPD_100;
4680 4680 dp->anadv_1000fdx = B_FALSE;
4681 4681 dp->anadv_1000hdx = B_FALSE;
4682 4682 dp->anadv_10fdx = B_FALSE;
4683 4683 dp->anadv_10hdx = B_FALSE;
4684 4684 break;
4685 4685 case 10:
4686 4686 dp->speed = GEM_SPD_10;
4687 4687 dp->anadv_1000fdx = B_FALSE;
4688 4688 dp->anadv_1000hdx = B_FALSE;
4689 4689 dp->anadv_100t4 = B_FALSE;
4690 4690 dp->anadv_100fdx = B_FALSE;
4691 4691 dp->anadv_100hdx = B_FALSE;
4692 4692 break;
4693 4693 default:
4694 4694 cmn_err(CE_WARN,
4695 4695 "!%s: property %s: illegal value:%d",
4696 4696 dp->name, "speed", val);
4697 4697 dp->anadv_autoneg = B_TRUE;
4698 4698 break;
4699 4699 }
4700 4700 }
4701 4701
4702 4702 val = gem_prop_get_int(dp, "flow-control", dp->gc.gc_flow_control);
4703 4703 if (val > FLOW_CONTROL_RX_PAUSE || val < FLOW_CONTROL_NONE) {
4704 4704 cmn_err(CE_WARN,
4705 4705 "!%s: property %s: illegal value:%d",
4706 4706 dp->name, "flow-control", val);
4707 4707 } else {
4708 4708 val = min(val, dp->gc.gc_flow_control);
4709 4709 }
4710 4710 dp->anadv_flow_control = val;
4711 4711
4712 4712 if (gem_prop_get_int(dp, "nointr", 0)) {
4713 4713 dp->misc_flag |= GEM_NOINTR;
4714 4714 cmn_err(CE_NOTE, "!%s: polling mode enabled", dp->name);
4715 4715 }
4716 4716
4717 4717 dp->mtu = gem_prop_get_int(dp, "mtu", dp->mtu);
4718 4718 dp->txthr = gem_prop_get_int(dp, "txthr", dp->txthr);
4719 4719 dp->rxthr = gem_prop_get_int(dp, "rxthr", dp->rxthr);
4720 4720 dp->txmaxdma = gem_prop_get_int(dp, "txmaxdma", dp->txmaxdma);
4721 4721 dp->rxmaxdma = gem_prop_get_int(dp, "rxmaxdma", dp->rxmaxdma);
4722 4722 }
4723 4723
4724 4724
4725 4725 /*
4726 4726 * Gem kstat support
4727 4727 */
4728 4728
/*
 * Size of the per-port soft state allocation: the gem_dev itself,
 * followed by the multicast address table, the txbuf array, and an
 * array of slot pointers.  gem_do_attach carves the allocation up in
 * exactly this order.
 */
#define GEM_LOCAL_DATA_SIZE(gc) \
	(sizeof (struct gem_dev) + \
	sizeof (struct mcast_addr) * GEM_MAXMC + \
	sizeof (struct txbuf) * ((gc)->gc_tx_buf_size) + \
	sizeof (void *) * ((gc)->gc_tx_buf_size))
4734 4734
4735 4735 struct gem_dev *
4736 4736 gem_do_attach(dev_info_t *dip, int port,
4737 4737 struct gem_conf *gc, void *base, ddi_acc_handle_t *regs_handlep,
4738 4738 void *lp, int lmsize)
4739 4739 {
4740 4740 struct gem_dev *dp;
4741 4741 int i;
4742 4742 ddi_iblock_cookie_t c;
4743 4743 mac_register_t *macp = NULL;
4744 4744 int ret;
4745 4745 int unit;
4746 4746 int nports;
4747 4747
4748 4748 unit = ddi_get_instance(dip);
4749 4749 if ((nports = gc->gc_nports) == 0) {
4750 4750 nports = 1;
4751 4751 }
4752 4752 if (nports == 1) {
4753 4753 ddi_set_driver_private(dip, NULL);
4754 4754 }
4755 4755
4756 4756 DPRINTF(2, (CE_CONT, "!gem%d: gem_do_attach: called cmd:ATTACH",
4757 4757 unit));
4758 4758
4759 4759 /*
4760 4760 * Allocate soft data structure
4761 4761 */
4762 4762 dp = kmem_zalloc(GEM_LOCAL_DATA_SIZE(gc), KM_SLEEP);
4763 4763
4764 4764 if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
4765 4765 cmn_err(CE_WARN, "!gem%d: %s: mac_alloc failed",
4766 4766 unit, __func__);
4767 4767 return (NULL);
4768 4768 }
4769 4769 /* ddi_set_driver_private(dip, dp); */
4770 4770
4771 4771 /* link to private area */
4772 4772 dp->private = lp;
4773 4773 dp->priv_size = lmsize;
4774 4774 dp->mc_list = (struct mcast_addr *)&dp[1];
4775 4775
4776 4776 dp->dip = dip;
4777 4777 (void) sprintf(dp->name, gc->gc_name, nports * unit + port);
4778 4778
4779 4779 /*
4780 4780 * Get iblock cookie
4781 4781 */
4782 4782 if (ddi_get_iblock_cookie(dip, 0, &c) != DDI_SUCCESS) {
4783 4783 cmn_err(CE_CONT,
4784 4784 "!%s: gem_do_attach: ddi_get_iblock_cookie: failed",
4785 4785 dp->name);
4786 4786 goto err_free_private;
4787 4787 }
4788 4788 dp->iblock_cookie = c;
4789 4789
4790 4790 /*
4791 4791 * Initialize mutex's for this device.
4792 4792 */
4793 4793 mutex_init(&dp->intrlock, NULL, MUTEX_DRIVER, (void *)c);
4794 4794 mutex_init(&dp->xmitlock, NULL, MUTEX_DRIVER, (void *)c);
4795 4795 cv_init(&dp->tx_drain_cv, NULL, CV_DRIVER, NULL);
4796 4796
4797 4797 /*
4798 4798 * configure gem parameter
4799 4799 */
4800 4800 dp->base_addr = base;
4801 4801 dp->regs_handle = *regs_handlep;
4802 4802 dp->gc = *gc;
4803 4803 gc = &dp->gc;
4804 4804 /* patch for simplify dma resource management */
4805 4805 gc->gc_tx_max_frags = 1;
4806 4806 gc->gc_tx_max_descs_per_pkt = 1;
4807 4807 gc->gc_tx_ring_size = gc->gc_tx_buf_size;
4808 4808 gc->gc_tx_ring_limit = gc->gc_tx_buf_limit;
4809 4809 gc->gc_tx_desc_write_oo = B_TRUE;
4810 4810
4811 4811 gc->gc_nports = nports; /* fix nports */
4812 4812
4813 4813 /* fix copy threadsholds */
4814 4814 gc->gc_tx_copy_thresh = max(ETHERMIN, gc->gc_tx_copy_thresh);
4815 4815 gc->gc_rx_copy_thresh = max(ETHERMIN, gc->gc_rx_copy_thresh);
4816 4816
4817 4817 /* fix rx buffer boundary for iocache line size */
4818 4818 ASSERT(gc->gc_dma_attr_txbuf.dma_attr_align-1 == gc->gc_tx_buf_align);
4819 4819 ASSERT(gc->gc_dma_attr_rxbuf.dma_attr_align-1 == gc->gc_rx_buf_align);
4820 4820 gc->gc_rx_buf_align = max(gc->gc_rx_buf_align, IOC_LINESIZE - 1);
4821 4821 gc->gc_dma_attr_rxbuf.dma_attr_align = gc->gc_rx_buf_align + 1;
4822 4822
4823 4823 /* fix descriptor boundary for cache line size */
4824 4824 gc->gc_dma_attr_desc.dma_attr_align =
4825 4825 max(gc->gc_dma_attr_desc.dma_attr_align, IOC_LINESIZE);
4826 4826
4827 4827 /* patch get_packet method */
4828 4828 if (gc->gc_get_packet == NULL) {
4829 4829 gc->gc_get_packet = &gem_get_packet_default;
4830 4830 }
4831 4831
4832 4832 /* patch get_rx_start method */
4833 4833 if (gc->gc_rx_start == NULL) {
4834 4834 gc->gc_rx_start = &gem_rx_start_default;
4835 4835 }
4836 4836
4837 4837 /* calculate descriptor area */
4838 4838 if (gc->gc_rx_desc_unit_shift >= 0) {
4839 4839 dp->rx_desc_size =
4840 4840 ROUNDUP(gc->gc_rx_ring_size << gc->gc_rx_desc_unit_shift,
4841 4841 gc->gc_dma_attr_desc.dma_attr_align);
4842 4842 }
4843 4843 if (gc->gc_tx_desc_unit_shift >= 0) {
4844 4844 dp->tx_desc_size =
4845 4845 ROUNDUP(gc->gc_tx_ring_size << gc->gc_tx_desc_unit_shift,
4846 4846 gc->gc_dma_attr_desc.dma_attr_align);
4847 4847 }
4848 4848
4849 4849 dp->mtu = ETHERMTU;
4850 4850 dp->tx_buf = (void *)&dp->mc_list[GEM_MAXMC];
4851 4851 /* link tx buffers */
4852 4852 for (i = 0; i < dp->gc.gc_tx_buf_size; i++) {
4853 4853 dp->tx_buf[i].txb_next =
4854 4854 &dp->tx_buf[SLOT(i + 1, dp->gc.gc_tx_buf_size)];
4855 4855 }
4856 4856
4857 4857 dp->rxmode = 0;
4858 4858 dp->speed = GEM_SPD_10; /* default is 10Mbps */
4859 4859 dp->full_duplex = B_FALSE; /* default is half */
4860 4860 dp->flow_control = FLOW_CONTROL_NONE;
4861 4861 dp->poll_pkt_delay = 8; /* typical coalease for rx packets */
4862 4862
4863 4863 /* performance tuning parameters */
4864 4864 dp->txthr = ETHERMAX; /* tx fifo threshold */
4865 4865 dp->txmaxdma = 16*4; /* tx max dma burst size */
4866 4866 dp->rxthr = 128; /* rx fifo threshold */
4867 4867 dp->rxmaxdma = 16*4; /* rx max dma burst size */
4868 4868
4869 4869 /*
4870 4870 * Get media mode information from .conf file
4871 4871 */
4872 4872 gem_read_conf(dp);
4873 4873
4874 4874 /* rx_buf_len is required buffer length without padding for alignment */
4875 4875 dp->rx_buf_len = MAXPKTBUF(dp) + dp->gc.gc_rx_header_len;
4876 4876
4877 4877 /*
4878 4878 * Reset the chip
4879 4879 */
4880 4880 mutex_enter(&dp->intrlock);
4881 4881 dp->nic_state = NIC_STATE_STOPPED;
4882 4882 ret = (*dp->gc.gc_reset_chip)(dp);
4883 4883 mutex_exit(&dp->intrlock);
4884 4884 if (ret != GEM_SUCCESS) {
4885 4885 goto err_free_regs;
4886 4886 }
4887 4887
4888 4888 /*
4889 4889 * HW dependant paremeter initialization
4890 4890 */
4891 4891 mutex_enter(&dp->intrlock);
4892 4892 ret = (*dp->gc.gc_attach_chip)(dp);
4893 4893 mutex_exit(&dp->intrlock);
4894 4894 if (ret != GEM_SUCCESS) {
4895 4895 goto err_free_regs;
4896 4896 }
4897 4897
4898 4898 #ifdef DEBUG_MULTIFRAGS
4899 4899 dp->gc.gc_tx_copy_thresh = dp->mtu;
4900 4900 #endif
4901 4901 /* allocate tx and rx resources */
4902 4902 if (gem_alloc_memory(dp)) {
4903 4903 goto err_free_regs;
4904 4904 }
4905 4905
4906 4906 DPRINTF(0, (CE_CONT,
4907 4907 "!%s: at 0x%x, %02x:%02x:%02x:%02x:%02x:%02x",
4908 4908 dp->name, (long)dp->base_addr,
4909 4909 dp->dev_addr.ether_addr_octet[0],
4910 4910 dp->dev_addr.ether_addr_octet[1],
4911 4911 dp->dev_addr.ether_addr_octet[2],
4912 4912 dp->dev_addr.ether_addr_octet[3],
4913 4913 dp->dev_addr.ether_addr_octet[4],
4914 4914 dp->dev_addr.ether_addr_octet[5]));
4915 4915
4916 4916 /* copy mac address */
4917 4917 dp->cur_addr = dp->dev_addr;
4918 4918
4919 4919 gem_gld3_init(dp, macp);
4920 4920
4921 4921 /* Probe MII phy (scan phy) */
4922 4922 dp->mii_lpable = 0;
4923 4923 dp->mii_advert = 0;
4924 4924 dp->mii_exp = 0;
4925 4925 dp->mii_ctl1000 = 0;
4926 4926 dp->mii_stat1000 = 0;
4927 4927 if ((*dp->gc.gc_mii_probe)(dp) != GEM_SUCCESS) {
4928 4928 goto err_free_ring;
4929 4929 }
4930 4930
4931 4931 /* mask unsupported abilities */
4932 4932 dp->anadv_autoneg &= BOOLEAN(dp->mii_status & MII_STATUS_CANAUTONEG);
4933 4933 dp->anadv_1000fdx &=
4934 4934 BOOLEAN(dp->mii_xstatus &
4935 4935 (MII_XSTATUS_1000BASEX_FD | MII_XSTATUS_1000BASET_FD));
4936 4936 dp->anadv_1000hdx &=
4937 4937 BOOLEAN(dp->mii_xstatus &
4938 4938 (MII_XSTATUS_1000BASEX | MII_XSTATUS_1000BASET));
4939 4939 dp->anadv_100t4 &= BOOLEAN(dp->mii_status & MII_STATUS_100_BASE_T4);
4940 4940 dp->anadv_100fdx &= BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX_FD);
4941 4941 dp->anadv_100hdx &= BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX);
4942 4942 dp->anadv_10fdx &= BOOLEAN(dp->mii_status & MII_STATUS_10_FD);
4943 4943 dp->anadv_10hdx &= BOOLEAN(dp->mii_status & MII_STATUS_10);
4944 4944
4945 4945 gem_choose_forcedmode(dp);
4946 4946
4947 4947 /* initialize MII phy if required */
4948 4948 if (dp->gc.gc_mii_init) {
4949 4949 if ((*dp->gc.gc_mii_init)(dp) != GEM_SUCCESS) {
4950 4950 goto err_free_ring;
4951 4951 }
4952 4952 }
4953 4953
4954 4954 /*
4955 4955 * initialize kstats including mii statistics
4956 4956 */
4957 4957 gem_nd_setup(dp);
4958 4958
4959 4959 /*
4960 4960 * Add interrupt to system.
4961 4961 */
4962 4962 if (ret = mac_register(macp, &dp->mh)) {
4963 4963 cmn_err(CE_WARN, "!%s: mac_register failed, error:%d",
4964 4964 dp->name, ret);
4965 4965 goto err_release_stats;
4966 4966 }
4967 4967 mac_free(macp);
4968 4968 macp = NULL;
4969 4969
4970 4970 if (dp->misc_flag & GEM_SOFTINTR) {
4971 4971 if (ddi_add_softintr(dip,
4972 4972 DDI_SOFTINT_LOW, &dp->soft_id,
4973 4973 NULL, NULL,
4974 4974 (uint_t (*)(caddr_t))gem_intr,
4975 4975 (caddr_t)dp) != DDI_SUCCESS) {
4976 4976 cmn_err(CE_WARN, "!%s: ddi_add_softintr failed",
4977 4977 dp->name);
4978 4978 goto err_unregister;
4979 4979 }
4980 4980 } else if ((dp->misc_flag & GEM_NOINTR) == 0) {
4981 4981 if (ddi_add_intr(dip, 0, NULL, NULL,
4982 4982 (uint_t (*)(caddr_t))gem_intr,
4983 4983 (caddr_t)dp) != DDI_SUCCESS) {
↓ open down ↓ |
4983 lines elided |
↑ open up ↑ |
4984 4984 cmn_err(CE_WARN, "!%s: ddi_add_intr failed", dp->name);
4985 4985 goto err_unregister;
4986 4986 }
4987 4987 } else {
4988 4988 /*
4989 4989 * Dont use interrupt.
4990 4990 * schedule first call of gem_intr_watcher
4991 4991 */
4992 4992 dp->intr_watcher_id =
4993 4993 timeout((void (*)(void *))gem_intr_watcher,
4994 - (void *)dp, drv_usectohz(3*1000000));
4994 + (void *)dp, drv_sectohz(3));
4995 4995 }
4996 4996
4997 4997 /* link this device to dev_info */
4998 4998 dp->next = (struct gem_dev *)ddi_get_driver_private(dip);
4999 4999 dp->port = port;
5000 5000 ddi_set_driver_private(dip, (caddr_t)dp);
5001 5001
5002 5002 /* reset mii phy and start mii link watcher */
5003 5003 gem_mii_start(dp);
5004 5004
5005 5005 DPRINTF(2, (CE_CONT, "!gem_do_attach: return: success"));
5006 5006 return (dp);
5007 5007
5008 5008 err_unregister:
5009 5009 (void) mac_unregister(dp->mh);
5010 5010 err_release_stats:
5011 5011 /* release NDD resources */
5012 5012 gem_nd_cleanup(dp);
5013 5013
5014 5014 err_free_ring:
5015 5015 gem_free_memory(dp);
5016 5016 err_free_regs:
5017 5017 ddi_regs_map_free(&dp->regs_handle);
5018 5018 err_free_locks:
5019 5019 mutex_destroy(&dp->xmitlock);
5020 5020 mutex_destroy(&dp->intrlock);
5021 5021 cv_destroy(&dp->tx_drain_cv);
5022 5022 err_free_private:
5023 5023 if (macp) {
5024 5024 mac_free(macp);
5025 5025 }
5026 5026 kmem_free((caddr_t)dp, GEM_LOCAL_DATA_SIZE(gc));
5027 5027
5028 5028 return (NULL);
5029 5029 }
5030 5030
/*
 * Common detach path for gem-based drivers.
 *
 * Walks the chain of gem_dev ports hung off the dev_info, and for each
 * one: unregisters from GLDv3, verifies no rx buffers are loaned out
 * (panics otherwise — a loaned buffer would be used after free), stops
 * the link watcher, tears down the interrupt source, and releases NDD,
 * memory, and lock resources.  The shared private area and register
 * mapping are released once, after all ports are gone.
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE if mac_unregister() refuses
 * (e.g. the mac is still held by upper layers).
 */
int
gem_do_detach(dev_info_t *dip)
{
	struct gem_dev *dp;
	struct gem_dev *tmp;
	caddr_t private;
	int priv_size;
	ddi_acc_handle_t rh;

	dp = GEM_GET_DEV(dip);
	if (dp == NULL) {
		return (DDI_SUCCESS);
	}

	/* saved now because the last kmem_free below destroys dp */
	rh = dp->regs_handle;
	private = dp->private;
	priv_size = dp->priv_size;

	while (dp) {
		/* unregister with gld v3 */
		if (mac_unregister(dp->mh) != 0) {
			return (DDI_FAILURE);
		}

		/* ensure any rx buffers are not used */
		if (dp->rx_buf_allocated != dp->rx_buf_freecnt) {
			/* resource is busy */
			cmn_err(CE_PANIC,
			    "!%s: %s: rxbuf is busy: allocated:%d, freecnt:%d",
			    dp->name, __func__,
			    dp->rx_buf_allocated, dp->rx_buf_freecnt);
			/* NOT REACHED */
		}

		/* stop mii link watcher */
		gem_mii_stop(dp);

		/* unregister interrupt handler */
		if (dp->misc_flag & GEM_SOFTINTR) {
			ddi_remove_softintr(dp->soft_id);
		} else if ((dp->misc_flag & GEM_NOINTR) == 0) {
			ddi_remove_intr(dip, 0, dp->iblock_cookie);
		} else {
			/* stop interrupt watcher */
			if (dp->intr_watcher_id) {
				/* -1 means the watcher is mid-execution */
				while (untimeout(dp->intr_watcher_id) == -1)
					;
				dp->intr_watcher_id = 0;
			}
		}

		/* release NDD resources */
		gem_nd_cleanup(dp);
		/* release buffers, descriptors and dma resources */
		gem_free_memory(dp);

		/* release locks and condition variables */
		mutex_destroy(&dp->xmitlock);
		mutex_destroy(&dp->intrlock);
		cv_destroy(&dp->tx_drain_cv);

		/* release basic memory resources */
		tmp = dp->next;
		kmem_free((caddr_t)dp, GEM_LOCAL_DATA_SIZE(&dp->gc));
		dp = tmp;
	}

	/* release common private memory for the nic */
	kmem_free(private, priv_size);

	/* release register mapping resources */
	ddi_regs_map_free(&rh);

	DPRINTF(2, (CE_CONT, "!%s%d: gem_do_detach: return: success",
	    ddi_driver_name(dip), ddi_get_instance(dip)));

	return (DDI_SUCCESS);
}
5109 5109
/*
 * gem_suspend: quiesce every port on dip for suspend/resume.
 * Stops the mii link watcher, the no-intr polling watcher and the tx
 * timeout watcher, stops the mac, and sets mac_suspended so that no
 * further register access is attempted until gem_resume().
 * Always returns DDI_SUCCESS.
 */
int
gem_suspend(dev_info_t *dip)
{
	struct gem_dev *dp;

	/*
	 * stop the device
	 */
	dp = GEM_GET_DEV(dip);
	ASSERT(dp);

	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));

	for (; dp; dp = dp->next) {

		/* stop mii link watcher */
		gem_mii_stop(dp);

		/* stop interrupt watcher for no-intr mode */
		if (dp->misc_flag & GEM_NOINTR) {
			if (dp->intr_watcher_id) {
				/* -1 means the callback is running; wait it out */
				while (untimeout(dp->intr_watcher_id) == -1)
					;
			}
			dp->intr_watcher_id = 0;
		}

		/* stop tx timeout watcher */
		if (dp->timeout_id) {
			while (untimeout(dp->timeout_id) == -1)
				;
			dp->timeout_id = 0;
		}

		/* make the nic state inactive */
		mutex_enter(&dp->intrlock);
		(void) gem_mac_stop(dp, 0);
		ASSERT(!dp->mac_active);

		/* no further register access */
		dp->mac_suspended = B_TRUE;
		mutex_exit(&dp->intrlock);
	}

	/* XXX - power down the nic */

	return (DDI_SUCCESS);
}
5158 5158
/*
 * gem_resume: bring every port on dip back up after gem_suspend().
 * For each port: clear the suspended flag, reset and re-initialize the
 * chip and phy, restart the watchers, and restore the mac to its
 * pre-suspend state (including the media mode if the link was up).
 * Returns DDI_SUCCESS, or DDI_FAILURE after resetting the failing
 * port's chip.
 */
int
gem_resume(dev_info_t *dip)
{
	struct gem_dev *dp;

	/*
	 * restart the device
	 */
	dp = GEM_GET_DEV(dip);
	ASSERT(dp);

	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));

	for (; dp; dp = dp->next) {

		/*
		 * Bring up the nic after power up
		 */

		/* gem_xxx.c layer to setup power management state. */
		ASSERT(!dp->mac_active);

		/* reset the chip, because we are just after power up. */
		mutex_enter(&dp->intrlock);

		dp->mac_suspended = B_FALSE;
		dp->nic_state = NIC_STATE_STOPPED;

		if ((*dp->gc.gc_reset_chip)(dp) != GEM_SUCCESS) {
			cmn_err(CE_WARN, "%s: %s: failed to reset chip",
			    dp->name, __func__);
			mutex_exit(&dp->intrlock);
			goto err;
		}
		mutex_exit(&dp->intrlock);

		/* initialize mii phy because we are just after power up */
		if (dp->gc.gc_mii_init) {
			(void) (*dp->gc.gc_mii_init)(dp);
		}

		if (dp->misc_flag & GEM_NOINTR) {
			/*
			 * schedule first call of gem_intr_watcher
			 * instead of interrupts.
			 */
			dp->intr_watcher_id =
			    timeout((void (*)(void *))gem_intr_watcher,
			    (void *)dp, drv_sectohz(3));
		}

		/* restart mii link watcher */
		gem_mii_start(dp);

		/* restart mac */
		mutex_enter(&dp->intrlock);

		if (gem_mac_init(dp) != GEM_SUCCESS) {
			mutex_exit(&dp->intrlock);
			goto err_reset;
		}
		dp->nic_state = NIC_STATE_INITIALIZED;

		/* setup media mode if the link have been up */
		if (dp->mii_state == MII_STATE_LINKUP) {
			if ((dp->gc.gc_set_media)(dp) != GEM_SUCCESS) {
				mutex_exit(&dp->intrlock);
				goto err_reset;
			}
		}

		/* enable mac address and rx filter */
		dp->rxmode |= RXMODE_ENABLE;
		if ((*dp->gc.gc_set_rx_filter)(dp) != GEM_SUCCESS) {
			mutex_exit(&dp->intrlock);
			goto err_reset;
		}
		dp->nic_state = NIC_STATE_ONLINE;

		/* restart tx timeout watcher */
		dp->timeout_id = timeout((void (*)(void *))gem_tx_timeout,
		    (void *)dp,
		    dp->gc.gc_tx_timeout_interval);

		/* now the nic is fully functional */
		if (dp->mii_state == MII_STATE_LINKUP) {
			if (gem_mac_start(dp) != GEM_SUCCESS) {
				mutex_exit(&dp->intrlock);
				goto err_reset;
			}
		}
		mutex_exit(&dp->intrlock);
	}

	return (DDI_SUCCESS);

err_reset:
	/*
	 * NOTE(review): this unwind cancels only intr_watcher_id; the tx
	 * timeout watcher (timeout_id) started above and the mii link
	 * watcher are left running, and ports resumed in earlier loop
	 * iterations stay up — confirm whether gem_mii_stop()/untimeout of
	 * timeout_id should also be done here.
	 */
	if (dp->intr_watcher_id) {
		while (untimeout(dp->intr_watcher_id) == -1)
			;
		dp->intr_watcher_id = 0;
	}
	mutex_enter(&dp->intrlock);
	(*dp->gc.gc_reset_chip)(dp);
	dp->nic_state = NIC_STATE_STOPPED;
	mutex_exit(&dp->intrlock);

err:
	return (DDI_FAILURE);
}
5269 5269
5270 5270 /*
5271 5271 * misc routines for PCI
5272 5272 */
5273 5273 uint8_t
5274 5274 gem_search_pci_cap(dev_info_t *dip,
5275 5275 ddi_acc_handle_t conf_handle, uint8_t target)
5276 5276 {
5277 5277 uint8_t pci_cap_ptr;
5278 5278 uint32_t pci_cap;
5279 5279
5280 5280 /* search power management capablities */
5281 5281 pci_cap_ptr = pci_config_get8(conf_handle, PCI_CONF_CAP_PTR);
5282 5282 while (pci_cap_ptr) {
5283 5283 /* read pci capability header */
5284 5284 pci_cap = pci_config_get32(conf_handle, pci_cap_ptr);
5285 5285 if ((pci_cap & 0xff) == target) {
5286 5286 /* found */
5287 5287 break;
5288 5288 }
5289 5289 /* get next_ptr */
5290 5290 pci_cap_ptr = (pci_cap >> 8) & 0xff;
5291 5291 }
5292 5292 return (pci_cap_ptr);
5293 5293 }
5294 5294
5295 5295 int
5296 5296 gem_pci_set_power_state(dev_info_t *dip,
5297 5297 ddi_acc_handle_t conf_handle, uint_t new_mode)
5298 5298 {
5299 5299 uint8_t pci_cap_ptr;
5300 5300 uint32_t pmcsr;
5301 5301 uint_t unit;
5302 5302 const char *drv_name;
5303 5303
5304 5304 ASSERT(new_mode < 4);
5305 5305
5306 5306 unit = ddi_get_instance(dip);
5307 5307 drv_name = ddi_driver_name(dip);
5308 5308
5309 5309 /* search power management capablities */
5310 5310 pci_cap_ptr = gem_search_pci_cap(dip, conf_handle, PCI_CAP_ID_PM);
5311 5311
5312 5312 if (pci_cap_ptr == 0) {
5313 5313 cmn_err(CE_CONT,
5314 5314 "!%s%d: doesn't have pci power management capability",
5315 5315 drv_name, unit);
5316 5316 return (DDI_FAILURE);
5317 5317 }
5318 5318
5319 5319 /* read power management capabilities */
5320 5320 pmcsr = pci_config_get32(conf_handle, pci_cap_ptr + PCI_PMCSR);
5321 5321
5322 5322 DPRINTF(0, (CE_CONT,
5323 5323 "!%s%d: pmc found at 0x%x: pmcsr: 0x%08x",
5324 5324 drv_name, unit, pci_cap_ptr, pmcsr));
5325 5325
5326 5326 /*
5327 5327 * Is the resuested power mode supported?
5328 5328 */
5329 5329 /* not yet */
5330 5330
5331 5331 /*
5332 5332 * move to new mode
5333 5333 */
5334 5334 pmcsr = (pmcsr & ~PCI_PMCSR_STATE_MASK) | new_mode;
5335 5335 pci_config_put32(conf_handle, pci_cap_ptr + PCI_PMCSR, pmcsr);
5336 5336
5337 5337 return (DDI_SUCCESS);
5338 5338 }
5339 5339
/*
 * gem_pci_regs_map_setup: map the device register set selected by the
 * requested address space type or config-space register offset.
 * The "reg" property is scanned for the first entry whose pci_phys_hi
 * matches `which' under `mask'; that entry is then mapped with
 * ddi_regs_map_setup().  On success *basep/*hp describe the mapping.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
gem_pci_regs_map_setup(dev_info_t *dip, uint32_t which, uint32_t mask,
    struct ddi_device_acc_attr *attrp,
    caddr_t *basep, ddi_acc_handle_t *hp)
{
	struct pci_phys_spec *regs;
	uint_t len;
	uint_t unit;
	uint_t n;
	uint_t i;
	int ret;
	const char *drv_name;

	unit = ddi_get_instance(dip);
	drv_name = ddi_driver_name(dip);

	/* Search IO-range or memory-range to be mapped */
	regs = NULL;
	len = 0;

	if ((ret = ddi_prop_lookup_int_array(
	    DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "reg", (void *)&regs, &len)) != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN,
		    "!%s%d: failed to get reg property (ret:%d)",
		    drv_name, unit, ret);
		return (DDI_FAILURE);
	}
	/* len counts ints; convert to the number of reg entries */
	n = len / (sizeof (struct pci_phys_spec) / sizeof (int));

	ASSERT(regs != NULL && len > 0);

#if GEM_DEBUG_LEVEL > 0
	for (i = 0; i < n; i++) {
		cmn_err(CE_CONT,
		    "!%s%d: regs[%d]: %08x.%08x.%08x.%08x.%08x",
		    drv_name, unit, i,
		    regs[i].pci_phys_hi,
		    regs[i].pci_phys_mid,
		    regs[i].pci_phys_low,
		    regs[i].pci_size_hi,
		    regs[i].pci_size_low);
	}
#endif
	for (i = 0; i < n; i++) {
		if ((regs[i].pci_phys_hi & mask) == which) {
			/* it's the requested space; i stays valid below */
			ddi_prop_free(regs);
			goto address_range_found;
		}
	}
	ddi_prop_free(regs);
	return (DDI_FAILURE);

address_range_found:
	/* map the whole range (offset 0, length 0 == entire rnumber i) */
	if ((ret = ddi_regs_map_setup(dip, i, basep, 0, 0, attrp, hp))
	    != DDI_SUCCESS) {
		cmn_err(CE_CONT,
		    "!%s%d: ddi_regs_map_setup failed (ret:%d)",
		    drv_name, unit, ret);
	}

	return (ret);
}
5408 5408
/*
 * gem_mod_init: thin wrapper over mac_init_ops(); called by the
 * leaf driver at module load to hook the GLDv3 framework into its
 * dev_ops vector.  Must be paired with gem_mod_fini().
 */
void
gem_mod_init(struct dev_ops *dop, char *name)
{
	mac_init_ops(dop, name);
}
5414 5414
/*
 * gem_mod_fini: thin wrapper over mac_fini_ops(); undoes
 * gem_mod_init() at module unload.
 */
void
gem_mod_fini(struct dev_ops *dop)
{
	mac_fini_ops(dop);
}
↓ open down ↓ |
202 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX