Print this page
5045 use atomic_{inc,dec}_* instead of atomic_add_*
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/chxge/pe.c
+++ new/usr/src/uts/common/io/chxge/pe.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 /*
↓ open down ↓ |
27 lines elided |
↑ open up ↑ |
28 28 * This file is part of the Chelsio T1 Ethernet driver.
29 29 *
30 30 * Copyright (C) 2003-2005 Chelsio Communications. All rights reserved.
31 31 */
32 32
33 33 /*
34 34 * Solaris Multithreaded STREAMS Chelsio PCI Ethernet Driver.
35 35 * Interface code
36 36 */
37 37
38 -#pragma ident "%Z%%M% %I% %E% SMI"
39 -
40 38 #include <sys/types.h>
41 39 #include <sys/systm.h>
42 40 #include <sys/cmn_err.h>
43 41 #include <sys/ddi.h>
44 42 #include <sys/sunddi.h>
45 43 #include <sys/byteorder.h>
46 44 #include <sys/atomic.h>
47 45 #include <sys/ethernet.h>
48 46 #if PE_PROFILING_ENABLED
49 47 #include <sys/time.h>
50 48 #endif
51 49 #include <sys/gld.h>
52 50 #include "ostypes.h"
53 51 #include "common.h"
54 52 #include "oschtoe.h"
55 53 #ifdef CONFIG_CHELSIO_T1_1G
56 54 #include "fpga_defs.h"
57 55 #endif
58 56 #include "regs.h"
59 57 #ifdef CONFIG_CHELSIO_T1_OFFLOAD
60 58 #include "mc3.h"
61 59 #include "mc4.h"
62 60 #endif
63 61 #include "sge.h"
64 62 #include "tp.h"
65 63 #ifdef CONFIG_CHELSIO_T1_OFFLOAD
66 64 #include "ulp.h"
67 65 #endif
68 66 #include "espi.h"
69 67 #include "elmer0.h"
70 68 #include "gmac.h"
71 69 #include "cphy.h"
72 70 #include "suni1x10gexp_regs.h"
73 71 #include "ch.h"
74 72
/* number of data bytes contained in a single mblk */
#define	MLEN(mp)	((mp)->b_wptr - (mp)->b_rptr)

/* receive-buffer accounting, defined elsewhere in the driver */
extern uint32_t buffers_in_use[];
extern kmutex_t in_use_l;
extern uint32_t in_use_index;

/* forward declarations for this file's static helpers */
static void link_start(ch_t *sa, struct pe_port_t *pp);
static ch_esb_t *ch_alloc_small_esbbuf(ch_t *sa, uint32_t i);
static ch_esb_t *ch_alloc_big_esbbuf(ch_t *sa, uint32_t i);
void ch_big_rbuf_recycle(ch_esb_t *rbp);
void ch_small_rbuf_recycle(ch_esb_t *rbp);
static const struct board_info *pe_sa_init(ch_t *sa);
static int ch_set_config_data(ch_t *chp);
void pe_rbuf_pool_free(ch_t *chp);
static void pe_free_driver_resources(ch_t *sa);
static void update_mtu_tab(ch_t *adapter);
static int pe_change_mtu(ch_t *chp);

/*
 * CPL5 Defines (from netinet/cpl5_commands.h)
 */
#define	FLITSTOBYTES	8

#define	CPL_FORMAT_0_SIZE	8
#define	CPL_FORMAT_1_SIZE	16
#define	CPL_FORMAT_2_SIZE	24
#define	CPL_FORMAT_3_SIZE	32
#define	CPL_FORMAT_4_SIZE	40
#define	CPL_FORMAT_5_SIZE	48

#define	TID_MASK	0xffffff

/* sentinel value meaning "auto-negotiate" for the link-speed tunable */
#define	PE_LINK_SPEED_AUTONEG	5

static int pe_small_rbuf_pool_init(ch_t *sa);
static int pe_big_rbuf_pool_init(ch_t *sa);
static int pe_make_fake_arp(ch_t *chp, unsigned char *arpp);
static uint32_t pe_get_ip(unsigned char *arpp);

/*
 * May be set in /etc/system to 0 to use default latency timer for 10G.
 * See PCI register 0xc definition.
 */
int enable_latency_timer = 1;

/*
 * May be set in /etc/system to 0 to disable hardware checksum for
 * TCP and UDP.
 */
int enable_checksum_offload = 1;

/*
 * Multiplier for freelist pool.
 */
int fl_sz_multiplier = 6;
131 129 uint_t
132 130 pe_intr(ch_t *sa)
133 131 {
134 132 mutex_enter(&sa->ch_intr);
135 133
136 134 if (sge_data_in(sa->sge)) {
137 135 sa->isr_intr++;
138 136 mutex_exit(&sa->ch_intr);
139 137 return (DDI_INTR_CLAIMED);
140 138 }
141 139
142 140 mutex_exit(&sa->ch_intr);
143 141
144 142 return (DDI_INTR_UNCLAIMED);
145 143 }
146 144
/*
 * Each setup struct will call this function to initialize.
 *
 * Bring up the device: start any downed links, run the one-time
 * hardware-module initialization, program checksum offload, start the
 * SGE, enable interrupts and set the MTU.  Called once per port; the
 * init_counter guard ensures the one-time work happens only on the
 * first call.
 */
void
pe_init(void* xsa)
{
	ch_t *sa = NULL;
	int i = 0;

	sa = (ch_t *)xsa;

	/*
	 * Need to count the number of times this routine is called
	 * because we only want the resources to be allocated once.
	 * The 7500 has four ports and so this routine can be called
	 * once for each port.
	 */
	if (sa->init_counter == 0) {
		for_each_port(sa, i) {

			/*
			 * We only want to initialize the line if it is down.
			 */
			if (sa->port[i].line_up == 0) {
				link_start(sa, &sa->port[i]);
				sa->port[i].line_up = 1;
			}
		}

		(void) t1_init_hw_modules(sa);

		/*
		 * Enable/Disable checksum offloading.
		 */
		if (sa->ch_config.cksum_enabled) {
			if (sa->config_data.offload_ip_cksum) {
				/* Notify that HW will do the checksum. */
				t1_tp_set_ip_checksum_offload(sa->tp, 1);
			}

			if (sa->config_data.offload_tcp_cksum) {
				/* Notify that HW will do the checksum. */
				t1_tp_set_tcp_checksum_offload(sa->tp, 1);
			}

			if (sa->config_data.offload_udp_cksum) {
				/* Notify that HW will do the checksum. */
				t1_tp_set_udp_checksum_offload(sa->tp, 1);
			}
		}

		sa->ch_flags |= PEINITDONE;

		sa->init_counter++;
	}

	/*
	 * Enable interrupts after starting the SGE so
	 * that the SGE is ready to handle interrupts.
	 */
	(void) sge_start(sa->sge);
	t1_interrupts_enable(sa);

	/*
	 * set mtu (either 1500 or bigger)
	 */
	(void) pe_change_mtu(sa);
#ifdef HOST_PAUSE
	/*
	 * get the configured value of the MAC.
	 */
	(void) t1_tpi_read(sa, SUNI1x10GEXP_REG_TXXG_CONFIG_1 << 2,
	    &sa->txxg_cfg1);
#endif
}
223 221
224 222 /* ARGSUSED */
225 223 static void
226 224 link_start(ch_t *sa, struct pe_port_t *p)
227 225 {
228 226 struct cmac *mac = p->mac;
229 227
230 228 mac->ops->reset(mac);
231 229 if (mac->ops->macaddress_set)
232 230 mac->ops->macaddress_set(mac, p->enaddr);
233 231 (void) t1_link_start(p->phy, mac, &p->link_config);
234 232 mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
235 233 }
236 234
/*
 * turn off interrupts...
 *
 * Mask device interrupts and halt the SGE, then synchronize with any
 * interrupt thread still executing in sge_data_in() before returning.
 */
void
pe_stop(ch_t *sa)
{
	t1_interrupts_disable(sa);
	(void) sge_stop(sa->sge);

	/*
	 * we can still be running an interrupt thread in sge_data_in().
	 * If we are, we'll block on the ch_intr lock.  Acquiring and
	 * immediately releasing the lock acts as a barrier: once we get
	 * it, no interrupt handler is running.
	 */
	mutex_enter(&sa->ch_intr);
	mutex_exit(&sa->ch_intr);
}
253 251
/*
 * output mblk to SGE level and out to the wire.
 *
 * pe_start() maps the mblk chain `mp` into DMA descriptors (cmdQ_ce_t
 * entries), prepends a CPL_TX_PKT header when the caller did not
 * supply one (CH_NO_CPL), and hands the result to sge_data_out().
 *
 * Returns 0 when the packet was queued (or dropped on a hard error —
 * the message is then freed and oerr counted), 1 when the SGE is out
 * of resources and the caller should retry later (ch_blked is set and
 * the original message is left intact for the caller).
 */

int
pe_start(ch_t *sa, mblk_t *mp, uint32_t flg)
{
	mblk_t *m0 = mp;
	cmdQ_ce_t cm[16];
	cmdQ_ce_t *cmp;
	cmdQ_ce_t *hmp = &cm[0];	/* head of cm table (may be kmem_alloed) */
	int cm_flg = 0;			/* flag (1 - if kmem-alloced) */
	int nseg = 0;			/* number cmdQ_ce entries created */
	int mseg = 16;			/* maximum entries in hmp arrary */
	int freeme = 0;			/* we have an mblk to free in case of error */
	uint32_t ch_bind_dma_handle(ch_t *, int, caddr_t, cmdQ_ce_t *,
	    uint32_t);
#if defined(__sparc)
	uint32_t ch_bind_dvma_handle(ch_t *, int, caddr_t, cmdQ_ce_t *,
	    uint32_t);
#endif
	int rv;			/* return value on error */

#ifdef CONFIG_CHELSIO_T1_OFFLOAD
	/* offload (TOE) path: buffer is already DMA-mapped in the tbuf */
	if (flg & CH_OFFLOAD) {
		hmp->ce_pa = ((tbuf_t *)mp)->tb_pa;
		hmp->ce_dh = NULL;
		hmp->ce_flg = DH_TOE;
		hmp->ce_len = ((tbuf_t *)mp)->tb_len;
		hmp->ce_mp = mp;

		/* make sure data is flushed to physical memory */
		(void) ddi_dma_sync((ddi_dma_handle_t)((tbuf_t *)mp)->tb_dh,
		    (off_t)0, hmp->ce_len, DDI_DMA_SYNC_FORDEV);

		if (sge_data_out(sa->sge, 0, mp, hmp, 1, flg) == 0) {
			return (0);
		}

		/*
		 * set a flag so we'll restart upper layer when
		 * resources become available.
		 */
		sa->ch_blked = 1;
		return (1);
	}
#endif	/* CONFIG_CHELSIO_T1_OFFLOAD */

	/* writes from toe will always have CPL header in place */
	if (flg & CH_NO_CPL) {
		struct cpl_tx_pkt *cpl;

		/* PR2928 & PR3309 */
		if (sa->ch_ip == NULL) {
			ushort_t ethertype = ntohs(*(short *)&mp->b_rptr[12]);
			if (ethertype == ETHERTYPE_ARP) {
				if (is_T2(sa)) {
					/*
					 * We assume here that the arp will be
					 * contained in one mblk.
					 */
					if (pe_make_fake_arp(sa, mp->b_rptr)) {
						freemsg(mp);
						sa->oerr++;
						return (0);
					}
				} else {
					sa->ch_ip = pe_get_ip(mp->b_rptr);
				}
			}
		}

		/*
		 * if space in front of packet big enough for CPL
		 * header, then use it. We'll allocate an mblk
		 * otherwise.
		 */
		if ((mp->b_rptr - mp->b_datap->db_base) >= SZ_CPL_TX_PKT) {

			mp->b_rptr -= SZ_CPL_TX_PKT;

		} else {

#ifdef SUN_KSTATS
			sa->sge->intr_cnt.tx_need_cpl_space++;
#endif
			m0 = allocb(SZ_CPL_TX_PKT, BPRI_HI);
			if (m0 == NULL) {
				freemsg(mp);
				sa->oerr++;
				return (0);
			}

			/* freshly allocated header mblk chained in front */
			m0->b_wptr = m0->b_rptr + SZ_CPL_TX_PKT;
			m0->b_cont = mp;
			freeme = 1;

			mp = m0;
		}

		/* fill in cpl header */
		cpl = (struct cpl_tx_pkt *)mp->b_rptr;
		cpl->opcode = CPL_TX_PKT;
		cpl->iff = 0;		/* XXX port 0 needs fixing with NEMO */
		cpl->ip_csum_dis = 1;	/* no IP header cksum */
		cpl->l4_csum_dis =
		    flg & CH_NO_HWCKSUM;	/* CH_NO_HWCKSUM == 1 */
		cpl->vlan_valid = 0;		/* no vlan */
	}

	if (m0->b_cont) {
		/* multi-mblk message: bind each non-empty fragment */

#ifdef SUN_KSTATS
		sa->sge->intr_cnt.tx_multi_mblks++;
#endif

		while (mp) {
			int lseg;	/* added by ch_bind_dma_handle() */
			int len;

			len = MLEN(mp);
			/* skip mlks with no data */
			if (len == 0) {
				mp = mp->b_cont;
				continue;
			}

			/*
			 * if we've run out of space on stack, then we
			 * allocate a temporary buffer to hold the
			 * information. This will kill the the performance,
			 * but since it shouldn't really occur, we can live
			 * with it. Since jumbo frames may map multiple
			 * descriptors, we reallocate the hmp[] array before
			 * we reach the end.
			 */
			if (nseg >= (mseg-4)) {
				cmdQ_ce_t *buf;
				int j;

				buf = kmem_alloc(sizeof (cmdQ_ce_t) * 2 * mseg,
				    KM_SLEEP);

				for (j = 0; j < nseg; j++)
					buf[j] = hmp[j];

				if (cm_flg) {
					kmem_free(hmp,
					    mseg * sizeof (cmdQ_ce_t));
				} else
					cm_flg = 1;

				hmp = buf;
				mseg = 2*mseg;

				/*
				 * We've used up ch table on stack
				 */
			}

#if defined(__sparc)
			if (sa->ch_config.enable_dvma) {
				lseg = ch_bind_dvma_handle(sa, len,
				    (void *)mp->b_rptr,
				    &hmp[nseg], mseg - nseg);
				if (lseg == NULL) {
					sa->sge->intr_cnt.tx_no_dvma1++;
					/* fall back to regular DMA binding */
					if ((lseg = ch_bind_dma_handle(sa, len,
					    (void *)mp->b_rptr,
					    &hmp[nseg],
					    mseg - nseg)) == NULL) {
						sa->sge->intr_cnt.tx_no_dma1++;

						/*
						 * ran out of space. Gonna bale
						 */
						rv = 0;

						/*
						 * we may have processed
						 * previous mblks and have
						 * descriptors. If so, we need
						 * to free the meta struct
						 * entries before freeing
						 * the mblk.
						 */
						if (nseg)
							goto error;
						goto error1;
					}
				}
			} else {
				lseg = ch_bind_dma_handle(sa, len,
				    (void *)mp->b_rptr, &hmp[nseg],
				    mseg - nseg);
				if (lseg == NULL) {
					sa->sge->intr_cnt.tx_no_dma1++;

					/*
					 * ran out of space. Gona bale
					 */
					rv = 0;

					/*
					 * we may have processed previous
					 * mblks and have descriptors. If so,
					 * we need to free the meta struct
					 * entries before freeing the mblk.
					 */
					if (nseg)
						goto error;
					goto error1;
				}
			}
#else	/* defined(__sparc) */
			lseg = ch_bind_dma_handle(sa, len,
			    (void *)mp->b_rptr, &hmp[nseg],
			    mseg - nseg);
			if (lseg == NULL) {
				sa->sge->intr_cnt.tx_no_dma1++;

				/*
				 * ran out of space. Gona bale
				 */
				rv = 0;

				/*
				 * we may have processed previous mblks and
				 * have descriptors. If so, we need to free
				 * the meta struct entries before freeing
				 * the mblk.
				 */
				if (nseg)
					goto error;
				goto error1;
			}
#endif	/* defined(__sparc) */
			nseg += lseg;
			mp = mp->b_cont;
		}

		/*
		 * SHOULD NEVER OCCUR, BUT...
		 * no data if nseg 0 or
		 * nseg 1 and a CPL mblk (CPL mblk only with offload mode)
		 * and no data
		 */
		if ((nseg == 0) || (freeme && (nseg == 1))) {
			rv = 0;
			goto error1;
		}

	} else {
		int len;

		/* we assume that we always have data with one packet */
		len = MLEN(mp);

#if defined(__sparc)
		if (sa->ch_config.enable_dvma) {
			nseg = ch_bind_dvma_handle(sa, len,
			    (void *)mp->b_rptr,
			    &hmp[0], 16);
			if (nseg == NULL) {
				sa->sge->intr_cnt.tx_no_dvma2++;
				/* fall back to regular DMA binding */
				nseg = ch_bind_dma_handle(sa, len,
				    (void *)mp->b_rptr,
				    &hmp[0], 16);
				if (nseg == NULL) {
					sa->sge->intr_cnt.tx_no_dma2++;

					/*
					 * ran out of space. Gona bale
					 */
					rv = 0;
					goto error1;
				}
			}
		} else {
			nseg = ch_bind_dma_handle(sa, len,
			    (void *)mp->b_rptr, &hmp[0], 16);
			if (nseg == NULL) {
				sa->sge->intr_cnt.tx_no_dma2++;

				/*
				 * ran out of space. Gona bale
				 */
				rv = 0;
				goto error1;
			}
		}
#else	/* defined(__sparc) */
		nseg = ch_bind_dma_handle(sa, len,
		    (void *)mp->b_rptr, &hmp[0], 16);
		if (nseg == NULL) {
			sa->sge->intr_cnt.tx_no_dma2++;

			/*
			 * ran out of space. Gona bale
			 */
			rv = 0;
			goto error1;
		}
#endif	/* defined(__sparc) */

		/*
		 * dummy arp message to handle PR3309 & PR2928
		 */
		if (flg & CH_ARP)
			hmp->ce_flg |= DH_ARP;
	}

	if (sge_data_out(sa->sge, 0, m0, hmp, nseg, flg) == 0) {
		if (cm_flg)
			kmem_free(hmp, mseg * sizeof (cmdQ_ce_t));
		return (0);
	}

	/*
	 * set a flag so we'll restart upper layer when
	 * resources become available.
	 */
	if ((flg & CH_ARP) == 0)
		sa->ch_blked = 1;
	rv = 1;

error:
	/*
	 * unmap the physical addresses allocated earlier.
	 */
	cmp = hmp;
	for (--nseg; nseg >= 0; nseg--) {
		if (cmp->ce_dh) {
			if (cmp->ce_flg == DH_DMA)
				ch_unbind_dma_handle(sa, cmp->ce_dh);
#if defined(__sparc)
			else
				ch_unbind_dvma_handle(sa, cmp->ce_dh);
#endif
		}
		cmp++;
	}

error1:

	/* free the temporary array */
	if (cm_flg)
		kmem_free(hmp, mseg * sizeof (cmdQ_ce_t));

	/*
	 * if we've allocated an mblk above, then we need to free it
	 * before returning. This is safe since we haven't done anything to
	 * the original message. The caller, gld, will still have a pointer
	 * to the original mblk.
	 */
	if (rv == 1) {
		if (freeme) {
			/* we had to allocate an mblk. Free it. */
			freeb(m0);
		} else {
			/* adjust the mblk back to original start */
			if (flg & CH_NO_CPL)
				m0->b_rptr += SZ_CPL_TX_PKT;
		}
	} else {
		freemsg(m0);
		sa->oerr++;
	}

	return (rv);
}
625 623
626 624 /* KLUDGE ALERT. HARD WIRED TO PORT ZERO */
627 625 void
628 626 pe_set_mac(ch_t *sa, unsigned char *ac_enaddr)
629 627 {
630 628 sa->port[0].mac->ops->macaddress_set(sa->port[0].mac, ac_enaddr);
631 629 }
632 630
633 631 /* KLUDGE ALERT. HARD WIRED TO PORT ZERO */
634 632 unsigned char *
635 633 pe_get_mac(ch_t *sa)
636 634 {
637 635 return (sa->port[0].enaddr);
638 636 }
639 637
640 638 /* KLUDGE ALERT. HARD WIRED TO ONE PORT */
641 639 void
642 640 pe_set_promiscuous(ch_t *sa, int flag)
643 641 {
644 642 struct cmac *mac = sa->port[0].mac;
645 643 struct t1_rx_mode rm;
646 644
647 645 switch (flag) {
648 646 case 0: /* turn off promiscuous mode */
649 647 sa->ch_flags &= ~(PEPROMISC|PEALLMULTI);
650 648 break;
651 649
652 650 case 1: /* turn on promiscuous mode */
653 651 sa->ch_flags |= PEPROMISC;
654 652 break;
655 653
656 654 case 2: /* turn on multicast reception */
657 655 sa->ch_flags |= PEALLMULTI;
658 656 break;
659 657 }
660 658
661 659 mutex_enter(&sa->ch_mc_lck);
662 660 rm.chp = sa;
663 661 rm.mc = sa->ch_mc;
664 662
665 663 mac->ops->set_rx_mode(mac, &rm);
666 664 mutex_exit(&sa->ch_mc_lck);
667 665 }
668 666
669 667 int
670 668 pe_set_mc(ch_t *sa, uint8_t *ep, int flg)
671 669 {
672 670 struct cmac *mac = sa->port[0].mac;
673 671 struct t1_rx_mode rm;
674 672
675 673 if (flg == GLD_MULTI_ENABLE) {
676 674 ch_mc_t *mcp;
677 675
678 676 mcp = (ch_mc_t *)kmem_zalloc(sizeof (struct ch_mc),
679 677 KM_NOSLEEP);
680 678 if (mcp == NULL)
681 679 return (GLD_NORESOURCES);
682 680
683 681 bcopy(ep, &mcp->cmc_mca, 6);
684 682
685 683 mutex_enter(&sa->ch_mc_lck);
686 684 mcp->cmc_next = sa->ch_mc;
687 685 sa->ch_mc = mcp;
688 686 sa->ch_mc_cnt++;
689 687 mutex_exit(&sa->ch_mc_lck);
690 688
691 689 } else if (flg == GLD_MULTI_DISABLE) {
692 690 ch_mc_t **p = &sa->ch_mc;
693 691 ch_mc_t *q = NULL;
694 692
695 693 mutex_enter(&sa->ch_mc_lck);
696 694 p = &sa->ch_mc;
697 695 while (*p) {
698 696 if (bcmp(ep, (*p)->cmc_mca, 6) == 0) {
699 697 q = *p;
700 698 *p = (*p)->cmc_next;
701 699 kmem_free(q, sizeof (*q));
702 700 sa->ch_mc_cnt--;
703 701 break;
704 702 }
705 703
706 704 p = &(*p)->cmc_next;
707 705 }
708 706 mutex_exit(&sa->ch_mc_lck);
709 707
710 708 if (q == NULL)
711 709 return (GLD_BADARG);
712 710 } else
713 711 return (GLD_BADARG);
714 712
715 713 mutex_enter(&sa->ch_mc_lck);
716 714 rm.chp = sa;
717 715 rm.mc = sa->ch_mc;
718 716
719 717 mac->ops->set_rx_mode(mac, &rm);
720 718 mutex_exit(&sa->ch_mc_lck);
721 719
722 720 return (GLD_SUCCESS);
723 721 }
724 722
/*
 * return: speed - bandwidth of interface
 * return: intrcnt - # interrupts
 * return: norcvbuf - # received packets dropped by driver
 * return: oerrors - # bad send packets
 * return: ierrors - # bad receive packets
 * return: underrun - # bad underrun xmit packets
 * return: overrun - # bad overrun recv packets
 * return: framing - # bad aligned recv packets
 * return: crc - # bad FCS (crc) recv packets
 * return: carrier - times carrier was lost
 * return: collisions - # xmit collisions
 * return: xcollisions - # xmit pkts dropped due to collisions
 * return: late - # late xmit collisions
 * return: defer - # deferred xmit packets
 * return: xerrs - # xmit dropped packets
 * return: rerrs - # recv dropped packets
 * return: toolong - # recv pkts too long
 * return: runt - # recv runt pkts
 * return: multixmt - # multicast pkts xmitted
 * return: multircv - # multicast pkts recved
 * return: brdcstxmt - # broadcast pkts xmitted
 * return: brdcstrcv - # broadcast pkts rcv
 */
749 747
750 748 int
751 749 pe_get_stats(ch_t *sa, uint64_t *speed, uint32_t *intrcnt, uint32_t *norcvbuf,
752 750 uint32_t *oerrors, uint32_t *ierrors, uint32_t *underrun,
753 751 uint32_t *overrun, uint32_t *framing, uint32_t *crc,
754 752 uint32_t *carrier, uint32_t *collisions, uint32_t *xcollisions,
755 753 uint32_t *late, uint32_t *defer, uint32_t *xerrs, uint32_t *rerrs,
756 754 uint32_t *toolong, uint32_t *runt, ulong_t *multixmt, ulong_t *multircv,
757 755 ulong_t *brdcstxmt, ulong_t *brdcstrcv)
758 756 {
759 757 struct pe_port_t *pt;
760 758 int line_speed;
761 759 int line_duplex;
762 760 int line_is_active;
763 761 uint64_t v;
764 762 const struct cmac_statistics *sp;
765 763
766 764 pt = &(sa->port[0]);
767 765 (void) pt->phy->ops->get_link_status(pt->phy,
768 766 &line_is_active, &line_speed, &line_duplex, NULL);
769 767
770 768 switch (line_speed) {
771 769 case SPEED_10:
772 770 *speed = 10000000;
773 771 break;
774 772 case SPEED_100:
775 773 *speed = 100000000;
776 774 break;
777 775 case SPEED_1000:
778 776 *speed = 1000000000;
779 777 break;
780 778 case SPEED_10000:
781 779 /*
782 780 * kludge to get 10,000,000,000 constant (and keep
783 781 * compiler happy).
784 782 */
785 783 v = 10000000;
786 784 v *= 1000;
787 785 *speed = v;
788 786 break;
789 787 default:
790 788 goto error;
791 789 }
792 790
793 791 *intrcnt = sa->isr_intr;
794 792 *norcvbuf = sa->norcvbuf;
795 793
796 794 sp = sa->port[0].mac->ops->statistics_update(sa->port[0].mac,
797 795 MAC_STATS_UPDATE_FULL);
798 796
799 797 *ierrors = sp->RxOctetsBad;
800 798
801 799 /*
802 800 * not sure this is correct. # aborted at driver level +
803 801 * # at hardware level
804 802 */
805 803 *oerrors = sa->oerr + sp->TxFramesAbortedDueToXSCollisions +
806 804 sp->TxUnderrun + sp->TxLengthErrors +
807 805 sp->TxInternalMACXmitError +
808 806 sp->TxFramesWithExcessiveDeferral +
809 807 sp->TxFCSErrors;
810 808
811 809 *underrun = sp->TxUnderrun;
812 810 *overrun = sp->RxFrameTooLongErrors;
813 811 *framing = sp->RxAlignErrors;
814 812 *crc = sp->RxFCSErrors;
815 813 *carrier = 0; /* need to find this */
816 814 *collisions = sp->TxTotalCollisions;
817 815 *xcollisions = sp->TxFramesAbortedDueToXSCollisions;
818 816 *late = sp->TxLateCollisions;
819 817 *defer = sp->TxFramesWithDeferredXmissions;
820 818 *xerrs = sp->TxUnderrun + sp->TxLengthErrors +
821 819 sp->TxInternalMACXmitError + sp->TxFCSErrors;
822 820 *rerrs = sp->RxSymbolErrors + sp->RxSequenceErrors + sp->RxRuntErrors +
823 821 sp->RxJabberErrors + sp->RxInternalMACRcvError +
824 822 sp->RxInRangeLengthErrors + sp->RxOutOfRangeLengthField;
825 823 *toolong = sp->RxFrameTooLongErrors;
826 824 *runt = sp->RxRuntErrors;
827 825
828 826 *multixmt = sp->TxMulticastFramesOK;
829 827 *multircv = sp->RxMulticastFramesOK;
830 828 *brdcstxmt = sp->TxBroadcastFramesOK;
831 829 *brdcstrcv = sp->RxBroadcastFramesOK;
832 830
833 831 return (0);
834 832
835 833 error:
836 834 *speed = 0;
837 835 *intrcnt = 0;
838 836 *norcvbuf = 0;
839 837 *norcvbuf = 0;
840 838 *oerrors = 0;
841 839 *ierrors = 0;
842 840 *underrun = 0;
843 841 *overrun = 0;
844 842 *framing = 0;
845 843 *crc = 0;
846 844 *carrier = 0;
847 845 *collisions = 0;
848 846 *xcollisions = 0;
849 847 *late = 0;
850 848 *defer = 0;
851 849 *xerrs = 0;
852 850 *rerrs = 0;
853 851 *toolong = 0;
854 852 *runt = 0;
855 853 *multixmt = 0;
856 854 *multircv = 0;
857 855 *brdcstxmt = 0;
858 856 *brdcstrcv = 0;
859 857
860 858 return (1);
861 859 }
862 860
/*
 * Driver tunables.  These may be overridden in /etc/system; their
 * values are cached per-device by ch_set_config_data().
 */
uint32_t ch_gtm = 0;			/* Default: Global Tunnel Mode off */
uint32_t ch_global_config = 0x07000000;	/* Default: errors, warnings, status */
uint32_t ch_is_asic = 0;		/* Default: non-ASIC */
uint32_t ch_link_speed = PE_LINK_SPEED_AUTONEG;	/* Default: auto-negotiate */
uint32_t ch_num_of_ports = 1;		/* Default: 1 port */
uint32_t ch_tp_reset_cm = 1;		/* Default: reset CM memory map */
uint32_t ch_phy_tx_fifo = 0;		/* Default: 0 phy tx fifo depth */
uint32_t ch_phy_rx_fifo = 0;		/* Default: 0 phy rx fifo depth */
uint32_t ch_phy_force_master = 1;	/* Default: link always master mode */
uint32_t ch_mc5_rtbl_size = 2048;	/* Default: TCAM routing table size */
uint32_t ch_mc5_dbsvr_size = 128;	/* Default: TCAM server size */
uint32_t ch_mc5_parity = 1;		/* Default: parity error checking */
uint32_t ch_mc5_issue_syn = 0;		/* Default: Allow transaction overlap */
uint32_t ch_packet_tracing = 0;		/* Default: no packet tracing */
uint32_t ch_server_region_len =
	DEFAULT_SERVER_REGION_LEN;
uint32_t ch_rt_region_len =
	DEFAULT_RT_REGION_LEN;
uint32_t ch_offload_ip_cksum = 0;	/* Default: no checksum offloading */
uint32_t ch_offload_udp_cksum = 1;	/* Default: offload UDP checksum */
uint32_t ch_offload_tcp_cksum = 1;	/* Default: offload TCP checksum */
uint32_t ch_sge_cmdq_threshold = 0;	/* Default: threshold 0 */
uint32_t ch_sge_flq_threshold = 0;	/* Default: SGE flq threshold */
uint32_t ch_sge_cmdq0_cnt =		/* Default: cmd queue 0 size */
	SGE_CMDQ0_CNT;
uint32_t ch_sge_cmdq1_cnt =		/* Default: cmd queue 1 size */
	SGE_CMDQ0_CNT;
uint32_t ch_sge_flq0_cnt =		/* Default: free list queue-0 length */
	SGE_FLQ0_CNT;
uint32_t ch_sge_flq1_cnt =		/* Default: free list queue-1 length */
	SGE_FLQ0_CNT;
uint32_t ch_sge_respq_cnt =		/* Default: response queue size */
	SGE_RESPQ_CNT;
uint32_t ch_stats = 1;			/* Default: Automatic Update MAC stats */
uint32_t ch_tx_delay_us = 0;		/* Default: No Msec delay to Tx pkts */
int32_t ch_chip = -1;			/* Default: use hardware lookup tbl */
uint32_t ch_exit_early = 0;		/* Default: complete initialization */
uint32_t ch_rb_num_of_entries = 1000;	/* Default: number ring buffer entries */
uint32_t ch_rb_size_of_entries = 64;	/* Default: ring buffer entry size */
uint32_t ch_rb_flag = 1;		/* Default: ring buffer flag */
uint32_t ch_type;			/* board/chip type, set at attach */
uint64_t ch_cat_opt0 = 0;
uint64_t ch_cat_opt1 = 0;
uint32_t ch_timer_delay = 0;		/* Default: use value from board entry */
907 905
908 906 int
909 907 pe_attach(ch_t *chp)
910 908 {
911 909 int return_val = 1;
912 910 const struct board_info *bi;
913 911 uint32_t pcix_cmd;
914 912
915 913 (void) ch_set_config_data(chp);
916 914
917 915 bi = pe_sa_init(chp);
918 916 if (bi == 0)
919 917 return (1);
920 918
921 919 if (t1_init_sw_modules(chp, bi) < 0)
922 920 return (1);
923 921
924 922 if (pe_small_rbuf_pool_init(chp) == NULL)
925 923 return (1);
926 924
927 925 if (pe_big_rbuf_pool_init(chp) == NULL)
928 926 return (1);
929 927
930 928 /*
931 929 * We gain significaint performance improvements when we
932 930 * increase the PCI's maximum memory read byte count to
933 931 * 2K(HW doesn't support 4K at this time) and set the PCI's
934 932 * maximum outstanding split transactions to 4. We want to do
935 933 * this for 10G. Done by software utility.
936 934 */
937 935
938 936 if (board_info(chp)->caps & SUPPORTED_10000baseT_Full) {
939 937 (void) t1_os_pci_read_config_4(chp, A_PCICFG_PCIX_CMD,
940 938 &pcix_cmd);
941 939 /*
942 940 * if the burstsize is set, then use it instead of default
943 941 */
944 942 if (chp->ch_config.burstsize_set) {
945 943 pcix_cmd &= ~0xc0000;
946 944 pcix_cmd |= (chp->ch_config.burstsize << 18);
947 945 }
948 946 /*
949 947 * if the split transaction count is set, then use it.
950 948 */
951 949 if (chp->ch_config.transaction_cnt_set) {
952 950 pcix_cmd &= ~ 0x700000;
953 951 pcix_cmd |= (chp->ch_config.transaction_cnt << 20);
954 952 }
955 953
956 954 /*
957 955 * set ralaxed ordering flag as configured in chxge.conf
958 956 */
959 957 pcix_cmd |= (chp->ch_config.relaxed_ordering << 17);
960 958
961 959 (void) t1_os_pci_write_config_4(chp, A_PCICFG_PCIX_CMD,
962 960 pcix_cmd);
963 961 }
964 962
965 963 /*
966 964 * set the latency time to F8 for 10G cards.
967 965 * Done by software utiltiy.
968 966 */
969 967 if (enable_latency_timer) {
970 968 if (board_info(chp)->caps & SUPPORTED_10000baseT_Full) {
971 969 (void) t1_os_pci_write_config_4(chp, 0xc, 0xf800);
972 970 }
973 971 }
974 972
975 973 /*
976 974 * update mtu table (regs: 0x404 - 0x420) with bigger values than
977 975 * default.
978 976 */
979 977 update_mtu_tab(chp);
980 978
981 979 /*
982 980 * Clear all interrupts now. Don't enable
983 981 * them until later.
984 982 */
985 983 t1_interrupts_clear(chp);
986 984
987 985 /*
988 986 * Function succeeded.
989 987 */
990 988 return_val = 0;
991 989
992 990 return (return_val);
993 991 }
994 992
/*
 * DESC: Read the driver tunable variables (settable in /etc/system;
 *	the original comment referenced /boot/loader.conf) and cache
 *	them in the per-device config structure. These cached values
 *	are then used to make decisions at run-time on behavior, thus
 *	allowing a certain level of customization.
 * OUT: p_config - pointer to config structure that
 *	contains all of the new values.
 * RTN: 0 - Success;
 */
static int
ch_set_config_data(ch_t *chp)
{
	pe_config_data_t *p_config = (pe_config_data_t *)&chp->config_data;

	/* start from a clean slate so unset fields read as zero */
	bzero(p_config, sizeof (pe_config_data_t));

	/*
	 * Global Tunnel Mode configuration
	 */
	p_config->gtm = ch_gtm;

	p_config->global_config = ch_global_config;

	if (p_config->gtm)
		p_config->global_config |= CFGMD_TUNNEL;

	p_config->tp_reset_cm = ch_tp_reset_cm;
	p_config->is_asic = ch_is_asic;

	/*
	 * MC5 (TCAM) configuration.
	 */
	p_config->mc5_rtbl_size = ch_mc5_rtbl_size;
	p_config->mc5_dbsvr_size = ch_mc5_dbsvr_size;
	p_config->mc5_parity = ch_mc5_parity;
	p_config->mc5_issue_syn = ch_mc5_issue_syn;

	/* checksum offload defaults */
	p_config->offload_ip_cksum = ch_offload_ip_cksum;
	p_config->offload_udp_cksum = ch_offload_udp_cksum;
	p_config->offload_tcp_cksum = ch_offload_tcp_cksum;

	p_config->packet_tracing = ch_packet_tracing;

	p_config->server_region_len = ch_server_region_len;
	p_config->rt_region_len = ch_rt_region_len;

	/*
	 * Link configuration.
	 *
	 * 5-auto-neg 2-1000Gbps; 1-100Gbps; 0-10Gbps
	 */
	p_config->link_speed = ch_link_speed;
	p_config->num_of_ports = ch_num_of_ports;

	/*
	 * Catp options
	 */
	p_config->cat_opt0 = ch_cat_opt0;
	p_config->cat_opt1 = ch_cat_opt1;

	/*
	 * SGE configuration.
	 */
	p_config->sge_cmdq0_cnt = ch_sge_cmdq0_cnt;
	p_config->sge_cmdq1_cnt = ch_sge_cmdq1_cnt;
	p_config->sge_flq0_cnt = ch_sge_flq0_cnt;
	p_config->sge_flq1_cnt = ch_sge_flq1_cnt;
	p_config->sge_respq_cnt = ch_sge_respq_cnt;

	p_config->phy_rx_fifo = ch_phy_rx_fifo;
	p_config->phy_tx_fifo = ch_phy_tx_fifo;

	p_config->sge_cmdq_threshold = ch_sge_cmdq_threshold;

	p_config->sge_flq_threshold = ch_sge_flq_threshold;

	p_config->phy_force_master = ch_phy_force_master;

	p_config->rb_num_of_entries = ch_rb_num_of_entries;

	p_config->rb_size_of_entries = ch_rb_size_of_entries;

	p_config->rb_flag = ch_rb_flag;

	p_config->exit_early = ch_exit_early;

	p_config->chip = ch_chip;

	p_config->stats = ch_stats;

	p_config->tx_delay_us = ch_tx_delay_us;

	return (0);
}
1089 1087
1090 1088 static const struct board_info *
1091 1089 pe_sa_init(ch_t *sa)
1092 1090 {
1093 1091 uint16_t device_id;
1094 1092 uint16_t device_subid;
1095 1093 const struct board_info *bi;
1096 1094
1097 1095 sa->config = sa->config_data.global_config;
1098 1096 device_id = pci_config_get16(sa->ch_hpci, 2);
1099 1097 device_subid = pci_config_get16(sa->ch_hpci, 0x2e);
1100 1098
1101 1099 bi = t1_get_board_info_from_ids(device_id, device_subid);
1102 1100 if (bi == NULL) {
1103 1101 cmn_err(CE_NOTE,
1104 1102 "The adapter with device_id %d %d is not supported.\n",
1105 1103 device_id, device_subid);
1106 1104 return (NULL);
1107 1105 }
1108 1106
1109 1107 if (t1_get_board_rev(sa, bi, &sa->params)) {
1110 1108 cmn_err(CE_NOTE, "unknown device_id %d %d\n",
1111 1109 device_id, device_subid);
1112 1110 return ((const struct board_info *)NULL);
1113 1111 }
1114 1112
1115 1113 return (bi);
1116 1114 }
1117 1115
1118 1116 /*
1119 1117 * allocate pool of small receive buffers (with vaddr & paddr) and
1120 1118 * receiver buffer control structure (ch_esb_t *rbp).
1121 1119 * XXX we should allow better tuning of the # of preallocated
1122 1120 * free buffers against the # of freelist entries.
1123 1121 */
1124 1122 static int
1125 1123 pe_small_rbuf_pool_init(ch_t *sa)
1126 1124 {
1127 1125 int i;
1128 1126 ch_esb_t *rbp;
1129 1127 extern uint32_t sge_flq0_cnt;
1130 1128 extern uint32_t sge_flq1_cnt;
1131 1129 int size;
1132 1130 uint32_t j;
1133 1131
1134 1132 if (is_T2(sa))
1135 1133 size = sge_flq1_cnt * fl_sz_multiplier;
1136 1134 else
1137 1135 size = sge_flq0_cnt * fl_sz_multiplier;
1138 1136
1139 1137 mutex_init(&sa->ch_small_esbl, NULL, MUTEX_DRIVER, sa->ch_icookp);
1140 1138
1141 1139 mutex_enter(&in_use_l);
1142 1140 j = in_use_index++;
1143 1141 if (in_use_index >= SZ_INUSE)
1144 1142 in_use_index = 0;
1145 1143 mutex_exit(&in_use_l);
1146 1144
1147 1145 sa->ch_small_owner = NULL;
1148 1146 sa->ch_sm_index = j;
1149 1147 sa->ch_small_esb_free = NULL;
1150 1148 for (i = 0; i < size; i++) {
1151 1149 rbp = ch_alloc_small_esbbuf(sa, j);
1152 1150 if (rbp == NULL)
1153 1151 goto error;
1154 1152 /*
1155 1153 * add entry to free list
1156 1154 */
1157 1155 rbp->cs_next = sa->ch_small_esb_free;
1158 1156 sa->ch_small_esb_free = rbp;
1159 1157
1160 1158 /*
1161 1159 * add entry to owned list
1162 1160 */
1163 1161 rbp->cs_owner = sa->ch_small_owner;
1164 1162 sa->ch_small_owner = rbp;
1165 1163 }
1166 1164 return (1);
1167 1165
1168 1166 error:
1169 1167 sa->ch_small_owner = NULL;
1170 1168
1171 1169 /* free whatever we've already allocated */
1172 1170 pe_rbuf_pool_free(sa);
1173 1171
1174 1172 return (0);
1175 1173 }
1176 1174
1177 1175 /*
1178 1176 * allocate pool of receive buffers (with vaddr & paddr) and
1179 1177 * receiver buffer control structure (ch_esb_t *rbp).
1180 1178 * XXX we should allow better tuning of the # of preallocated
1181 1179 * free buffers against the # of freelist entries.
1182 1180 */
1183 1181 static int
1184 1182 pe_big_rbuf_pool_init(ch_t *sa)
1185 1183 {
1186 1184 int i;
1187 1185 ch_esb_t *rbp;
1188 1186 extern uint32_t sge_flq0_cnt;
1189 1187 extern uint32_t sge_flq1_cnt;
1190 1188 int size;
1191 1189 uint32_t j;
1192 1190
1193 1191 if (is_T2(sa))
1194 1192 size = sge_flq0_cnt * fl_sz_multiplier;
1195 1193 else
1196 1194 size = sge_flq1_cnt * fl_sz_multiplier;
1197 1195
1198 1196 mutex_init(&sa->ch_big_esbl, NULL, MUTEX_DRIVER, sa->ch_icookp);
1199 1197
1200 1198 mutex_enter(&in_use_l);
1201 1199 j = in_use_index++;
1202 1200 if (in_use_index >= SZ_INUSE)
1203 1201 in_use_index = 0;
1204 1202 mutex_exit(&in_use_l);
1205 1203
1206 1204 sa->ch_big_owner = NULL;
1207 1205 sa->ch_big_index = j;
1208 1206 sa->ch_big_esb_free = NULL;
1209 1207 for (i = 0; i < size; i++) {
1210 1208 rbp = ch_alloc_big_esbbuf(sa, j);
1211 1209 if (rbp == NULL)
1212 1210 goto error;
1213 1211 rbp->cs_next = sa->ch_big_esb_free;
1214 1212 sa->ch_big_esb_free = rbp;
1215 1213
1216 1214 /*
1217 1215 * add entry to owned list
1218 1216 */
1219 1217 rbp->cs_owner = sa->ch_big_owner;
1220 1218 sa->ch_big_owner = rbp;
1221 1219 }
1222 1220 return (1);
1223 1221
1224 1222 error:
1225 1223 sa->ch_big_owner = NULL;
1226 1224
1227 1225 /* free whatever we've already allocated */
1228 1226 pe_rbuf_pool_free(sa);
1229 1227
1230 1228 return (0);
1231 1229 }
1232 1230
1233 1231 /*
1234 1232 * allocate receive buffer structure and dma mapped buffer (SGE_SM_BUF_SZ bytes)
1235 1233 * note that we will DMA at a 2 byte offset for Solaris when checksum offload
1236 1234 * is enabled.
1237 1235 */
1238 1236 static ch_esb_t *
1239 1237 ch_alloc_small_esbbuf(ch_t *sa, uint32_t i)
1240 1238 {
1241 1239 ch_esb_t *rbp;
1242 1240
1243 1241 rbp = (ch_esb_t *)kmem_zalloc(sizeof (ch_esb_t), KM_SLEEP);
1244 1242 if (rbp == NULL) {
1245 1243 return ((ch_esb_t *)0);
1246 1244 }
1247 1245
1248 1246 #if BYTE_ORDER == BIG_ENDIAN
1249 1247 rbp->cs_buf = (caddr_t)ch_alloc_dma_mem(sa, 1, DMA_STREAM|DMA_SMALN,
1250 1248 SGE_SM_BUF_SZ(sa), &rbp->cs_pa, &rbp->cs_dh, &rbp->cs_ah);
1251 1249 #else
1252 1250 rbp->cs_buf = (caddr_t)ch_alloc_dma_mem(sa, 0, DMA_STREAM|DMA_SMALN,
1253 1251 SGE_SM_BUF_SZ(sa), &rbp->cs_pa, &rbp->cs_dh, &rbp->cs_ah);
1254 1252 #endif
1255 1253
1256 1254 if (rbp->cs_buf == NULL) {
1257 1255 kmem_free(rbp, sizeof (ch_esb_t));
1258 1256 return ((ch_esb_t *)0);
1259 1257 }
1260 1258
1261 1259 rbp->cs_sa = sa;
1262 1260 rbp->cs_index = i;
1263 1261
1264 1262 rbp->cs_frtn.free_func = (void (*)())&ch_small_rbuf_recycle;
1265 1263 rbp->cs_frtn.free_arg = (caddr_t)rbp;
1266 1264
1267 1265 return (rbp);
1268 1266 }
1269 1267
1270 1268 /*
1271 1269 * allocate receive buffer structure and dma mapped buffer (SGE_BG_BUF_SZ bytes)
1272 1270 * note that we will DMA at a 2 byte offset for Solaris when checksum offload
1273 1271 * is enabled.
1274 1272 */
1275 1273 static ch_esb_t *
1276 1274 ch_alloc_big_esbbuf(ch_t *sa, uint32_t i)
1277 1275 {
1278 1276 ch_esb_t *rbp;
1279 1277
1280 1278 rbp = (ch_esb_t *)kmem_zalloc(sizeof (ch_esb_t), KM_SLEEP);
1281 1279 if (rbp == NULL) {
1282 1280 return ((ch_esb_t *)0);
1283 1281 }
1284 1282
1285 1283 #if BYTE_ORDER == BIG_ENDIAN
1286 1284 rbp->cs_buf = (caddr_t)ch_alloc_dma_mem(sa, 1, DMA_STREAM|DMA_BGALN,
1287 1285 SGE_BG_BUF_SZ(sa), &rbp->cs_pa, &rbp->cs_dh, &rbp->cs_ah);
1288 1286 #else
1289 1287 rbp->cs_buf = (caddr_t)ch_alloc_dma_mem(sa, 0, DMA_STREAM|DMA_BGALN,
1290 1288 SGE_BG_BUF_SZ(sa), &rbp->cs_pa, &rbp->cs_dh, &rbp->cs_ah);
1291 1289 #endif
1292 1290
1293 1291 if (rbp->cs_buf == NULL) {
1294 1292 kmem_free(rbp, sizeof (ch_esb_t));
1295 1293 return ((ch_esb_t *)0);
1296 1294 }
1297 1295
1298 1296 rbp->cs_sa = sa;
1299 1297 rbp->cs_index = i;
1300 1298
1301 1299 rbp->cs_frtn.free_func = (void (*)())&ch_big_rbuf_recycle;
1302 1300 rbp->cs_frtn.free_arg = (caddr_t)rbp;
1303 1301
1304 1302 return (rbp);
1305 1303 }
1306 1304
1307 1305 /*
1308 1306 * free entries on the receive buffer list.
1309 1307 */
1310 1308 void
1311 1309 pe_rbuf_pool_free(ch_t *sa)
1312 1310 {
1313 1311 ch_esb_t *rbp;
1314 1312
1315 1313 mutex_enter(&sa->ch_small_esbl);
1316 1314
1317 1315 /*
1318 1316 * Now set-up the rest to commit suicide.
1319 1317 */
1320 1318 while (sa->ch_small_owner) {
1321 1319 rbp = sa->ch_small_owner;
1322 1320 sa->ch_small_owner = rbp->cs_owner;
1323 1321 rbp->cs_owner = NULL;
1324 1322 rbp->cs_flag = 1;
1325 1323 }
1326 1324
1327 1325 while ((rbp = sa->ch_small_esb_free) != NULL) {
1328 1326 /* advance head ptr to next entry */
1329 1327 sa->ch_small_esb_free = rbp->cs_next;
1330 1328 /* free private buffer allocated in ch_alloc_esbbuf() */
1331 1329 ch_free_dma_mem(rbp->cs_dh, rbp->cs_ah);
1332 1330 		/* free descriptor buffer */
1333 1331 kmem_free(rbp, sizeof (ch_esb_t));
1334 1332 }
1335 1333
1336 1334 mutex_exit(&sa->ch_small_esbl);
1337 1335
1338 1336 /* destroy ch_esbl lock */
1339 1337 mutex_destroy(&sa->ch_small_esbl);
1340 1338
1341 1339
1342 1340 mutex_enter(&sa->ch_big_esbl);
1343 1341
1344 1342 /*
1345 1343 * Now set-up the rest to commit suicide.
1346 1344 */
1347 1345 while (sa->ch_big_owner) {
1348 1346 rbp = sa->ch_big_owner;
1349 1347 sa->ch_big_owner = rbp->cs_owner;
1350 1348 rbp->cs_owner = NULL;
1351 1349 rbp->cs_flag = 1;
1352 1350 }
1353 1351
1354 1352 while ((rbp = sa->ch_big_esb_free) != NULL) {
1355 1353 /* advance head ptr to next entry */
1356 1354 sa->ch_big_esb_free = rbp->cs_next;
1357 1355 /* free private buffer allocated in ch_alloc_esbbuf() */
1358 1356 ch_free_dma_mem(rbp->cs_dh, rbp->cs_ah);
1360 1358 		/* free descriptor buffer */
1360 1358 kmem_free(rbp, sizeof (ch_esb_t));
1361 1359 }
1362 1360
1363 1361 mutex_exit(&sa->ch_big_esbl);
1364 1362
1365 1363 /* destroy ch_esbl lock */
1366 1364 mutex_destroy(&sa->ch_big_esbl);
1367 1365 }
1368 1366
1369 1367 void
1370 1368 ch_small_rbuf_recycle(ch_esb_t *rbp)
1371 1369 {
1372 1370 ch_t *sa = rbp->cs_sa;
1373 1371
1374 1372 if (rbp->cs_flag) {
1375 1373 uint32_t i;
1376 1374 /*
1377 1375 * free private buffer allocated in ch_alloc_esbbuf()
1378 1376 */
1379 1377 ch_free_dma_mem(rbp->cs_dh, rbp->cs_ah);
1380 1378
1381 1379 i = rbp->cs_index;
1382 1380
↓ open down ↓ |
1333 lines elided |
↑ open up ↑ |
1383 1381 /*
1384 1382 		 * free descriptor buffer
1385 1383 */
1386 1384 kmem_free(rbp, sizeof (ch_esb_t));
1387 1385
1388 1386 /*
1389 1387 * decrement count of receive buffers freed by callback
1390 1388 * We decrement here so anyone trying to do fini will
1391 1389 * only remove the driver once the counts go to 0.
1392 1390 */
1393 - atomic_add_32(&buffers_in_use[i], -1);
1391 + atomic_dec_32(&buffers_in_use[i]);
1394 1392
1395 1393 return;
1396 1394 }
1397 1395
1398 1396 mutex_enter(&sa->ch_small_esbl);
1399 1397 rbp->cs_next = sa->ch_small_esb_free;
1400 1398 sa->ch_small_esb_free = rbp;
1401 1399 mutex_exit(&sa->ch_small_esbl);
1402 1400
1403 1401 /*
1404 1402 * decrement count of receive buffers freed by callback
1405 1403 */
1406 - atomic_add_32(&buffers_in_use[rbp->cs_index], -1);
1404 + atomic_dec_32(&buffers_in_use[rbp->cs_index]);
1407 1405 }
1408 1406
1409 1407 /*
1410 1408 * callback function from freeb() when esballoced mblk freed.
1411 1409 */
1412 1410 void
1413 1411 ch_big_rbuf_recycle(ch_esb_t *rbp)
1414 1412 {
1415 1413 ch_t *sa = rbp->cs_sa;
1416 1414
1417 1415 if (rbp->cs_flag) {
1418 1416 uint32_t i;
1419 1417 /*
1420 1418 * free private buffer allocated in ch_alloc_esbbuf()
1421 1419 */
1422 1420 ch_free_dma_mem(rbp->cs_dh, rbp->cs_ah);
1423 1421
1424 1422 i = rbp->cs_index;
1425 1423
↓ open down ↓ |
9 lines elided |
↑ open up ↑ |
1426 1424 /*
1427 1425 		 * free descriptor buffer
1428 1426 */
1429 1427 kmem_free(rbp, sizeof (ch_esb_t));
1430 1428
1431 1429 /*
1432 1430 * decrement count of receive buffers freed by callback
1433 1431 * We decrement here so anyone trying to do fini will
1434 1432 * only remove the driver once the counts go to 0.
1435 1433 */
1436 - atomic_add_32(&buffers_in_use[i], -1);
1434 + atomic_dec_32(&buffers_in_use[i]);
1437 1435
1438 1436 return;
1439 1437 }
1440 1438
1441 1439 mutex_enter(&sa->ch_big_esbl);
1442 1440 rbp->cs_next = sa->ch_big_esb_free;
1443 1441 sa->ch_big_esb_free = rbp;
1444 1442 mutex_exit(&sa->ch_big_esbl);
1445 1443
1446 1444 /*
1447 1445 * decrement count of receive buffers freed by callback
1448 1446 */
1449 - atomic_add_32(&buffers_in_use[rbp->cs_index], -1);
1447 + atomic_dec_32(&buffers_in_use[rbp->cs_index]);
1450 1448 }
1451 1449
1452 1450 /*
1453 1451 * get a pre-allocated, pre-mapped receive buffer from free list.
1454 1452 * (used sge.c)
1455 1453 */
1456 1454 ch_esb_t *
1457 1455 ch_get_small_rbuf(ch_t *sa)
1458 1456 {
1459 1457 ch_esb_t *rbp;
1460 1458
1461 1459 mutex_enter(&sa->ch_small_esbl);
1462 1460 rbp = sa->ch_small_esb_free;
1463 1461 if (rbp) {
1464 1462 sa->ch_small_esb_free = rbp->cs_next;
1465 1463 }
1466 1464 mutex_exit(&sa->ch_small_esbl);
1467 1465
1468 1466 return (rbp);
1469 1467 }
1470 1468
1471 1469 /*
1472 1470 * get a pre-allocated, pre-mapped receive buffer from free list.
1473 1471 * (used sge.c)
1474 1472 */
1475 1473
1476 1474 ch_esb_t *
1477 1475 ch_get_big_rbuf(ch_t *sa)
1478 1476 {
1479 1477 ch_esb_t *rbp;
1480 1478
1481 1479 mutex_enter(&sa->ch_big_esbl);
1482 1480 rbp = sa->ch_big_esb_free;
1483 1481 if (rbp) {
1484 1482 sa->ch_big_esb_free = rbp->cs_next;
1485 1483 }
1486 1484 mutex_exit(&sa->ch_big_esbl);
1487 1485
1488 1486 return (rbp);
1489 1487 }
1490 1488
1491 1489 void
1492 1490 pe_detach(ch_t *sa)
1493 1491 {
1494 1492 (void) sge_stop(sa->sge);
1495 1493
1496 1494 pe_free_driver_resources(sa);
1497 1495 }
1498 1496
1499 1497 static void
1500 1498 pe_free_driver_resources(ch_t *sa)
1501 1499 {
1502 1500 if (sa) {
1503 1501 t1_free_sw_modules(sa);
1504 1502
1505 1503 /* free pool of receive buffers */
1506 1504 pe_rbuf_pool_free(sa);
1507 1505 }
1508 1506 }
1509 1507
1510 1508 /*
1511 1509 * Processes elmer0 external interrupts in process context.
1512 1510 */
1513 1511 static void
1514 1512 ext_intr_task(ch_t *adapter)
1515 1513 {
1516 1514 u32 enable;
1517 1515
1518 1516 (void) elmer0_ext_intr_handler(adapter);
1519 1517
1520 1518 /* Now reenable external interrupts */
1521 1519 t1_write_reg_4(adapter, A_PL_CAUSE, F_PL_INTR_EXT);
1522 1520 enable = t1_read_reg_4(adapter, A_PL_ENABLE);
1523 1521 t1_write_reg_4(adapter, A_PL_ENABLE, enable | F_PL_INTR_EXT);
1524 1522 adapter->slow_intr_mask |= F_PL_INTR_EXT;
1525 1523 }
1526 1524
1527 1525 /*
1528 1526 * Interrupt-context handler for elmer0 external interrupts.
1529 1527 */
1530 1528 void
1531 1529 t1_os_elmer0_ext_intr(ch_t *adapter)
1532 1530 {
1533 1531 u32 enable = t1_read_reg_4(adapter, A_PL_ENABLE);
1534 1532
1535 1533 adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
1536 1534 t1_write_reg_4(adapter, A_PL_ENABLE, enable & ~F_PL_INTR_EXT);
1537 1535 #ifdef NOTYET
1538 1536 schedule_work(&adapter->ext_intr_handler_task);
1539 1537 #else
1540 1538 ext_intr_task(adapter);
1541 1539 #endif
1542 1540 }
1543 1541
1544 1542 uint8_t *
1545 1543 t1_get_next_mcaddr(struct t1_rx_mode *rmp)
1546 1544 {
1547 1545 uint8_t *addr = 0;
1548 1546 if (rmp->mc) {
1549 1547 addr = rmp->mc->cmc_mca;
1550 1548 rmp->mc = rmp->mc->cmc_next;
1551 1549 }
1552 1550 return (addr);
1553 1551 }
1554 1552
1555 1553 void
1556 1554 pe_dma_handle_init(ch_t *chp, int cnt)
1557 1555 {
1558 1556 free_dh_t *dhe;
1559 1557 #if defined(__sparc)
1560 1558 int tcnt = cnt/2;
1561 1559
1562 1560 for (; cnt; cnt--) {
1563 1561 dhe = ch_get_dvma_handle(chp);
1564 1562 if (dhe == NULL)
1565 1563 break;
1566 1564 mutex_enter(&chp->ch_dh_lck);
1567 1565 dhe->dhe_next = chp->ch_vdh;
1568 1566 chp->ch_vdh = dhe;
1569 1567 mutex_exit(&chp->ch_dh_lck);
1570 1568 }
1571 1569
1572 1570 cnt += tcnt;
1573 1571 #endif
1574 1572 while (cnt--) {
1575 1573 dhe = ch_get_dma_handle(chp);
1576 1574 if (dhe == NULL)
1577 1575 return;
1578 1576 mutex_enter(&chp->ch_dh_lck);
1579 1577 dhe->dhe_next = chp->ch_dh;
1580 1578 chp->ch_dh = dhe;
1581 1579 mutex_exit(&chp->ch_dh_lck);
1582 1580 }
1583 1581 }
1584 1582
1585 1583 /*
1586 1584 * Write new values to the MTU table. Caller must validate that the new MTUs
1587 1585 * are in ascending order. params.mtus[] is initialized by init_mtus()
1588 1586 * called in t1_init_sw_modules().
1589 1587 */
1590 1588 #define MTUREG(idx) (A_TP_MTU_REG0 + (idx) * 4)
1591 1589
1592 1590 static void
1593 1591 update_mtu_tab(ch_t *adapter)
1594 1592 {
1595 1593 int i;
1596 1594
1597 1595 for (i = 0; i < NMTUS; ++i) {
1598 1596 int mtu = (unsigned int)adapter->params.mtus[i];
1599 1597
1600 1598 t1_write_reg_4(adapter, MTUREG(i), mtu);
1601 1599 }
1602 1600 }
1603 1601
1604 1602 static int
1605 1603 pe_change_mtu(ch_t *chp)
1606 1604 {
1607 1605 struct cmac *mac = chp->port[0].mac;
1608 1606 int ret;
1609 1607
1610 1608 if (!mac->ops->set_mtu) {
1611 1609 return (EOPNOTSUPP);
1612 1610 }
1613 1611 if (chp->ch_mtu < 68) {
1614 1612 return (EINVAL);
1615 1613 }
1616 1614 if (ret = mac->ops->set_mtu(mac, chp->ch_mtu)) {
1617 1615 return (ret);
1618 1616 }
1619 1617
1620 1618 return (0);
1621 1619 }
1622 1620
1623 1621 typedef struct fake_arp {
1624 1622 char fa_dst[6]; /* ethernet header */
1625 1623 char fa_src[6]; /* ethernet header */
1626 1624 ushort_t fa_typ; /* ethernet header */
1627 1625
1628 1626 ushort_t fa_hrd; /* arp */
1629 1627 ushort_t fa_pro;
1630 1628 char fa_hln;
1631 1629 char fa_pln;
1632 1630 ushort_t fa_op;
1633 1631 char fa_src_mac[6];
1634 1632 uint_t fa_src_ip;
1635 1633 char fa_dst_mac[6];
1636 1634 char fa_dst_ip[4];
1637 1635 } fake_arp_t;
1638 1636
1639 1637 /*
1640 1638 * PR2928 & PR3309
1641 1639 * construct packet in mblk and attach it to sge structure.
1642 1640 */
1643 1641 static int
1644 1642 pe_make_fake_arp(ch_t *chp, unsigned char *arpp)
1645 1643 {
1646 1644 pesge *sge = chp->sge;
1647 1645 mblk_t *bp;
1648 1646 fake_arp_t *fap;
1649 1647 static char buf[6] = {0, 7, 0x43, 0, 0, 0};
1650 1648 struct cpl_tx_pkt *cpl;
1651 1649
1652 1650 bp = allocb(sizeof (struct fake_arp) + SZ_CPL_TX_PKT, BPRI_HI);
1653 1651 if (bp == NULL) {
1654 1652 return (1);
1655 1653 }
1656 1654 bzero(bp->b_rptr, sizeof (struct fake_arp) + SZ_CPL_TX_PKT);
1657 1655
1658 1656 /* fill in cpl header */
1659 1657 cpl = (struct cpl_tx_pkt *)bp->b_rptr;
1660 1658 cpl->opcode = CPL_TX_PKT;
1661 1659 cpl->iff = 0; /* XXX port 0 needs fixing with NEMO */
1662 1660 cpl->ip_csum_dis = 1; /* no IP header cksum */
1663 1661 cpl->l4_csum_dis = 1; /* no tcp/udp cksum */
1664 1662 cpl->vlan_valid = 0; /* no vlan */
1665 1663
1666 1664 fap = (fake_arp_t *)&bp->b_rptr[SZ_CPL_TX_PKT];
1667 1665
1668 1666 bcopy(arpp, fap, sizeof (*fap)); /* copy first arp to mblk */
1669 1667
1670 1668 bcopy(buf, fap->fa_dst, 6); /* overwrite dst mac */
1671 1669 chp->ch_ip = fap->fa_src_ip; /* not used yet */
1672 1670 bcopy(buf, fap->fa_dst_mac, 6); /* overwrite dst mac */
1673 1671
1674 1672 bp->b_wptr = bp->b_rptr + sizeof (struct fake_arp)+SZ_CPL_TX_PKT;
1675 1673
1676 1674 sge_add_fake_arp(sge, (void *)bp);
1677 1675
1678 1676 return (0);
1679 1677 }
1680 1678
1681 1679 /*
1682 1680 * PR2928 & PR3309
1683 1681 * free the fake arp's mblk on sge structure.
1684 1682 */
1685 1683 void
1686 1684 pe_free_fake_arp(void *arp)
1687 1685 {
1688 1686 mblk_t *bp = (mblk_t *)(arp);
1689 1687
1690 1688 freemsg(bp);
1691 1689 }
1692 1690
1693 1691 /*
1694 1692 * extract ip address of nic from first outgoing arp.
1695 1693 */
1696 1694 static uint32_t
1697 1695 pe_get_ip(unsigned char *arpp)
1698 1696 {
1699 1697 fake_arp_t fap;
1700 1698
1701 1699 /*
1702 1700 * first copy packet to buffer so we know
1703 1701 * it will be properly aligned.
1704 1702 */
1705 1703 bcopy(arpp, &fap, sizeof (fap)); /* copy first arp to buffer */
1706 1704 return (fap.fa_src_ip);
1707 1705 }
1708 1706
1709 1707 /* ARGSUSED */
1710 1708 void
1711 1709 t1_os_link_changed(ch_t *obj, int port_id, int link_status,
1712 1710 int speed, int duplex, int fc)
1713 1711 {
1714 1712 gld_mac_info_t *macinfo = obj->ch_macp;
1715 1713 if (link_status) {
1716 1714 gld_linkstate(macinfo, GLD_LINKSTATE_UP);
1717 1715 /*
1718 1716 * Link states should be reported to user
1719 1717 * whenever it changes
1720 1718 */
1721 1719 cmn_err(CE_NOTE, "%s: link is up", adapter_name(obj));
1722 1720 } else {
1723 1721 gld_linkstate(macinfo, GLD_LINKSTATE_DOWN);
1724 1722 /*
1725 1723 * Link states should be reported to user
1726 1724 * whenever it changes
1727 1725 */
1728 1726 cmn_err(CE_NOTE, "%s: link is down", adapter_name(obj));
1729 1727 }
1730 1728 }
↓ open down ↓ |
271 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX