Print this page
5045 use atomic_{inc,dec}_* instead of atomic_add_*
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/xge/drv/xgell.c
+++ new/usr/src/uts/common/io/xge/drv/xgell.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 /*
28 28 * Copyright (c) 2002-2009 Neterion, Inc.
29 29 * All right Reserved.
30 30 *
31 31 * FileName : xgell.c
32 32 *
33 33 * Description: Xge Link Layer data path implementation
34 34 *
35 35 */
36 36
37 37 #include "xgell.h"
38 38
39 39 #include <netinet/ip.h>
40 40 #include <netinet/tcp.h>
41 41 #include <netinet/udp.h>
42 42
/* Largest on-wire frame: configured MTU plus a VLAN-tagged Ethernet header. */
#define	XGELL_MAX_FRAME_SIZE(hldev)	((hldev)->config.mtu + \
	sizeof (struct ether_vlan_header))

#define	HEADROOM		2	/* for DIX-only packets */
47 47
/*
 * No-op free routine: header_frtn can be handed to desballoc() when the
 * underlying memory needs no per-buffer cleanup of its own.
 */
void header_free_func(void *arg) { }
frtn_t header_frtn = {header_free_func, NULL};
50 50
/*
 * DMA attributes used for Tx side.  Up to 18 scatter/gather entries are
 * allowed per transmit frame (dma_attr_sgllen).
 */
static struct ddi_dma_attr tx_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr_version */
	0x0ULL,				/* dma_attr_addr_lo */
	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_addr_hi */
	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_count_max */
#if defined(__sparc)
	0x2000,				/* dma_attr_align */
#else
	0x1000,				/* dma_attr_align */
#endif
	0xFC00FC,			/* dma_attr_burstsizes */
	0x1,				/* dma_attr_minxfer */
	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_maxxfer */
	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_seg */
	18,				/* dma_attr_sgllen */
	(unsigned int)1,		/* dma_attr_granular */
	0				/* dma_attr_flags */
};
70 70
/*
 * DMA attributes used when using ddi_dma_mem_alloc to
 * allocate HAL descriptors and Rx buffers during replenish.
 * Rx buffers must be physically contiguous: dma_attr_sgllen is 1.
 */
static struct ddi_dma_attr hal_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr_version */
	0x0ULL,				/* dma_attr_addr_lo */
	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_addr_hi */
	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_count_max */
#if defined(__sparc)
	0x2000,				/* dma_attr_align */
#else
	0x1000,				/* dma_attr_align */
#endif
	0xFC00FC,			/* dma_attr_burstsizes */
	0x1,				/* dma_attr_minxfer */
	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_maxxfer */
	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_seg */
	1,				/* dma_attr_sgllen */
	(unsigned int)1,		/* dma_attr_granular (was mislabeled dma_attr_sgllen) */
	DDI_DMA_RELAXED_ORDERING	/* dma_attr_flags */
};
93 93
/* Non-static: shared with other driver source files (see extern users). */
struct ddi_dma_attr *p_hal_dma_attr = &hal_dma_attr;
95 95
/* GLDv3 mac(9E) entry points implemented later in this file. */
static int xgell_m_stat(void *, uint_t, uint64_t *);
static int xgell_m_start(void *);
static void xgell_m_stop(void *);
static int xgell_m_promisc(void *, boolean_t);
static int xgell_m_multicst(void *, boolean_t, const uint8_t *);
static void xgell_m_ioctl(void *, queue_t *, mblk_t *);
static boolean_t xgell_m_getcapab(void *, mac_capab_t, void *);

#define	XGELL_M_CALLBACK_FLAGS	(MC_IOCTL | MC_GETCAPAB)

/*
 * Callback vector registered with the mac layer.  The NULL slots are
 * entry points this driver does not provide directly -- presumably
 * unicast/tx are served through the ring capabilities advertised via
 * xgell_m_getcapab(); confirm against the mac_register() call site.
 */
static mac_callbacks_t xgell_m_callbacks = {
	XGELL_M_CALLBACK_FLAGS,
	xgell_m_stat,
	xgell_m_start,
	xgell_m_stop,
	xgell_m_promisc,
	xgell_m_multicst,
	NULL,
	NULL,
	NULL,
	xgell_m_ioctl,
	xgell_m_getcapab
};
119 119
120 120 /*
121 121 * xge_device_poll
122 122 *
123 123 * Timeout should call me every 1s. xge_callback_event_queued should call me
124 124 * when HAL hope event was rescheduled.
125 125 */
126 126 /*ARGSUSED*/
127 127 void
128 128 xge_device_poll(void *data)
129 129 {
130 130 xgelldev_t *lldev = xge_hal_device_private(data);
131 131
132 132 mutex_enter(&lldev->genlock);
133 133 if (lldev->is_initialized) {
134 134 xge_hal_device_poll(data);
135 135 lldev->timeout_id = timeout(xge_device_poll, data,
136 136 XGE_DEV_POLL_TICKS);
137 137 } else if (lldev->in_reset == 1) {
138 138 lldev->timeout_id = timeout(xge_device_poll, data,
139 139 XGE_DEV_POLL_TICKS);
140 140 } else {
141 141 lldev->timeout_id = 0;
142 142 }
143 143 mutex_exit(&lldev->genlock);
144 144 }
145 145
146 146 /*
147 147 * xge_device_poll_now
148 148 *
149 149 * Will call xge_device_poll() immediately
150 150 */
151 151 void
152 152 xge_device_poll_now(void *data)
153 153 {
154 154 xgelldev_t *lldev = xge_hal_device_private(data);
155 155
156 156 mutex_enter(&lldev->genlock);
157 157 if (lldev->is_initialized) {
158 158 xge_hal_device_poll(data);
159 159 }
160 160 mutex_exit(&lldev->genlock);
161 161 }
162 162
163 163 /*
164 164 * xgell_callback_link_up
165 165 *
166 166 * This function called by HAL to notify HW link up state change.
167 167 */
168 168 void
169 169 xgell_callback_link_up(void *userdata)
170 170 {
171 171 xgelldev_t *lldev = (xgelldev_t *)userdata;
172 172
173 173 mac_link_update(lldev->mh, LINK_STATE_UP);
174 174 }
175 175
176 176 /*
177 177 * xgell_callback_link_down
178 178 *
179 179 * This function called by HAL to notify HW link down state change.
180 180 */
181 181 void
182 182 xgell_callback_link_down(void *userdata)
183 183 {
184 184 xgelldev_t *lldev = (xgelldev_t *)userdata;
185 185
186 186 mac_link_update(lldev->mh, LINK_STATE_DOWN);
187 187 }
188 188
189 189 /*
190 190 * xgell_rx_buffer_replenish_all
191 191 *
192 192 * To replenish all freed dtr(s) with buffers in free pool. It's called by
193 193 * xgell_rx_buffer_recycle() or xgell_rx_1b_callback().
194 194 * Must be called with pool_lock held.
195 195 */
196 196 static void
197 197 xgell_rx_buffer_replenish_all(xgell_rx_ring_t *ring)
198 198 {
199 199 xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;
200 200 xge_hal_dtr_h dtr;
201 201 xgell_rx_buffer_t *rx_buffer;
202 202 xgell_rxd_priv_t *rxd_priv;
203 203
204 204 xge_assert(mutex_owned(&bf_pool->pool_lock));
205 205
206 206 while ((bf_pool->free > 0) &&
207 207 (xge_hal_ring_dtr_reserve(ring->channelh, &dtr) == XGE_HAL_OK)) {
208 208 xge_assert(bf_pool->head);
209 209
210 210 rx_buffer = bf_pool->head;
211 211
212 212 bf_pool->head = rx_buffer->next;
213 213 bf_pool->free--;
214 214
215 215 xge_assert(rx_buffer->dma_addr);
216 216
217 217 rxd_priv = (xgell_rxd_priv_t *)
218 218 xge_hal_ring_dtr_private(ring->channelh, dtr);
219 219 xge_hal_ring_dtr_1b_set(dtr, rx_buffer->dma_addr,
220 220 bf_pool->size);
221 221
222 222 rxd_priv->rx_buffer = rx_buffer;
223 223 xge_hal_ring_dtr_post(ring->channelh, dtr);
224 224 }
225 225 }
226 226
227 227 /*
228 228 * xgell_rx_buffer_release
229 229 *
230 230 * The only thing done here is to put the buffer back to the pool.
231 231 * Calling this function need be protected by mutex, bf_pool.pool_lock.
232 232 */
233 233 static void
234 234 xgell_rx_buffer_release(xgell_rx_buffer_t *rx_buffer)
235 235 {
236 236 xgell_rx_ring_t *ring = rx_buffer->ring;
237 237 xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;
238 238
239 239 xge_assert(mutex_owned(&bf_pool->pool_lock));
240 240
241 241 /* Put the buffer back to pool */
242 242 rx_buffer->next = bf_pool->head;
243 243 bf_pool->head = rx_buffer;
244 244
245 245 bf_pool->free++;
246 246 }
247 247
/*
 * xgell_rx_buffer_recycle
 *
 * Called by desballoc() as the free routine when an up-loaned rx mblk is
 * freed by the stack.  We will try to replenish all descriptors.
 */

/*
 * Previously there was much lock contention between xgell_rx_1b_compl() and
 * xgell_rx_buffer_recycle(), which consumed a lot of CPU resources and had bad
 * effect on rx performance.  A separate recycle list is introduced to overcome
 * this.  The recycle list is used to record the rx buffer that has been
 * recycled and these buffers will be returned back to the free list in bulk
 * instead of one-by-one.
 */

static void
xgell_rx_buffer_recycle(char *arg)
{
	xgell_rx_buffer_t *rx_buffer = (xgell_rx_buffer_t *)arg;
	xgell_rx_ring_t *ring = rx_buffer->ring;
	xgelldev_t *lldev = ring->lldev;
	xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;

	mutex_enter(&bf_pool->recycle_lock);

	/* Prepend to the recycle list; remember the tail for bulk splicing. */
	rx_buffer->next = bf_pool->recycle_head;
	bf_pool->recycle_head = rx_buffer;
	if (bf_pool->recycle_tail == NULL)
		bf_pool->recycle_tail = rx_buffer;
	bf_pool->recycle++;

	/*
	 * Before finding a good way to set this hiwat, just always call to
	 * replenish_all. *TODO*
	 */
	if ((lldev->is_initialized != 0) && (ring->live) &&
	    (bf_pool->recycle >= XGELL_RX_BUFFER_RECYCLE_CACHE)) {
		/* Splice the entire recycle list onto the free list. */
		mutex_enter(&bf_pool->pool_lock);
		bf_pool->recycle_tail->next = bf_pool->head;
		bf_pool->head = bf_pool->recycle_head;
		bf_pool->recycle_head = bf_pool->recycle_tail = NULL;
		bf_pool->post -= bf_pool->recycle;
		bf_pool->free += bf_pool->recycle;
		bf_pool->recycle = 0;
		xgell_rx_buffer_replenish_all(ring);
		mutex_exit(&bf_pool->pool_lock);
	}

	mutex_exit(&bf_pool->recycle_lock);
}
299 299
/*
 * xgell_rx_buffer_alloc
 *
 * Allocate one rx buffer and return with the pointer to the buffer.
 * Return NULL if failed.
 *
 * Layout of the single DMA allocation:
 *   [HEADROOM pad][bf_pool->size data area][...][xgell_rx_buffer_t metadata]
 * Only the data area is bound for device DMA; the trailing metadata records
 * the handles needed to tear the allocation down again.
 */
static xgell_rx_buffer_t *
xgell_rx_buffer_alloc(xgell_rx_ring_t *ring)
{
	xgelldev_t *lldev = ring->lldev;
	xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;
	xge_hal_device_t *hldev;
	void *vaddr;
	ddi_dma_handle_t dma_handle;
	ddi_acc_handle_t dma_acch;
	dma_addr_t dma_addr;
	uint_t ncookies;
	ddi_dma_cookie_t dma_cookie;
	size_t real_size;
	extern ddi_device_acc_attr_t *p_xge_dev_attr;
	xgell_rx_buffer_t *rx_buffer;

	hldev = (xge_hal_device_t *)lldev->devh;

	if (ddi_dma_alloc_handle(hldev->pdev, p_hal_dma_attr, DDI_DMA_SLEEP,
	    0, &dma_handle) != DDI_SUCCESS) {
		xge_debug_ll(XGE_ERR, "%s%d: can not allocate DMA handle",
		    XGELL_IFNAME, lldev->instance);
		goto handle_failed;
	}

	/* reserve some space at the end of the buffer for recycling */
	if (ddi_dma_mem_alloc(dma_handle, HEADROOM + bf_pool->size +
	    sizeof (xgell_rx_buffer_t), p_xge_dev_attr, DDI_DMA_STREAMING,
	    DDI_DMA_SLEEP, 0, (caddr_t *)&vaddr, &real_size, &dma_acch) !=
	    DDI_SUCCESS) {
		xge_debug_ll(XGE_ERR, "%s%d: can not allocate DMA-able memory",
		    XGELL_IFNAME, lldev->instance);
		goto mem_failed;
	}

	/* ddi_dma_mem_alloc() sets real_size; verify we really got enough. */
	if (HEADROOM + bf_pool->size + sizeof (xgell_rx_buffer_t) >
	    real_size) {
		xge_debug_ll(XGE_ERR, "%s%d: can not allocate DMA-able memory",
		    XGELL_IFNAME, lldev->instance);
		goto bind_failed;
	}

	/* Bind only the data area (past HEADROOM) for device DMA reads. */
	if (ddi_dma_addr_bind_handle(dma_handle, NULL, (char *)vaddr + HEADROOM,
	    bf_pool->size, DDI_DMA_READ | DDI_DMA_STREAMING,
	    DDI_DMA_SLEEP, 0, &dma_cookie, &ncookies) != DDI_SUCCESS) {
		xge_debug_ll(XGE_ERR, "%s%d: out of mapping for mblk",
		    XGELL_IFNAME, lldev->instance);
		goto bind_failed;
	}

	/* The buffer must map to a single physically-contiguous cookie. */
	if (ncookies != 1 || dma_cookie.dmac_size < bf_pool->size) {
		xge_debug_ll(XGE_ERR, "%s%d: can not handle partial DMA",
		    XGELL_IFNAME, lldev->instance);
		goto check_failed;
	}

	dma_addr = dma_cookie.dmac_laddress;

	/* Metadata lives at the tail of this same allocation. */
	rx_buffer = (xgell_rx_buffer_t *)((char *)vaddr + real_size -
	    sizeof (xgell_rx_buffer_t));
	rx_buffer->next = NULL;
	rx_buffer->vaddr = vaddr;
	rx_buffer->dma_addr = dma_addr;
	rx_buffer->dma_handle = dma_handle;
	rx_buffer->dma_acch = dma_acch;
	rx_buffer->ring = ring;
	/* desballoc() free routine: queues the buffer for recycling. */
	rx_buffer->frtn.free_func = xgell_rx_buffer_recycle;
	rx_buffer->frtn.free_arg = (void *)rx_buffer;

	return (rx_buffer);

check_failed:
	(void) ddi_dma_unbind_handle(dma_handle);
bind_failed:
	XGE_OS_MEMORY_CHECK_FREE(vaddr, 0);
	ddi_dma_mem_free(&dma_acch);
mem_failed:
	ddi_dma_free_handle(&dma_handle);
handle_failed:

	return (NULL);
}
388 388
/*
 * xgell_rx_destroy_buffer_pool
 *
 * Destroy buffer pool.  If there is still any buffer held by the upper
 * layer, recorded by bf_pool.post, return B_FALSE to reject to be unloaded.
 */
static boolean_t
xgell_rx_destroy_buffer_pool(xgell_rx_ring_t *ring)
{
	xgelldev_t *lldev = ring->lldev;
	xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;
	xgell_rx_buffer_t *rx_buffer;
	ddi_dma_handle_t dma_handle;
	ddi_acc_handle_t dma_acch;
	int i;

	/*
	 * If the pool has been destroyed, just return B_TRUE
	 */
	if (!bf_pool->live)
		return (B_TRUE);

	/* Drain any pending recycle list into the free list first. */
	mutex_enter(&bf_pool->recycle_lock);
	if (bf_pool->recycle > 0) {
		mutex_enter(&bf_pool->pool_lock);
		bf_pool->recycle_tail->next = bf_pool->head;
		bf_pool->head = bf_pool->recycle_head;
		bf_pool->recycle_tail = bf_pool->recycle_head = NULL;
		bf_pool->post -= bf_pool->recycle;
		bf_pool->free += bf_pool->recycle;
		bf_pool->recycle = 0;
		mutex_exit(&bf_pool->pool_lock);
	}
	mutex_exit(&bf_pool->recycle_lock);

	/*
	 * If there is any posted buffer, the driver should reject to be
	 * detached.  Need notice upper layer to release them.
	 */
	if (bf_pool->post != 0) {
		xge_debug_ll(XGE_ERR,
		    "%s%d has some buffers not be recycled, try later!",
		    XGELL_IFNAME, lldev->instance);
		return (B_FALSE);
	}

	/*
	 * Release buffers one by one.  The metadata (rx_buffer) lives inside
	 * the DMA memory being freed, so handles are copied out beforehand
	 * and rx_buffer is not touched after ddi_dma_mem_free().
	 */
	for (i = bf_pool->total; i > 0; i--) {
		rx_buffer = bf_pool->head;
		xge_assert(rx_buffer != NULL);

		bf_pool->head = rx_buffer->next;

		dma_handle = rx_buffer->dma_handle;
		dma_acch = rx_buffer->dma_acch;

		if (ddi_dma_unbind_handle(dma_handle) != DDI_SUCCESS) {
			xge_debug_ll(XGE_ERR, "failed to unbind DMA handle!");
			/* Put the buffer back so state stays consistent. */
			bf_pool->head = rx_buffer;
			return (B_FALSE);
		}
		ddi_dma_mem_free(&dma_acch);
		ddi_dma_free_handle(&dma_handle);

		bf_pool->total--;
		bf_pool->free--;
	}

	xge_assert(!mutex_owned(&bf_pool->pool_lock));

	mutex_destroy(&bf_pool->recycle_lock);
	mutex_destroy(&bf_pool->pool_lock);
	bf_pool->live = B_FALSE;

	return (B_TRUE);
}
467 467
/*
 * xgell_rx_create_buffer_pool
 *
 * Initialize RX buffer pool for all RX rings.  Refer to rx_buffer_pool_t.
 * Returns B_TRUE on success (or if the pool already exists), B_FALSE if
 * any buffer allocation fails (the partial pool is destroyed).
 */
static boolean_t
xgell_rx_create_buffer_pool(xgell_rx_ring_t *ring)
{
	xgelldev_t *lldev = ring->lldev;
	xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;
	xge_hal_device_t *hldev;
	xgell_rx_buffer_t *rx_buffer;
	int i;

	/* Already created: nothing to do. */
	if (bf_pool->live)
		return (B_TRUE);

	hldev = (xge_hal_device_t *)lldev->devh;

	bf_pool->total = 0;
	bf_pool->size = XGELL_MAX_FRAME_SIZE(hldev);
	bf_pool->head = NULL;
	bf_pool->free = 0;
	bf_pool->post = 0;
	bf_pool->post_hiwat = lldev->config.rx_buffer_post_hiwat;
	bf_pool->recycle = 0;
	bf_pool->recycle_head = NULL;
	bf_pool->recycle_tail = NULL;
	bf_pool->live = B_TRUE;

	/* Initialized at interrupt priority: used from the rx path. */
	mutex_init(&bf_pool->pool_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(hldev->irqh));
	mutex_init(&bf_pool->recycle_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(hldev->irqh));

	/*
	 * Allocate buffers one by one. If failed, destroy whole pool by
	 * call to xgell_rx_destroy_buffer_pool().
	 */

	for (i = 0; i < lldev->config.rx_buffer_total; i++) {
		if ((rx_buffer = xgell_rx_buffer_alloc(ring)) == NULL) {
			(void) xgell_rx_destroy_buffer_pool(ring);
			return (B_FALSE);
		}

		rx_buffer->next = bf_pool->head;
		bf_pool->head = rx_buffer;

		bf_pool->total++;
		bf_pool->free++;
	}

	return (B_TRUE);
}
523 523
524 524 /*
525 525 * xgell_rx_dtr_replenish
526 526 *
527 527 * Replenish descriptor with rx_buffer in RX buffer pool.
528 528 * The dtr should be post right away.
529 529 */
530 530 xge_hal_status_e
531 531 xgell_rx_dtr_replenish(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, int index,
532 532 void *userdata, xge_hal_channel_reopen_e reopen)
533 533 {
534 534 xgell_rx_ring_t *ring = userdata;
535 535 xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;
536 536 xgell_rx_buffer_t *rx_buffer;
537 537 xgell_rxd_priv_t *rxd_priv;
538 538
539 539 mutex_enter(&bf_pool->pool_lock);
540 540 if (bf_pool->head == NULL) {
541 541 xge_debug_ll(XGE_ERR, "no more available rx DMA buffer!");
542 542 return (XGE_HAL_FAIL);
543 543 }
544 544 rx_buffer = bf_pool->head;
545 545 xge_assert(rx_buffer);
546 546 xge_assert(rx_buffer->dma_addr);
547 547
548 548 bf_pool->head = rx_buffer->next;
549 549 bf_pool->free--;
550 550 mutex_exit(&bf_pool->pool_lock);
551 551
552 552 rxd_priv = (xgell_rxd_priv_t *)xge_hal_ring_dtr_private(channelh, dtr);
553 553 xge_hal_ring_dtr_1b_set(dtr, rx_buffer->dma_addr, bf_pool->size);
554 554
555 555 rxd_priv->rx_buffer = rx_buffer;
556 556
557 557 return (XGE_HAL_OK);
558 558 }
559 559
560 560 /*
561 561 * xgell_get_ip_offset
562 562 *
563 563 * Calculate the offset to IP header.
564 564 */
565 565 static inline int
566 566 xgell_get_ip_offset(xge_hal_dtr_info_t *ext_info)
567 567 {
568 568 int ip_off;
569 569
570 570 /* get IP-header offset */
571 571 switch (ext_info->frame) {
572 572 case XGE_HAL_FRAME_TYPE_DIX:
573 573 ip_off = XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE;
574 574 break;
575 575 case XGE_HAL_FRAME_TYPE_IPX:
576 576 ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
577 577 XGE_HAL_HEADER_802_2_SIZE +
578 578 XGE_HAL_HEADER_SNAP_SIZE);
579 579 break;
580 580 case XGE_HAL_FRAME_TYPE_LLC:
581 581 ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
582 582 XGE_HAL_HEADER_802_2_SIZE);
583 583 break;
584 584 case XGE_HAL_FRAME_TYPE_SNAP:
585 585 ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
586 586 XGE_HAL_HEADER_SNAP_SIZE);
587 587 break;
588 588 default:
589 589 ip_off = 0;
590 590 break;
591 591 }
592 592
593 593 if ((ext_info->proto & XGE_HAL_FRAME_PROTO_IPV4 ||
594 594 ext_info->proto & XGE_HAL_FRAME_PROTO_IPV6) &&
595 595 (ext_info->proto & XGE_HAL_FRAME_PROTO_VLAN_TAGGED)) {
596 596 ip_off += XGE_HAL_HEADER_VLAN_SIZE;
597 597 }
598 598
599 599 return (ip_off);
600 600 }
601 601
/*
 * xgell_rx_hcksum_assoc
 *
 * Judge the packet type and then call to mac_hcksum_set() to associate
 * h/w checksum information with the received mblk.
 */
static inline void
xgell_rx_hcksum_assoc(mblk_t *mp, char *vaddr, int pkt_length,
    xge_hal_dtr_info_t *ext_info)
{
	int cksum_flags = 0;

	if (!(ext_info->proto & XGE_HAL_FRAME_PROTO_IP_FRAGMENTED)) {
		/* Unfragmented TCP/UDP: hardware verified full checksums. */
		if (ext_info->proto & XGE_HAL_FRAME_PROTO_TCP_OR_UDP) {
			if (ext_info->l3_cksum == XGE_HAL_L3_CKSUM_OK) {
				cksum_flags |= HCK_IPV4_HDRCKSUM_OK;
			}
			if (ext_info->l4_cksum == XGE_HAL_L4_CKSUM_OK) {
				cksum_flags |= HCK_FULLCKSUM_OK;
			}
			if (cksum_flags != 0) {
				mac_hcksum_set(mp, 0, 0, 0, 0, cksum_flags);
			}
		}
	} else if (ext_info->proto &
	    (XGE_HAL_FRAME_PROTO_IPV4 | XGE_HAL_FRAME_PROTO_IPV6)) {
		/*
		 * Just pass the partial cksum up to IP.
		 */
		int ip_off = xgell_get_ip_offset(ext_info);
		int start, end = pkt_length - ip_off;

		if (ext_info->proto & XGE_HAL_FRAME_PROTO_IPV4) {
			struct ip *ip =
			    (struct ip *)(vaddr + ip_off);
			/* IPv4 header length field counts 32-bit words. */
			start = ip->ip_hl * 4;
		} else {
			/* IPv6: fixed 40-byte base header. */
			start = 40;
		}
		cksum_flags |= HCK_PARTIALCKSUM;
		mac_hcksum_set(mp, start, 0, end,
		    ntohs(ext_info->l4_cksum), cksum_flags);
	}
}
646 646
/*
 * xgell_rx_1b_msg_alloc
 *
 * Allocate message header for data buffer, and decide if copy the packet to
 * new data buffer to release big rx_buffer to save memory.
 *
 * If the pkt_length <= XGELL_RX_DMA_LOWAT, call allocb() to allocate
 * new message and copy the payload in.  On return *copyit tells the caller
 * whether the rx_buffer can be released immediately (B_TRUE) or is loaned
 * up via desballoc() (B_FALSE).
 */
static mblk_t *
xgell_rx_1b_msg_alloc(xgell_rx_ring_t *ring, xgell_rx_buffer_t *rx_buffer,
    int pkt_length, xge_hal_dtr_info_t *ext_info, boolean_t *copyit)
{
	xgelldev_t *lldev = ring->lldev;
	mblk_t *mp;
	char *vaddr;

	/* NOTE(review): ext_info is accepted but not referenced here. */

	vaddr = (char *)rx_buffer->vaddr + HEADROOM;
	/*
	 * Copy packet into new allocated message buffer, if pkt_length
	 * is less than XGELL_RX_DMA_LOWAT
	 */
	if (*copyit || pkt_length <= lldev->config.rx_dma_lowat) {
		if ((mp = allocb(pkt_length + HEADROOM, 0)) == NULL) {
			return (NULL);
		}
		mp->b_rptr += HEADROOM;
		bcopy(vaddr, mp->b_rptr, pkt_length);
		mp->b_wptr = mp->b_rptr + pkt_length;
		/* Payload was copied; the rx buffer is free to release. */
		*copyit = B_TRUE;
		return (mp);
	}

	/*
	 * Just allocate mblk for current data buffer
	 */
	if ((mp = (mblk_t *)desballoc((unsigned char *)vaddr, pkt_length, 0,
	    &rx_buffer->frtn)) == NULL) {
		/* Drop it */
		return (NULL);
	}
	/*
	 * Adjust the b_rptr/b_wptr in the mblk_t structure.
	 */
	mp->b_wptr += pkt_length;

	return (mp);
}
695 695
/*
 * xgell_rx_1b_callback
 *
 * If the interrupt is because of a received frame or if the receive ring
 * contains fresh as yet un-processed frames, this function is called.
 * Serves both the interrupt path (chains up to rx_pkt_burst packets and
 * hands them to mac_rx_ring()) and the polling path (leaves the chain on
 * ring->poll_mp for xgell_rx_poll() to return).
 */
static xge_hal_status_e
xgell_rx_1b_callback(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code,
    void *userdata)
{
	xgell_rx_ring_t *ring = (xgell_rx_ring_t *)userdata;
	xgelldev_t *lldev = ring->lldev;
	xgell_rx_buffer_t *rx_buffer;
	mblk_t *mp_head = NULL;
	mblk_t *mp_end = NULL;
	int pkt_burst = 0;

	xge_debug_ll(XGE_TRACE, "xgell_rx_1b_callback on ring %d", ring->index);

	mutex_enter(&ring->bf_pool.pool_lock);
	do {
		int pkt_length;
		dma_addr_t dma_data;
		mblk_t *mp;
		boolean_t copyit = B_FALSE;

		xgell_rxd_priv_t *rxd_priv = ((xgell_rxd_priv_t *)
		    xge_hal_ring_dtr_private(channelh, dtr));
		xge_hal_dtr_info_t ext_info;

		rx_buffer = rxd_priv->rx_buffer;

		xge_hal_ring_dtr_1b_get(channelh, dtr, &dma_data, &pkt_length);
		xge_hal_ring_dtr_info_get(channelh, dtr, &ext_info);

		xge_assert(dma_data == rx_buffer->dma_addr);

		if (t_code != 0) {
			/* Hardware reported an error: drop the frame. */
			xge_debug_ll(XGE_ERR, "%s%d: rx: dtr 0x%"PRIx64
			    " completed due to error t_code %01x", XGELL_IFNAME,
			    lldev->instance, (uint64_t)(uintptr_t)dtr, t_code);

			(void) xge_hal_device_handle_tcode(channelh, dtr,
			    t_code);
			xge_hal_ring_dtr_free(channelh, dtr); /* drop it */
			xgell_rx_buffer_release(rx_buffer);
			continue;
		}

		/*
		 * Sync the DMA memory
		 */
		if (ddi_dma_sync(rx_buffer->dma_handle, 0, pkt_length,
		    DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS) {
			xge_debug_ll(XGE_ERR, "%s%d: rx: can not do DMA sync",
			    XGELL_IFNAME, lldev->instance);
			xge_hal_ring_dtr_free(channelh, dtr); /* drop it */
			xgell_rx_buffer_release(rx_buffer);
			continue;
		}

		/*
		 * Allocate message for the packet.  Force a copy when too
		 * many buffers are already loaned up to the stack.
		 */
		if (ring->bf_pool.post > ring->bf_pool.post_hiwat) {
			copyit = B_TRUE;
		} else {
			copyit = B_FALSE;
		}

		mp = xgell_rx_1b_msg_alloc(ring, rx_buffer, pkt_length,
		    &ext_info, &copyit);

		xge_hal_ring_dtr_free(channelh, dtr);

		/*
		 * Release the buffer and recycle it later
		 */
		if ((mp == NULL) || copyit) {
			xgell_rx_buffer_release(rx_buffer);
		} else {
			/*
			 * Count it since the buffer should be loaned up.
			 */
			ring->bf_pool.post++;
		}
		if (mp == NULL) {
			xge_debug_ll(XGE_ERR,
			    "%s%d: rx: can not allocate mp mblk",
			    XGELL_IFNAME, lldev->instance);
			continue;
		}

		/*
		 * Associate cksum_flags per packet type and h/w
		 * cksum flags.
		 */
		xgell_rx_hcksum_assoc(mp, (char *)rx_buffer->vaddr + HEADROOM,
		    pkt_length, &ext_info);

		ring->rx_pkts++;
		ring->rx_bytes += pkt_length;

		/* Append to the chain being built. */
		if (mp_head == NULL) {
			mp_head = mp;
			mp_end = mp;
		} else {
			mp_end->b_next = mp;
			mp_end = mp;
		}

		/*
		 * Inlined implemented polling function.
		 */
		if ((ring->poll_mp == NULL) && (ring->poll_bytes > 0)) {
			ring->poll_mp = mp_head;
		}
		if (ring->poll_mp != NULL) {
			if ((ring->poll_bytes -= pkt_length) <= 0) {
				/* have polled enough packets. */
				break;
			} else {
				/* continue polling packets. */
				continue;
			}
		}

		/*
		 * We're not in polling mode, so try to chain more messages
		 * or send the chain up according to pkt_burst.
		 */
		if (++pkt_burst < lldev->config.rx_pkt_burst)
			continue;

		if (ring->bf_pool.post > ring->bf_pool.post_hiwat) {
			/* Replenish rx buffers */
			xgell_rx_buffer_replenish_all(ring);
		}
		/* Drop the pool lock while the chain is handed to the mac. */
		mutex_exit(&ring->bf_pool.pool_lock);
		if (mp_head != NULL) {
			mac_rx_ring(lldev->mh, ring->ring_handle, mp_head,
			    ring->ring_gen_num);
		}
		mp_head = mp_end = NULL;
		pkt_burst = 0;
		mutex_enter(&ring->bf_pool.pool_lock);

	} while (xge_hal_ring_dtr_next_completed(channelh, &dtr, &t_code) ==
	    XGE_HAL_OK);

	/*
	 * Always call replenish_all to recycle rx_buffers.
	 */
	xgell_rx_buffer_replenish_all(ring);
	mutex_exit(&ring->bf_pool.pool_lock);

	/*
	 * If we're not in polling cycle, call mac_rx(), otherwise
	 * just return while leaving packets chained to ring->poll_mp.
	 */
	if ((ring->poll_mp == NULL) && (mp_head != NULL)) {
		mac_rx_ring(lldev->mh, ring->ring_handle, mp_head,
		    ring->ring_gen_num);
	}

	return (XGE_HAL_OK);
}
863 863
864 864 mblk_t *
865 865 xgell_rx_poll(void *arg, int bytes_to_pickup)
866 866 {
867 867 xgell_rx_ring_t *ring = (xgell_rx_ring_t *)arg;
868 868 int got_rx = 0;
869 869 mblk_t *mp;
870 870
871 871 xge_debug_ll(XGE_TRACE, "xgell_rx_poll on ring %d", ring->index);
872 872
873 873 ring->poll_mp = NULL;
874 874 ring->poll_bytes = bytes_to_pickup;
875 875 (void) xge_hal_device_poll_rx_channel(ring->channelh, &got_rx);
876 876
877 877 mp = ring->poll_mp;
878 878 ring->poll_bytes = -1;
879 879 ring->polled_bytes += got_rx;
880 880 ring->poll_mp = NULL;
881 881
882 882 return (mp);
883 883 }
884 884
885 885 /*
886 886 * xgell_xmit_compl
887 887 *
888 888 * If an interrupt was raised to indicate DMA complete of the Tx packet,
889 889 * this function is called. It identifies the last TxD whose buffer was
890 890 * freed and frees all skbs whose data have already DMA'ed into the NICs
891 891 * internal memory.
892 892 */
893 893 static xge_hal_status_e
894 894 xgell_xmit_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code,
895 895 void *userdata)
896 896 {
897 897 xgell_tx_ring_t *ring = userdata;
898 898 xgelldev_t *lldev = ring->lldev;
899 899
900 900 do {
901 901 xgell_txd_priv_t *txd_priv = ((xgell_txd_priv_t *)
902 902 xge_hal_fifo_dtr_private(dtr));
903 903 int i;
904 904
905 905 if (t_code) {
906 906 xge_debug_ll(XGE_TRACE, "%s%d: tx: dtr 0x%"PRIx64
907 907 " completed due to error t_code %01x", XGELL_IFNAME,
908 908 lldev->instance, (uint64_t)(uintptr_t)dtr, t_code);
909 909
910 910 (void) xge_hal_device_handle_tcode(channelh, dtr,
911 911 t_code);
912 912 }
913 913
914 914 for (i = 0; i < txd_priv->handle_cnt; i++) {
915 915 if (txd_priv->dma_handles[i] != NULL) {
916 916 xge_assert(txd_priv->dma_handles[i]);
917 917 (void) ddi_dma_unbind_handle(
918 918 txd_priv->dma_handles[i]);
919 919 ddi_dma_free_handle(&txd_priv->dma_handles[i]);
920 920 txd_priv->dma_handles[i] = 0;
921 921 }
922 922 }
923 923 txd_priv->handle_cnt = 0;
924 924
925 925 xge_hal_fifo_dtr_free(channelh, dtr);
926 926
927 927 if (txd_priv->mblk != NULL) {
928 928 freemsg(txd_priv->mblk);
929 929 txd_priv->mblk = NULL;
930 930 }
931 931
932 932 } while (xge_hal_fifo_dtr_next_completed(channelh, &dtr, &t_code) ==
933 933 XGE_HAL_OK);
934 934
935 935 if (ring->need_resched)
936 936 mac_tx_ring_update(lldev->mh, ring->ring_handle);
937 937
938 938 return (XGE_HAL_OK);
939 939 }
940 940
/*
 * xgell_ring_tx — MAC-layer transmit entry point for one Tx ring (fifo).
 *
 * Contract (per GLDv3 mri_tx): returns NULL when the mblk chain has been
 * consumed (posted or dropped), or returns `mp` unconsumed to ask the MAC
 * layer to retry later (flow control).  Small fragments are bcopy()'d into
 * the descriptor's inline buffer; larger ones are DMA-bound.  On any
 * unrecoverable mapping error the packet is freed (no retry).
 */
941 941 mblk_t *
942 942 xgell_ring_tx(void *arg, mblk_t *mp)
943 943 {
944 944 xgell_tx_ring_t *ring = (xgell_tx_ring_t *)arg;
945 945 mblk_t *bp;
946 946 xgelldev_t *lldev = ring->lldev;
947 947 xge_hal_device_t *hldev = lldev->devh;
948 948 xge_hal_status_e status;
949 949 xge_hal_dtr_h dtr;
950 950 xgell_txd_priv_t *txd_priv;
951 951 uint32_t hckflags;
952 952 uint32_t lsoflags;
953 953 uint32_t mss;
954 954 int handle_cnt, frag_cnt, ret, i, copied;
955 955 boolean_t used_copy;
956 956 uint64_t sent_bytes;
957 957
958 958 _begin:
959 959 handle_cnt = frag_cnt = 0;
960 960 sent_bytes = 0;
961 961
962 962 if (!lldev->is_initialized || lldev->in_reset)
963 963 return (mp);
964 964
965 965 /*
966 966 * If the free Tx dtrs count reaches the lower threshold,
967 967 * inform the gld to stop sending more packets till the free
968 968 * dtrs count exceeds higher threshold. Driver informs the
969 969 * gld through gld_sched call, when the free dtrs count exceeds
970 970 * the higher threshold.
971 971 */
972 972 if (xge_hal_channel_dtr_count(ring->channelh)
973 973 <= XGELL_TX_LEVEL_LOW) {
974 974 xge_debug_ll(XGE_TRACE, "%s%d: queue %d: err on xmit,"
975 975 "free descriptors count at low threshold %d",
976 976 XGELL_IFNAME, lldev->instance,
977 977 ((xge_hal_channel_t *)ring->channelh)->post_qid,
978 978 XGELL_TX_LEVEL_LOW);
979 979 goto _exit;
980 980 }
981 981
982 982 status = xge_hal_fifo_dtr_reserve(ring->channelh, &dtr);
983 983 if (status != XGE_HAL_OK) {
984 984 switch (status) {
985 985 case XGE_HAL_INF_CHANNEL_IS_NOT_READY:
986 986 xge_debug_ll(XGE_ERR,
987 987 "%s%d: channel %d is not ready.", XGELL_IFNAME,
988 988 lldev->instance,
989 989 ((xge_hal_channel_t *)
990 990 ring->channelh)->post_qid);
991 991 goto _exit;
992 992 case XGE_HAL_INF_OUT_OF_DESCRIPTORS:
993 993 xge_debug_ll(XGE_TRACE, "%s%d: queue %d: error in xmit,"
994 994 " out of descriptors.", XGELL_IFNAME,
995 995 lldev->instance,
996 996 ((xge_hal_channel_t *)
997 997 ring->channelh)->post_qid);
998 998 goto _exit;
999 999 default:
1000 1000 return (mp);
1001 1001 }
1002 1002 }
1003 1003
1004 1004 txd_priv = xge_hal_fifo_dtr_private(dtr);
1005 1005 txd_priv->mblk = mp;
1006 1006
1007 1007 /*
1008 1008 * VLAN tag should be passed down along with MAC header, so h/w needn't
1009 1009 * do insertion.
1010 1010 *
1011 1011 * For NIC driver that has to strip and re-insert VLAN tag, the example
1012 1012 * is the other implementation for xge. The driver can simple bcopy()
1013 1013 * ether_vlan_header to overwrite VLAN tag and let h/w insert the tag
1014 1014 * automatically, since it's impossible that GLD sends down mp(s) with
1015 1015 * splited ether_vlan_header.
1016 1016 *
1017 1017 * struct ether_vlan_header *evhp;
1018 1018 * uint16_t tci;
1019 1019 *
1020 1020 * evhp = (struct ether_vlan_header *)mp->b_rptr;
1021 1021 * if (evhp->ether_tpid == htons(VLAN_TPID)) {
1022 1022 * tci = ntohs(evhp->ether_tci);
1023 1023 * (void) bcopy(mp->b_rptr, mp->b_rptr + VLAN_TAGSZ,
1024 1024 * 2 * ETHERADDRL);
1025 1025 * mp->b_rptr += VLAN_TAGSZ;
1026 1026 *
1027 1027 * xge_hal_fifo_dtr_vlan_set(dtr, tci);
1028 1028 * }
1029 1029 */
1030 1030
1031 1031 copied = 0;
1032 1032 used_copy = B_FALSE;
1033 1033 for (bp = mp; bp != NULL; bp = bp->b_cont) {
1034 1034 int mblen;
1035 1035 uint_t ncookies;
1036 1036 ddi_dma_cookie_t dma_cookie;
1037 1037 ddi_dma_handle_t dma_handle;
1038 1038
1039 1039 /* skip zero-length message blocks */
1040 1040 mblen = MBLKL(bp);
1041 1041 if (mblen == 0) {
1042 1042 continue;
1043 1043 }
1044 1044
1045 1045 sent_bytes += mblen;
1046 1046
1047 1047 /*
1048 1048 * Check the message length to decide to DMA or bcopy() data
1049 1049 * to tx descriptor(s).
1050 1050 */
/*
 * Copy path: small blocks below tx_dma_lowat are appended into the
 * descriptor's inline buffer until tx_copied_max is reached; a run of
 * copied blocks is closed out ("finalized") as one fragment before
 * switching to the DMA-bind path.
 */
1051 1051 if (mblen < lldev->config.tx_dma_lowat &&
1052 1052 (copied + mblen) < lldev->tx_copied_max) {
1053 1053 xge_hal_status_e rc;
1054 1054 rc = xge_hal_fifo_dtr_buffer_append(ring->channelh,
1055 1055 dtr, bp->b_rptr, mblen);
1056 1056 if (rc == XGE_HAL_OK) {
1057 1057 used_copy = B_TRUE;
1058 1058 copied += mblen;
1059 1059 continue;
1060 1060 } else if (used_copy) {
1061 1061 xge_hal_fifo_dtr_buffer_finalize(
1062 1062 ring->channelh, dtr, frag_cnt++);
1063 1063 used_copy = B_FALSE;
1064 1064 }
1065 1065 } else if (used_copy) {
1066 1066 xge_hal_fifo_dtr_buffer_finalize(ring->channelh,
1067 1067 dtr, frag_cnt++);
1068 1068 used_copy = B_FALSE;
1069 1069 }
1070 1070
1071 1071 ret = ddi_dma_alloc_handle(lldev->dev_info, &tx_dma_attr,
1072 1072 DDI_DMA_DONTWAIT, 0, &dma_handle);
1073 1073 if (ret != DDI_SUCCESS) {
1074 1074 xge_debug_ll(XGE_ERR,
1075 1075 "%s%d: can not allocate dma handle", XGELL_IFNAME,
1076 1076 lldev->instance);
1077 1077 goto _exit_cleanup;
1078 1078 }
1079 1079
1080 1080 ret = ddi_dma_addr_bind_handle(dma_handle, NULL,
1081 1081 (caddr_t)bp->b_rptr, mblen,
1082 1082 DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, 0,
1083 1083 &dma_cookie, &ncookies);
1084 1084
1085 1085 switch (ret) {
1086 1086 case DDI_DMA_MAPPED:
1087 1087 /* everything's fine */
1088 1088 break;
1089 1089
1090 1090 case DDI_DMA_NORESOURCES:
1091 1091 xge_debug_ll(XGE_ERR,
1092 1092 "%s%d: can not bind dma address",
1093 1093 XGELL_IFNAME, lldev->instance);
1094 1094 ddi_dma_free_handle(&dma_handle);
1095 1095 goto _exit_cleanup;
1096 1096
1097 1097 case DDI_DMA_NOMAPPING:
1098 1098 case DDI_DMA_INUSE:
1099 1099 case DDI_DMA_TOOBIG:
1100 1100 default:
1101 1101 /* drop packet, don't retry */
1102 1102 xge_debug_ll(XGE_ERR,
1103 1103 "%s%d: can not map message buffer",
1104 1104 XGELL_IFNAME, lldev->instance);
1105 1105 ddi_dma_free_handle(&dma_handle);
1106 1106 goto _exit_cleanup;
1107 1107 }
1108 1108
1109 1109 if (ncookies + frag_cnt > hldev->config.fifo.max_frags) {
1110 1110 xge_debug_ll(XGE_ERR, "%s%d: too many fragments, "
1111 1111 "requested c:%d+f:%d", XGELL_IFNAME,
1112 1112 lldev->instance, ncookies, frag_cnt);
1113 1113 (void) ddi_dma_unbind_handle(dma_handle);
1114 1114 ddi_dma_free_handle(&dma_handle);
1115 1115 goto _exit_cleanup;
1116 1116 }
1117 1117
1118 1118 /* setup the descriptors for this data buffer */
1119 1119 while (ncookies) {
1120 1120 xge_hal_fifo_dtr_buffer_set(ring->channelh, dtr,
1121 1121 frag_cnt++, dma_cookie.dmac_laddress,
1122 1122 dma_cookie.dmac_size);
1123 1123 if (--ncookies) {
1124 1124 ddi_dma_nextcookie(dma_handle, &dma_cookie);
1125 1125 }
1126 1126
1127 1127 }
1128 1128
1129 1129 txd_priv->dma_handles[handle_cnt++] = dma_handle;
1130 1130
/*
 * Getting close to the fifo's fragment limit with more mblks still
 * chained: collapse the remainder of the chain into one contiguous
 * mblk so the packet fits.
 */
1131 1131 if (bp->b_cont &&
1132 1132 (frag_cnt + XGE_HAL_DEFAULT_FIFO_FRAGS_THRESHOLD >=
1133 1133 hldev->config.fifo.max_frags)) {
1134 1134 mblk_t *nmp;
1135 1135
1136 1136 xge_debug_ll(XGE_TRACE,
1137 1137 "too many FRAGs [%d], pull up them", frag_cnt);
1138 1138
1139 1139 if ((nmp = msgpullup(bp->b_cont, -1)) == NULL) {
1140 1140 /* Drop packet, don't retry */
1141 1141 xge_debug_ll(XGE_ERR,
1142 1142 "%s%d: can not pullup message buffer",
1143 1143 XGELL_IFNAME, lldev->instance);
1144 1144 goto _exit_cleanup;
1145 1145 }
1146 1146 freemsg(bp->b_cont);
1147 1147 bp->b_cont = nmp;
1148 1148 }
1149 1149 }
1150 1150
1151 1151 /* finalize unfinished copies */
1152 1152 if (used_copy) {
1153 1153 xge_hal_fifo_dtr_buffer_finalize(ring->channelh, dtr,
1154 1154 frag_cnt++);
1155 1155 }
1156 1156
1157 1157 txd_priv->handle_cnt = handle_cnt;
1158 1158
1159 1159 /*
1160 1160 * If LSO is required, just call xge_hal_fifo_dtr_mss_set(dtr, mss) to
1161 1161 * do all necessary work.
1162 1162 */
1163 1163 mac_lso_get(mp, &mss, &lsoflags);
1164 1164
1165 1165 if (lsoflags & HW_LSO) {
1166 1166 xge_assert((mss != 0) && (mss <= XGE_HAL_DEFAULT_MTU));
1167 1167 xge_hal_fifo_dtr_mss_set(dtr, mss);
1168 1168 }
1169 1169
1170 1170 mac_hcksum_get(mp, NULL, NULL, NULL, NULL, &hckflags);
1171 1171 if (hckflags & HCK_IPV4_HDRCKSUM) {
1172 1172 xge_hal_fifo_dtr_cksum_set_bits(dtr,
↓ open down ↓ |
1172 lines elided |
↑ open up ↑ |
1173 1173 XGE_HAL_TXD_TX_CKO_IPV4_EN);
1174 1174 }
1175 1175 if (hckflags & HCK_FULLCKSUM) {
1176 1176 xge_hal_fifo_dtr_cksum_set_bits(dtr, XGE_HAL_TXD_TX_CKO_TCP_EN |
1177 1177 XGE_HAL_TXD_TX_CKO_UDP_EN);
1178 1178 }
1179 1179
1180 1180 xge_hal_fifo_dtr_post(ring->channelh, dtr);
1181 1181
1182 1182 /* Update per-ring tx statistics */
1183 - atomic_add_64(&ring->tx_pkts, 1);
1183 + atomic_inc_64(&ring->tx_pkts);
1184 1184 atomic_add_64(&ring->tx_bytes, sent_bytes);
1185 1185
1186 1186 return (NULL);
1187 1187
1188 1188 _exit_cleanup:
1189 1189 /*
1190 1190 * Could not successfully transmit but have changed the message,
1191 1191 * so just free it and return NULL
1192 1192 */
1193 1193 for (i = 0; i < handle_cnt; i++) {
1194 1194 (void) ddi_dma_unbind_handle(txd_priv->dma_handles[i]);
1195 1195 ddi_dma_free_handle(&txd_priv->dma_handles[i]);
1196 1196 txd_priv->dma_handles[i] = 0;
1197 1197 }
1198 1198
1199 1199 xge_hal_fifo_dtr_free(ring->channelh, dtr);
1200 1200
1201 1201 freemsg(mp);
1202 1202 return (NULL);
1203 1203
1204 1204 _exit:
/*
 * Flow-control path: mark the ring so xgell_xmit_compl() will call
 * mac_tx_ring_update(), and hand the chain back to MAC for retry.
 * NOTE(review): need_resched is set without ring_lock held — presumably
 * benign (worst case an extra update), but worth confirming.
 */
1205 1205 ring->need_resched = B_TRUE;
1206 1206 return (mp);
1207 1207 }
1208 1208
1209 1209 /*
1210 1210 * xgell_ring_macaddr_init
1211 1211 */
/*
 * Populate ring->mmac with the hardware MAC-address slots assigned to this
 * rx ring (32 slots per ring; slot_start = ring->index * 32).  All slots
 * are marked "not set"; slot 0's factory address is cleared so the MAC
 * framework can program it later.
 */
1212 1212 static void
1213 1213 xgell_rx_ring_maddr_init(xgell_rx_ring_t *ring)
1214 1214 {
1215 1215 int i;
1216 1216 xgelldev_t *lldev = ring->lldev;
1217 1217 xge_hal_device_t *hldev = lldev->devh;
1218 1218 int slot_start;
1219 1219
1220 1220 xge_debug_ll(XGE_TRACE, "%s", "xgell_rx_ring_maddr_init");
1221 1221
1222 1222 ring->mmac.naddr = XGE_RX_MULTI_MAC_ADDRESSES_MAX;
1223 1223 ring->mmac.naddrfree = ring->mmac.naddr;
1224 1224
1225 1225 /*
1226 1226 * For the default rx ring, the first MAC address is the factory one.
1227 1227 * This will be set by the framework, so need to clear it for now.
1228 1228 */
1229 1229 (void) xge_hal_device_macaddr_clear(hldev, 0);
1230 1230
1231 1231 /*
1232 1232 * Read the MAC address Configuration Memory from HAL.
1233 1233 * The first slot will hold a factory MAC address, contents in other
1234 1234 * slots will be FF:FF:FF:FF:FF:FF.
1235 1235 */
1236 1236 slot_start = ring->index * 32;
1237 1237 for (i = 0; i < ring->mmac.naddr; i++) {
1238 1238 (void) xge_hal_device_macaddr_get(hldev, slot_start + i,
1239 1239 ring->mmac.mac_addr + i);
1240 1240 ring->mmac.mac_addr_set[i] = B_FALSE;
1241 1241 }
1242 1242 }
1243 1243
1244 1244 static int xgell_maddr_set(xgelldev_t *, int, uint8_t *);
1245 1245
1246 1246 static int
1247 1247 xgell_addmac(void *arg, const uint8_t *mac_addr)
1248 1248 {
1249 1249 xgell_rx_ring_t *ring = arg;
1250 1250 xgelldev_t *lldev = ring->lldev;
1251 1251 xge_hal_device_t *hldev = lldev->devh;
1252 1252 int slot;
1253 1253 int slot_start;
1254 1254
1255 1255 xge_debug_ll(XGE_TRACE, "%s", "xgell_addmac");
1256 1256
1257 1257 mutex_enter(&lldev->genlock);
1258 1258
1259 1259 if (ring->mmac.naddrfree == 0) {
1260 1260 mutex_exit(&lldev->genlock);
1261 1261 return (ENOSPC);
1262 1262 }
1263 1263
1264 1264 /* First slot is for factory MAC address */
1265 1265 for (slot = 0; slot < ring->mmac.naddr; slot++) {
1266 1266 if (ring->mmac.mac_addr_set[slot] == B_FALSE) {
1267 1267 break;
1268 1268 }
1269 1269 }
1270 1270
1271 1271 ASSERT(slot < ring->mmac.naddr);
1272 1272
1273 1273 slot_start = ring->index * 32;
1274 1274
1275 1275 if (xgell_maddr_set(lldev, slot_start + slot, (uint8_t *)mac_addr) !=
1276 1276 0) {
1277 1277 mutex_exit(&lldev->genlock);
1278 1278 return (EIO);
1279 1279 }
1280 1280
1281 1281 /* Simply enable RTS for the whole section. */
1282 1282 (void) xge_hal_device_rts_section_enable(hldev, slot_start + slot);
1283 1283
1284 1284 /*
1285 1285 * Read back the MAC address from HAL to keep the array up to date.
1286 1286 */
1287 1287 if (xge_hal_device_macaddr_get(hldev, slot_start + slot,
1288 1288 ring->mmac.mac_addr + slot) != XGE_HAL_OK) {
1289 1289 (void) xge_hal_device_macaddr_clear(hldev, slot_start + slot);
1290 1290 return (EIO);
1291 1291 }
1292 1292
1293 1293 ring->mmac.mac_addr_set[slot] = B_TRUE;
1294 1294 ring->mmac.naddrfree--;
1295 1295
1296 1296 mutex_exit(&lldev->genlock);
1297 1297
1298 1298 return (0);
1299 1299 }
1300 1300
/*
 * xgell_remmac — MAC-group callback (mgi_remmac) to remove a previously
 * programmed unicast address from this rx ring's hardware section.
 * Returns 0 on success, EINVAL if the address is unknown or outside this
 * ring's slot range, ENXIO if the slot was never set, EIO on HAL failure.
 *
 * NOTE(review): the macaddr_find lookup, range check, and the xge_assert
 * below all read shared mmac state before genlock is taken; the state is
 * re-checked under the lock afterwards, so this looks like a benign
 * optimization — confirm against concurrent addmac callers.
 */
1301 1301 static int
1302 1302 xgell_remmac(void *arg, const uint8_t *mac_addr)
1303 1303 {
1304 1304 xgell_rx_ring_t *ring = arg;
1305 1305 xgelldev_t *lldev = ring->lldev;
1306 1306 xge_hal_device_t *hldev = lldev->devh;
1307 1307 xge_hal_status_e status;
1308 1308 int slot;
1309 1309 int slot_start;
1310 1310
1311 1311 xge_debug_ll(XGE_TRACE, "%s", "xgell_remmac");
1312 1312
1313 1313 slot = xge_hal_device_macaddr_find(hldev, (uint8_t *)mac_addr);
1314 1314 if (slot == -1)
1315 1315 return (EINVAL);
1316 1316
1317 1317 slot_start = ring->index * 32;
1318 1318
1319 1319 /*
1320 1320 * Adjust slot to the offset in the MAC array of this ring (group).
1321 1321 */
1322 1322 slot -= slot_start;
1323 1323
1324 1324 /*
1325 1325 * Only can remove a pre-set MAC address for this ring (group).
1326 1326 */
1327 1327 if (slot < 0 || slot >= ring->mmac.naddr)
1328 1328 return (EINVAL);
1329 1329
1330 1330
1331 1331 xge_assert(ring->mmac.mac_addr_set[slot]);
1332 1332
1333 1333 mutex_enter(&lldev->genlock);
1334 1334 if (!ring->mmac.mac_addr_set[slot]) {
1335 1335 mutex_exit(&lldev->genlock);
1336 1336 /*
1337 1337 * The result will be unexpected when reach here. WARNING!
1338 1338 */
1339 1339 xge_debug_ll(XGE_ERR,
1340 1340 "%s%d: caller is trying to remove an unset MAC address",
1341 1341 XGELL_IFNAME, lldev->instance);
1342 1342 return (ENXIO);
1343 1343 }
1344 1344
1345 1345 status = xge_hal_device_macaddr_clear(hldev, slot_start + slot);
1346 1346 if (status != XGE_HAL_OK) {
1347 1347 mutex_exit(&lldev->genlock);
1348 1348 return (EIO);
1349 1349 }
1350 1350
1351 1351 ring->mmac.mac_addr_set[slot] = B_FALSE;
1352 1352 ring->mmac.naddrfree++;
1353 1353
1354 1354 /*
1355 1355 * TODO: Disable MAC RTS if all addresses have been cleared.
1356 1356 */
1357 1357
1358 1358 /*
1359 1359 * Read back the MAC address from HAL to keep the array up to date.
1360 1360 */
1361 1361 (void) xge_hal_device_macaddr_get(hldev, slot_start + slot,
1362 1362 ring->mmac.mac_addr + slot);
1363 1363 mutex_exit(&lldev->genlock);
1364 1364
1365 1365 return (0);
1366 1366 }
1367 1367
1368 1368 /*
1369 1369 * Temporarily calling hal function.
1370 1370 *
1371 1371 * With MSI-X implementation, no lock is needed, so that the interrupt
1372 1372 * handling could be faster.
1373 1373 */
/*
 * mi_enable callback: leaving polling mode — re-enable rx interrupts for
 * this ring by turning channel polling off under ring_lock.
 */
1374 1374 int
1375 1375 xgell_rx_ring_intr_enable(mac_intr_handle_t ih)
1376 1376 {
1377 1377 xgell_rx_ring_t *ring = (xgell_rx_ring_t *)ih;
1378 1378
1379 1379 mutex_enter(&ring->ring_lock);
1380 1380 xge_hal_device_rx_channel_disable_polling(ring->channelh);
1381 1381 mutex_exit(&ring->ring_lock);
1382 1382
1383 1383 return (0);
1384 1384 }
1385 1385
/*
 * mi_disable callback: entering polling mode — suppress rx interrupts for
 * this ring by turning channel polling on under ring_lock.
 */
1386 1386 int
1387 1387 xgell_rx_ring_intr_disable(mac_intr_handle_t ih)
1388 1388 {
1389 1389 xgell_rx_ring_t *ring = (xgell_rx_ring_t *)ih;
1390 1390
1391 1391 mutex_enter(&ring->ring_lock);
1392 1392 xge_hal_device_rx_channel_enable_polling(ring->channelh);
1393 1393 mutex_exit(&ring->ring_lock);
1394 1394
1395 1395 return (0);
1396 1396 }
1397 1397
/*
 * mri_start callback: record the MAC layer's generation number, which the
 * rx path passes back via mac_rx_ring() so stale deliveries are discarded.
 */
1398 1398 static int
1399 1399 xgell_rx_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
1400 1400 {
1401 1401 xgell_rx_ring_t *rx_ring = (xgell_rx_ring_t *)rh;
1402 1402
1403 1403 rx_ring->ring_gen_num = mr_gen_num;
1404 1404
1405 1405 return (0);
1406 1406 }
1407 1407
1408 1408 /*ARGSUSED*/
/* mri_stop callback: nothing to do — rings are torn down in xgell_rx_close(). */
1409 1409 static void
1410 1410 xgell_rx_ring_stop(mac_ring_driver_t rh)
1411 1411 {
1412 1412 }
1413 1413
1414 1414 /*ARGSUSED*/
/* mri_start callback for tx rings: no per-ring start work needed. */
1415 1415 static int
1416 1416 xgell_tx_ring_start(mac_ring_driver_t rh, uint64_t useless)
1417 1417 {
1418 1418 return (0);
1419 1419 }
1420 1420
1421 1421 /*ARGSUSED*/
/* mri_stop callback for tx rings: nothing to do — see xgell_tx_close(). */
1422 1422 static void
1423 1423 xgell_tx_ring_stop(mac_ring_driver_t rh)
1424 1424 {
1425 1425 }
1426 1426
1427 1427 /*
1428 1428 * Callback funtion for MAC layer to register all rings.
1429 1429 *
1430 1430 * Xframe hardware doesn't support grouping explicitly, so the driver needs
1431 1431 * to pretend having resource groups. We may also optionally group all 8 rx
1432 1432 * rings into a single group for increased scalability on CMT architectures,
1433 1433 * or group one rx ring per group for maximum virtualization.
1434 1434 *
1435 1435 * TX grouping is actually done by framework, so, just register all TX
1436 1436 * resources without grouping them.
1437 1437 */
/*
 * GLDv3 mc_fill_ring callback: hand the MAC layer the driver entry points
 * for one rx or tx ring.  For RX the ring is chosen by rg_index when rings
 * and groups are 1:1 (virtualization layout), otherwise by index
 * (performance layout); the intr enable/disable hooks wire up per-ring
 * polling.  For TX, grouping is done by the framework (rg_index == -1).
 */
1438 1438 void
1439 1439 xgell_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index,
1440 1440 const int index, mac_ring_info_t *infop, mac_ring_handle_t rh)
1441 1441 {
1442 1442 xgelldev_t *lldev = (xgelldev_t *)arg;
1443 1443 mac_intr_t *mintr;
1444 1444
1445 1445 switch (rtype) {
1446 1446 case MAC_RING_TYPE_RX: {
1447 1447 xgell_rx_ring_t *rx_ring;
1448 1448
1449 1449 xge_assert(index < lldev->init_rx_rings);
1450 1450 xge_assert(rg_index < lldev->init_rx_groups);
1451 1451
1452 1452 /*
1453 1453 * Performance vs. Virtualization
1454 1454 */
1455 1455 if (lldev->init_rx_rings == lldev->init_rx_groups)
1456 1456 rx_ring = lldev->rx_ring + rg_index;
1457 1457 else
1458 1458 rx_ring = lldev->rx_ring + index;
1459 1459
1460 1460 rx_ring->ring_handle = rh;
1461 1461
1462 1462 infop->mri_driver = (mac_ring_driver_t)rx_ring;
1463 1463 infop->mri_start = xgell_rx_ring_start;
1464 1464 infop->mri_stop = xgell_rx_ring_stop;
1465 1465 infop->mri_poll = xgell_rx_poll;
1466 1466 infop->mri_stat = xgell_rx_ring_stat;
1467 1467
1468 1468 mintr = &infop->mri_intr;
1469 1469 mintr->mi_handle = (mac_intr_handle_t)rx_ring;
1470 1470 mintr->mi_enable = xgell_rx_ring_intr_enable;
1471 1471 mintr->mi_disable = xgell_rx_ring_intr_disable;
1472 1472
1473 1473 break;
1474 1474 }
1475 1475 case MAC_RING_TYPE_TX: {
1476 1476 xgell_tx_ring_t *tx_ring;
1477 1477
1478 1478 xge_assert(rg_index == -1);
1479 1479
1480 1480 xge_assert((index >= 0) && (index < lldev->init_tx_rings));
1481 1481
1482 1482 tx_ring = lldev->tx_ring + index;
1483 1483 tx_ring->ring_handle = rh;
1484 1484
1485 1485 infop->mri_driver = (mac_ring_driver_t)tx_ring;
1486 1486 infop->mri_start = xgell_tx_ring_start;
1487 1487 infop->mri_stop = xgell_tx_ring_stop;
1488 1488 infop->mri_tx = xgell_ring_tx;
1489 1489 infop->mri_stat = xgell_tx_ring_stat;
1490 1490
1491 1491 break;
1492 1492 }
1493 1493 default:
1494 1494 break;
1495 1495 }
1496 1496 }
1497 1497
/*
 * GLDv3 mc_fill_group callback: expose one pseudo rx group per configured
 * group, with addmac/remmac hooks for per-group unicast filtering.  TX
 * groups are never requested (framework-managed), hence the assert.
 */
1498 1498 void
1499 1499 xgell_fill_group(void *arg, mac_ring_type_t rtype, const int index,
1500 1500 mac_group_info_t *infop, mac_group_handle_t gh)
1501 1501 {
1502 1502 xgelldev_t *lldev = (xgelldev_t *)arg;
1503 1503
1504 1504 switch (rtype) {
1505 1505 case MAC_RING_TYPE_RX: {
1506 1506 xgell_rx_ring_t *rx_ring;
1507 1507
1508 1508 xge_assert(index < lldev->init_rx_groups);
1509 1509
1510 1510 rx_ring = lldev->rx_ring + index;
1511 1511
1512 1512 rx_ring->group_handle = gh;
1513 1513
1514 1514 infop->mgi_driver = (mac_group_driver_t)rx_ring;
1515 1515 infop->mgi_start = NULL;
1516 1516 infop->mgi_stop = NULL;
1517 1517 infop->mgi_addmac = xgell_addmac;
1518 1518 infop->mgi_remmac = xgell_remmac;
1519 1519 infop->mgi_count = lldev->init_rx_rings / lldev->init_rx_groups;
1520 1520
1521 1521 break;
1522 1522 }
1523 1523 case MAC_RING_TYPE_TX:
1524 1524 xge_assert(0);
1525 1525 break;
1526 1526 default:
1527 1527 break;
1528 1528 }
1529 1529 }
1530 1530
1531 1531 /*
1532 1532 * xgell_macaddr_set
1533 1533 */
/*
 * Program one MAC address into hardware slot `index` via the HAL.
 * Returns 0 on success, EIO on HAL failure.  Caller is expected to hold
 * lldev->genlock (see xgell_addmac).
 */
1534 1534 static int
1535 1535 xgell_maddr_set(xgelldev_t *lldev, int index, uint8_t *macaddr)
1536 1536 {
1537 1537 xge_hal_device_t *hldev = lldev->devh;
1538 1538 xge_hal_status_e status;
1539 1539
1540 1540 xge_debug_ll(XGE_TRACE, "%s", "xgell_maddr_set");
1541 1541
1542 1542 xge_debug_ll(XGE_TRACE,
1543 1543 "setting macaddr: 0x%02x-%02x-%02x-%02x-%02x-%02x",
1544 1544 macaddr[0], macaddr[1], macaddr[2],
1545 1545 macaddr[3], macaddr[4], macaddr[5]);
1546 1546
1547 1547 status = xge_hal_device_macaddr_set(hldev, index, (uchar_t *)macaddr);
1548 1548
1549 1549 if (status != XGE_HAL_OK) {
1550 1550 xge_debug_ll(XGE_ERR, "%s%d: can not set mac address",
1551 1551 XGELL_IFNAME, lldev->instance);
1552 1552 return (EIO);
1553 1553 }
1554 1554
1555 1555 return (0);
1556 1556 }
1557 1557
1558 1558 /*
1559 1559 * xgell_rx_dtr_term
1560 1560 *
1561 1561 * Function will be called by HAL to terminate all DTRs for
1562 1562 * Ring(s) type of channels.
1563 1563 */
/*
 * HAL dtr_term callback for ring channels: for each descriptor still
 * POSTED at channel-close time, free the dtr and return its rx buffer to
 * the ring's buffer pool, under the pool lock.
 */
1564 1564 static void
1565 1565 xgell_rx_dtr_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
1566 1566 xge_hal_dtr_state_e state, void *userdata, xge_hal_channel_reopen_e reopen)
1567 1567 {
1568 1568 xgell_rxd_priv_t *rxd_priv =
1569 1569 ((xgell_rxd_priv_t *)xge_hal_ring_dtr_private(channelh, dtrh));
1570 1570 xgell_rx_buffer_t *rx_buffer = rxd_priv->rx_buffer;
1571 1571
1572 1572 if (state == XGE_HAL_DTR_STATE_POSTED) {
1573 1573 xgell_rx_ring_t *ring = rx_buffer->ring;
1574 1574
1575 1575 mutex_enter(&ring->bf_pool.pool_lock);
1576 1576 xge_hal_ring_dtr_free(channelh, dtrh);
1577 1577 xgell_rx_buffer_release(rx_buffer);
1578 1578 mutex_exit(&ring->bf_pool.pool_lock);
1579 1579 }
1580 1580 }
1581 1581
1582 1582 /*
1583 1583 * To open a rx ring.
1584 1584 */
/*
 * Bring one rx ring live: create its buffer pool, open the HAL ring
 * channel with the 1-buffer-mode callbacks, load the per-ring MAC slot
 * table, and initialize polling state.  Idempotent: returns B_TRUE
 * immediately if the ring is already live; B_FALSE on any failure (the
 * buffer pool is destroyed again if the channel open fails).
 */
1585 1585 static boolean_t
1586 1586 xgell_rx_ring_open(xgell_rx_ring_t *rx_ring)
1587 1587 {
1588 1588 xge_hal_status_e status;
1589 1589 xge_hal_channel_attr_t attr;
1590 1590 xgelldev_t *lldev = rx_ring->lldev;
1591 1591 xge_hal_device_t *hldev = lldev->devh;
1592 1592
1593 1593 if (rx_ring->live)
1594 1594 return (B_TRUE);
1595 1595
1596 1596 /* Create the buffer pool first */
1597 1597 if (!xgell_rx_create_buffer_pool(rx_ring)) {
1598 1598 xge_debug_ll(XGE_ERR, "can not create buffer pool for ring: %d",
1599 1599 rx_ring->index);
1600 1600 return (B_FALSE);
1601 1601 }
1602 1602
1603 1603 /* Default ring initialization */
1604 1604 attr.post_qid = rx_ring->index;
1605 1605 attr.compl_qid = 0;
1606 1606 attr.callback = xgell_rx_1b_callback;
1607 1607 attr.per_dtr_space = sizeof (xgell_rxd_priv_t);
1608 1608 attr.flags = 0;
1609 1609 attr.type = XGE_HAL_CHANNEL_TYPE_RING;
1610 1610 attr.dtr_init = xgell_rx_dtr_replenish;
1611 1611 attr.dtr_term = xgell_rx_dtr_term;
1612 1612 attr.userdata = rx_ring;
1613 1613
1614 1614 status = xge_hal_channel_open(lldev->devh, &attr, &rx_ring->channelh,
1615 1615 XGE_HAL_CHANNEL_OC_NORMAL);
1616 1616 if (status != XGE_HAL_OK) {
1617 1617 xge_debug_ll(XGE_ERR, "%s%d: cannot open Rx channel got status "
1618 1618 " code %d", XGELL_IFNAME, lldev->instance, status);
1619 1619 (void) xgell_rx_destroy_buffer_pool(rx_ring);
1620 1620 return (B_FALSE);
1621 1621 }
1622 1622
1623 1623 xgell_rx_ring_maddr_init(rx_ring);
1624 1624
1625 1625 mutex_init(&rx_ring->ring_lock, NULL, MUTEX_DRIVER,
1626 1626 DDI_INTR_PRI(hldev->irqh));
1627 1627
1628 1628 rx_ring->poll_bytes = -1;
1629 1629 rx_ring->polled_bytes = 0;
1630 1630 rx_ring->poll_mp = NULL;
1631 1631 rx_ring->live = B_TRUE;
1632 1632
1633 1633 xge_debug_ll(XGE_TRACE, "RX ring [%d] is opened successfully",
1634 1634 rx_ring->index);
1635 1635
1636 1636 return (B_TRUE);
1637 1637 }
1638 1638
/*
 * Tear down one live rx ring: close the HAL channel (which runs
 * xgell_rx_dtr_term on posted descriptors), then destroy the buffer pool.
 * NOTE(review): if xgell_rx_destroy_buffer_pool() fails, `live` stays
 * B_TRUE yet the channel is gone and ring_lock is destroyed anyway —
 * a later close attempt would re-enter here; confirm intended semantics.
 */
1639 1639 static void
1640 1640 xgell_rx_ring_close(xgell_rx_ring_t *rx_ring)
1641 1641 {
1642 1642 if (!rx_ring->live)
1643 1643 return;
1644 1644 xge_hal_channel_close(rx_ring->channelh, XGE_HAL_CHANNEL_OC_NORMAL);
1645 1645 rx_ring->channelh = NULL;
1646 1646 /* This may not clean up all used buffers, driver will handle it */
1647 1647 if (xgell_rx_destroy_buffer_pool(rx_ring))
1648 1648 rx_ring->live = B_FALSE;
1649 1649
1650 1650 mutex_destroy(&rx_ring->ring_lock);
1651 1651 }
1652 1652
1653 1653 /*
1654 1654 * xgell_rx_open
1655 1655 * @lldev: the link layer object
1656 1656 *
1657 1657 * Initialize and open all RX channels.
1658 1658 */
1659 1659 static boolean_t
1660 1660 xgell_rx_open(xgelldev_t *lldev)
1661 1661 {
1662 1662 xgell_rx_ring_t *rx_ring;
1663 1663 int i;
1664 1664
/* Already open (at least one ring live): nothing to do. */
1665 1665 if (lldev->live_rx_rings != 0)
1666 1666 return (B_TRUE);
1667 1667
1668 1668 lldev->live_rx_rings = 0;
1669 1669
1670 1670 /*
1671 1671 * Initialize all rings
1672 1672 */
1673 1673 for (i = 0; i < lldev->init_rx_rings; i++) {
1674 1674 rx_ring = &lldev->rx_ring[i];
1675 1675 rx_ring->index = i;
1676 1676 rx_ring->lldev = lldev;
1677 1677 rx_ring->live = B_FALSE;
1678 1678
/* On failure the caller (xgell_initiate_start) closes what was opened. */
1679 1679 if (!xgell_rx_ring_open(rx_ring))
1680 1680 return (B_FALSE);
1681 1681
1682 1682 lldev->live_rx_rings++;
1683 1683 }
1684 1684
1685 1685 return (B_TRUE);
1686 1686 }
1687 1687
/* Close every live rx ring and decrement the live count back to zero. */
1688 1688 static void
1689 1689 xgell_rx_close(xgelldev_t *lldev)
1690 1690 {
1691 1691 xgell_rx_ring_t *rx_ring;
1692 1692 int i;
1693 1693
1694 1694 if (lldev->live_rx_rings == 0)
1695 1695 return;
1696 1696
1697 1697 /*
1698 1698 * Close all rx rings
1699 1699 */
1700 1700 for (i = 0; i < lldev->init_rx_rings; i++) {
1701 1701 rx_ring = &lldev->rx_ring[i];
1702 1702
1703 1703 if (rx_ring->live) {
1704 1704 xgell_rx_ring_close(rx_ring);
1705 1705 lldev->live_rx_rings--;
1706 1706 }
1707 1707 }
1708 1708
1709 1709 xge_assert(lldev->live_rx_rings == 0);
1710 1710 }
1711 1711
1712 1712 /*
1713 1713 * xgell_tx_term
1714 1714 *
1715 1715 * Function will be called by HAL to terminate all DTRs for
1716 1716 * Fifo(s) type of channels.
1717 1717 */
/*
 * HAL dtr_term callback for fifo channels: for each descriptor still
 * POSTED at close time, unbind/free its DMA handles, free the dtr, and
 * free the associated mblk chain.
 */
1718 1718 static void
1719 1719 xgell_tx_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
1720 1720 xge_hal_dtr_state_e state, void *userdata, xge_hal_channel_reopen_e reopen)
1721 1721 {
1722 1722 xgell_txd_priv_t *txd_priv =
1723 1723 ((xgell_txd_priv_t *)xge_hal_fifo_dtr_private(dtrh));
1724 1724 mblk_t *mp = txd_priv->mblk;
1725 1725 int i;
1726 1726
1727 1727 /*
1728 1728 * for Tx we must clean up the DTR *only* if it has been
1729 1729 * posted!
1730 1730 */
1731 1731 if (state != XGE_HAL_DTR_STATE_POSTED) {
1732 1732 return;
1733 1733 }
1734 1734
1735 1735 for (i = 0; i < txd_priv->handle_cnt; i++) {
1736 1736 xge_assert(txd_priv->dma_handles[i]);
1737 1737 (void) ddi_dma_unbind_handle(txd_priv->dma_handles[i]);
1738 1738 ddi_dma_free_handle(&txd_priv->dma_handles[i]);
1739 1739 txd_priv->dma_handles[i] = 0;
1740 1740 }
1741 1741
1742 1742 xge_hal_fifo_dtr_free(channelh, dtrh);
1743 1743
1744 1744 if (mp) {
1745 1745 txd_priv->mblk = NULL;
1746 1746 freemsg(mp);
1747 1747 }
1748 1748 }
1749 1749
/*
 * Bring one tx ring live: open the HAL fifo channel with the completion
 * and termination callbacks.  Idempotent; B_FALSE on HAL failure.
 */
1750 1750 static boolean_t
1751 1751 xgell_tx_ring_open(xgell_tx_ring_t *tx_ring)
1752 1752 {
1753 1753 xge_hal_status_e status;
1754 1754 xge_hal_channel_attr_t attr;
1755 1755 xgelldev_t *lldev = tx_ring->lldev;
1756 1756
1757 1757 if (tx_ring->live)
1758 1758 return (B_TRUE);
1759 1759
1760 1760 attr.post_qid = tx_ring->index;
1761 1761 attr.compl_qid = 0;
1762 1762 attr.callback = xgell_xmit_compl;
1763 1763 attr.per_dtr_space = sizeof (xgell_txd_priv_t);
1764 1764 attr.flags = 0;
1765 1765 attr.type = XGE_HAL_CHANNEL_TYPE_FIFO;
1766 1766 attr.dtr_init = NULL;
1767 1767 attr.dtr_term = xgell_tx_term;
1768 1768 attr.userdata = tx_ring;
1769 1769
1770 1770 status = xge_hal_channel_open(lldev->devh, &attr, &tx_ring->channelh,
1771 1771 XGE_HAL_CHANNEL_OC_NORMAL);
1772 1772 if (status != XGE_HAL_OK) {
1773 1773 xge_debug_ll(XGE_ERR, "%s%d: cannot open Tx channel got status "
1774 1774 "code %d", XGELL_IFNAME, lldev->instance, status);
1775 1775 return (B_FALSE);
1776 1776 }
1777 1777
1778 1778 tx_ring->live = B_TRUE;
1779 1779
1780 1780 return (B_TRUE);
1781 1781 }
1782 1782
/* Close one live tx ring's fifo channel (runs xgell_tx_term on posted dtrs). */
1783 1783 static void
1784 1784 xgell_tx_ring_close(xgell_tx_ring_t *tx_ring)
1785 1785 {
1786 1786 if (!tx_ring->live)
1787 1787 return;
1788 1788 xge_hal_channel_close(tx_ring->channelh, XGE_HAL_CHANNEL_OC_NORMAL);
1789 1789 tx_ring->live = B_FALSE;
1790 1790 }
1791 1791
1792 1792 /*
1793 1793 * xgell_tx_open
1794 1794 * @lldev: the link layer object
1795 1795 *
1796 1796 * Initialize and open all TX channels.
1797 1797 */
1798 1798 static boolean_t
1799 1799 xgell_tx_open(xgelldev_t *lldev)
1800 1800 {
1801 1801 xgell_tx_ring_t *tx_ring;
1802 1802 int i;
1803 1803
/* Already open (at least one ring live): nothing to do. */
1804 1804 if (lldev->live_tx_rings != 0)
1805 1805 return (B_TRUE);
1806 1806
1807 1807 lldev->live_tx_rings = 0;
1808 1808
1809 1809 /*
1810 1810 * Enable rings by reserve sequence to match the h/w sequences.
1811 1811 */
1812 1812 for (i = 0; i < lldev->init_tx_rings; i++) {
1813 1813 tx_ring = &lldev->tx_ring[i];
1814 1814 tx_ring->index = i;
1815 1815 tx_ring->lldev = lldev;
1816 1816 tx_ring->live = B_FALSE;
1817 1817
/* On failure the caller (xgell_initiate_start) closes what was opened. */
1818 1818 if (!xgell_tx_ring_open(tx_ring))
1819 1819 return (B_FALSE);
1820 1820
1821 1821 lldev->live_tx_rings++;
1822 1822 }
1823 1823
1824 1824 return (B_TRUE);
1825 1825 }
1826 1826
/* Close every live tx ring and decrement the live count. */
1827 1827 static void
1828 1828 xgell_tx_close(xgelldev_t *lldev)
1829 1829 {
1830 1830 xgell_tx_ring_t *tx_ring;
1831 1831 int i;
1832 1832
1833 1833 if (lldev->live_tx_rings == 0)
1834 1834 return;
1835 1835
1836 1836 /*
1837 1837 * Enable rings by reserve sequence to match the h/w sequences.
1838 1838 */
1839 1839 for (i = 0; i < lldev->init_tx_rings; i++) {
1840 1840 tx_ring = &lldev->tx_ring[i];
1841 1841 if (tx_ring->live) {
1842 1842 xgell_tx_ring_close(tx_ring);
1843 1843 lldev->live_tx_rings--;
1844 1844 }
1845 1845 }
1846 1846 }
1847 1847
/*
 * Device bring-up sequence used by xgell_m_start(): validate and program
 * the MTU, tune RTI UFC counters for jumbo vs. normal frames, enable the
 * device, open all rx then tx rings, and finally enable interrupts.  Each
 * failure path disables the device again and waits 1.5s for quiescence
 * before returning.  Returns 0, EINVAL (bad MTU), EIO, or ENOMEM.
 */
1848 1848 static int
1849 1849 xgell_initiate_start(xgelldev_t *lldev)
1850 1850 {
1851 1851 xge_hal_status_e status;
1852 1852 xge_hal_device_t *hldev = lldev->devh;
1853 1853 int maxpkt = hldev->config.mtu;
1854 1854
1855 1855 /* check initial mtu before enabling the device */
1856 1856 status = xge_hal_device_mtu_check(lldev->devh, maxpkt);
1857 1857 if (status != XGE_HAL_OK) {
1858 1858 xge_debug_ll(XGE_ERR, "%s%d: MTU size %d is invalid",
1859 1859 XGELL_IFNAME, lldev->instance, maxpkt);
1860 1860 return (EINVAL);
1861 1861 }
1862 1862
1863 1863 /* set initial mtu before enabling the device */
1864 1864 status = xge_hal_device_mtu_set(lldev->devh, maxpkt);
1865 1865 if (status != XGE_HAL_OK) {
1866 1866 xge_debug_ll(XGE_ERR, "%s%d: can not set new MTU %d",
1867 1867 XGELL_IFNAME, lldev->instance, maxpkt);
1868 1868 return (EIO);
1869 1869 }
1870 1870
1871 1871 /* tune jumbo/normal frame UFC counters */
1872 1872 hldev->config.ring.queue[XGELL_RX_RING_MAIN].rti.ufc_b =
1873 1873 (maxpkt > XGE_HAL_DEFAULT_MTU) ?
1874 1874 XGE_HAL_DEFAULT_RX_UFC_B_J :
1875 1875 XGE_HAL_DEFAULT_RX_UFC_B_N;
1876 1876
1877 1877 hldev->config.ring.queue[XGELL_RX_RING_MAIN].rti.ufc_c =
1878 1878 (maxpkt > XGE_HAL_DEFAULT_MTU) ?
1879 1879 XGE_HAL_DEFAULT_RX_UFC_C_J :
1880 1880 XGE_HAL_DEFAULT_RX_UFC_C_N;
1881 1881
1882 1882 /* now, enable the device */
1883 1883 status = xge_hal_device_enable(lldev->devh);
1884 1884 if (status != XGE_HAL_OK) {
1885 1885 xge_debug_ll(XGE_ERR, "%s%d: can not enable the device",
1886 1886 XGELL_IFNAME, lldev->instance);
1887 1887 return (EIO);
1888 1888 }
1889 1889
1890 1890 if (!xgell_rx_open(lldev)) {
1891 1891 status = xge_hal_device_disable(lldev->devh);
1892 1892 if (status != XGE_HAL_OK) {
1893 1893 u64 adapter_status;
1894 1894 (void) xge_hal_device_status(lldev->devh,
1895 1895 &adapter_status);
1896 1896 xge_debug_ll(XGE_ERR, "%s%d: can not safely disable "
1897 1897 "the device. adaper status 0x%"PRIx64
1898 1898 " returned status %d",
1899 1899 XGELL_IFNAME, lldev->instance,
1900 1900 (uint64_t)adapter_status, status);
1901 1901 }
1902 1902 xgell_rx_close(lldev);
1903 1903 xge_os_mdelay(1500);
1904 1904 return (ENOMEM);
1905 1905 }
1906 1906
1907 1907 if (!xgell_tx_open(lldev)) {
1908 1908 status = xge_hal_device_disable(lldev->devh);
1909 1909 if (status != XGE_HAL_OK) {
1910 1910 u64 adapter_status;
1911 1911 (void) xge_hal_device_status(lldev->devh,
1912 1912 &adapter_status);
1913 1913 xge_debug_ll(XGE_ERR, "%s%d: can not safely disable "
1914 1914 "the device. adaper status 0x%"PRIx64
1915 1915 " returned status %d",
1916 1916 XGELL_IFNAME, lldev->instance,
1917 1917 (uint64_t)adapter_status, status);
1918 1918 }
1919 1919 xgell_tx_close(lldev);
1920 1920 xgell_rx_close(lldev);
1921 1921 xge_os_mdelay(1500);
1922 1922 return (ENOMEM);
1923 1923 }
1924 1924
1925 1925 /* time to enable interrupts */
1926 1926 (void) xge_enable_intrs(lldev);
1927 1927 xge_hal_device_intr_enable(lldev->devh);
1928 1928
1929 1929 lldev->is_initialized = 1;
1930 1930
1931 1931 return (0);
1932 1932 }
1933 1933
/*
 * Device tear-down sequence used by xgell_m_stop(): mark the driver
 * uninitialized first (stops the tx path), disable the device and both
 * interrupt layers, wait 1.5s for in-flight IRQs to drain, flush the HAL
 * event queue, then close all rings.
 */
1934 1934 static void
1935 1935 xgell_initiate_stop(xgelldev_t *lldev)
1936 1936 {
1937 1937 xge_hal_status_e status;
1938 1938
1939 1939 lldev->is_initialized = 0;
1940 1940
1941 1941 status = xge_hal_device_disable(lldev->devh);
1942 1942 if (status != XGE_HAL_OK) {
1943 1943 u64 adapter_status;
1944 1944 (void) xge_hal_device_status(lldev->devh, &adapter_status);
1945 1945 xge_debug_ll(XGE_ERR, "%s%d: can not safely disable "
1946 1946 "the device. adaper status 0x%"PRIx64" returned status %d",
1947 1947 XGELL_IFNAME, lldev->instance,
1948 1948 (uint64_t)adapter_status, status);
1949 1949 }
1950 1950 xge_hal_device_intr_disable(lldev->devh);
1951 1951 /* disable OS ISR's */
1952 1952 xge_disable_intrs(lldev);
1953 1953
1954 1954 xge_debug_ll(XGE_TRACE, "%s",
1955 1955 "waiting for device irq to become quiescent...");
1956 1956 xge_os_mdelay(1500);
1957 1957
1958 1958 xge_queue_flush(xge_hal_device_queue(lldev->devh));
1959 1959
1960 1960 xgell_rx_close(lldev);
1961 1961 xgell_tx_close(lldev);
1962 1962 }
1963 1963
1964 1964 /*
1965 1965 * xgell_m_start
1966 1966 * @arg: pointer to device private strucutre(hldev)
1967 1967 *
1968 1968 * This function is called by MAC Layer to enable the XFRAME
1969 1969 * firmware to generate interrupts and also prepare the
1970 1970 * driver to call mac_rx for delivering receive packets
1971 1971 * to MAC Layer.
1972 1972 */
1973 1973 static int
1974 1974 xgell_m_start(void *arg)
1975 1975 {
1976 1976 xgelldev_t *lldev = arg;
1977 1977 xge_hal_device_t *hldev = lldev->devh;
1978 1978 int ret;
1979 1979
1980 1980 xge_debug_ll(XGE_TRACE, "%s%d: M_START", XGELL_IFNAME,
1981 1981 lldev->instance);
1982 1982
1983 1983 mutex_enter(&lldev->genlock);
1984 1984
1985 1985 if (lldev->is_initialized) {
1986 1986 xge_debug_ll(XGE_ERR, "%s%d: device is already initialized",
1987 1987 XGELL_IFNAME, lldev->instance);
1988 1988 mutex_exit(&lldev->genlock);
1989 1989 return (EINVAL);
1990 1990 }
1991 1991
1992 1992 hldev->terminating = 0;
/* xgell_initiate_start() returns 0 on success, errno on failure. */
1993 1993 if (ret = xgell_initiate_start(lldev)) {
1994 1994 mutex_exit(&lldev->genlock);
1995 1995 return (ret);
1996 1996 }
1997 1997
/* Arm the periodic device-poll timer; cancelled in xgell_m_stop(). */
1998 1998 lldev->timeout_id = timeout(xge_device_poll, hldev, XGE_DEV_POLL_TICKS);
1999 1999
2000 2000 mutex_exit(&lldev->genlock);
2001 2001
2002 2002 return (0);
2003 2003 }
2004 2004
2005 2005 /*
2006 2006 * xgell_m_stop
2007 2007 * @arg: pointer to device private data (hldev)
2008 2008 *
2009 2009 * This function is called by the MAC Layer to disable
2010 2010 * the XFRAME firmware for generating any interrupts and
2011 2011 * also stop the driver from calling mac_rx() for
2012 2012 * delivering data packets to the MAC Layer.
2013 2013 */
2014 2014 static void
2015 2015 xgell_m_stop(void *arg)
2016 2016 {
2017 2017 xgelldev_t *lldev = arg;
2018 2018 xge_hal_device_t *hldev = lldev->devh;
2019 2019
2020 2020 xge_debug_ll(XGE_TRACE, "%s", "MAC_STOP");
2021 2021
2022 2022 mutex_enter(&lldev->genlock);
2023 2023 if (!lldev->is_initialized) {
2024 2024 xge_debug_ll(XGE_ERR, "%s", "device is not initialized...");
2025 2025 mutex_exit(&lldev->genlock);
2026 2026 return;
2027 2027 }
2028 2028
2029 2029 xge_hal_device_terminating(hldev);
2030 2030 xgell_initiate_stop(lldev);
2031 2031
2032 2032 /* reset device */
2033 2033 (void) xge_hal_device_reset(lldev->devh);
2034 2034
2035 2035 mutex_exit(&lldev->genlock);
2036 2036
2037 2037 if (lldev->timeout_id != 0) {
2038 2038 (void) untimeout(lldev->timeout_id);
2039 2039 }
2040 2040
2041 2041 xge_debug_ll(XGE_TRACE, "%s", "returning back to MAC Layer...");
2042 2042 }
2043 2043
2044 2044 /*
2045 2045 * xgell_onerr_reset
2046 2046 * @lldev: pointer to xgelldev_t structure
2047 2047 *
2048 2048 * This function is called by HAL Event framework to reset the HW
2049 2049 * This function is must be called with genlock taken.
2050 2050 */
2051 2051 int
2052 2052 xgell_onerr_reset(xgelldev_t *lldev)
2053 2053 {
2054 2054 int rc = 0;
2055 2055
2056 2056 if (!lldev->is_initialized) {
2057 2057 xge_debug_ll(XGE_ERR, "%s%d: can not reset",
2058 2058 XGELL_IFNAME, lldev->instance);
2059 2059 return (rc);
2060 2060 }
2061 2061
2062 2062 lldev->in_reset = 1;
2063 2063 xgell_initiate_stop(lldev);
2064 2064
2065 2065 /* reset device */
2066 2066 (void) xge_hal_device_reset(lldev->devh);
2067 2067
2068 2068 rc = xgell_initiate_start(lldev);
2069 2069 lldev->in_reset = 0;
2070 2070
2071 2071 return (rc);
2072 2072 }
2073 2073
2074 2074 /*
2075 2075 * xgell_m_multicst
2076 2076 * @arg: pointer to device private strucutre(hldev)
2077 2077 * @add:
2078 2078 * @mc_addr:
2079 2079 *
2080 2080 * This function is called by MAC Layer to enable or
2081 2081 * disable device-level reception of specific multicast addresses.
2082 2082 */
2083 2083 static int
2084 2084 xgell_m_multicst(void *arg, boolean_t add, const uint8_t *mc_addr)
2085 2085 {
2086 2086 xge_hal_status_e status;
2087 2087 xgelldev_t *lldev = (xgelldev_t *)arg;
2088 2088 xge_hal_device_t *hldev = lldev->devh;
2089 2089
2090 2090 xge_debug_ll(XGE_TRACE, "M_MULTICAST add %d", add);
2091 2091
2092 2092 mutex_enter(&lldev->genlock);
2093 2093
2094 2094 if (!lldev->is_initialized) {
2095 2095 xge_debug_ll(XGE_ERR, "%s%d: can not set multicast",
2096 2096 XGELL_IFNAME, lldev->instance);
2097 2097 mutex_exit(&lldev->genlock);
2098 2098 return (EIO);
2099 2099 }
2100 2100
2101 2101 /* FIXME: missing HAL functionality: enable_one() */
2102 2102
2103 2103 status = (add) ?
2104 2104 xge_hal_device_mcast_enable(hldev) :
2105 2105 xge_hal_device_mcast_disable(hldev);
2106 2106
2107 2107 if (status != XGE_HAL_OK) {
2108 2108 xge_debug_ll(XGE_ERR, "failed to %s multicast, status %d",
2109 2109 add ? "enable" : "disable", status);
2110 2110 mutex_exit(&lldev->genlock);
2111 2111 return (EIO);
2112 2112 }
2113 2113
2114 2114 mutex_exit(&lldev->genlock);
2115 2115
2116 2116 return (0);
2117 2117 }
2118 2118
2119 2119
2120 2120 /*
2121 2121 * xgell_m_promisc
2122 2122 * @arg: pointer to device private strucutre(hldev)
2123 2123 * @on:
2124 2124 *
2125 2125 * This function is called by MAC Layer to enable or
2126 2126 * disable the reception of all the packets on the medium
2127 2127 */
2128 2128 static int
2129 2129 xgell_m_promisc(void *arg, boolean_t on)
2130 2130 {
2131 2131 xgelldev_t *lldev = (xgelldev_t *)arg;
2132 2132 xge_hal_device_t *hldev = lldev->devh;
2133 2133
2134 2134 mutex_enter(&lldev->genlock);
2135 2135
2136 2136 xge_debug_ll(XGE_TRACE, "%s", "MAC_PROMISC_SET");
2137 2137
2138 2138 if (!lldev->is_initialized) {
2139 2139 xge_debug_ll(XGE_ERR, "%s%d: can not set promiscuous",
2140 2140 XGELL_IFNAME, lldev->instance);
2141 2141 mutex_exit(&lldev->genlock);
2142 2142 return (EIO);
2143 2143 }
2144 2144
2145 2145 if (on) {
2146 2146 xge_hal_device_promisc_enable(hldev);
2147 2147 } else {
2148 2148 xge_hal_device_promisc_disable(hldev);
2149 2149 }
2150 2150
2151 2151 mutex_exit(&lldev->genlock);
2152 2152
2153 2153 return (0);
2154 2154 }
2155 2155
/*
 * xgell_m_stat
 * @arg: pointer to device private structure (xgelldev_t)
 * @stat: MAC/ether statistic id requested by the MAC layer
 * @val: out parameter receiving the 64-bit counter value
 *
 * GLDv3 mc_getstat(9E) entry point: map a MAC statistic id onto the
 * corresponding XFRAME hardware counter.  Many hardware counters are
 * kept as a 32-bit value plus a 32-bit overflow register; those pairs
 * are combined into a single 64-bit value here.
 *
 * Returns 0 on success, EAGAIN if the device is down or the hardware
 * stats cannot be read, ENOTSUP for statistics not maintained.
 */
static int
xgell_m_stat(void *arg, uint_t stat, uint64_t *val)
{
	xge_hal_stats_hw_info_t *hw_info;
	xgelldev_t *lldev = (xgelldev_t *)arg;
	xge_hal_device_t *hldev = lldev->devh;

	xge_debug_ll(XGE_TRACE, "%s", "MAC_STATS_GET");

	mutex_enter(&lldev->genlock);

	if (!lldev->is_initialized) {
		mutex_exit(&lldev->genlock);
		return (EAGAIN);
	}

	/* hw_info points at the HAL-maintained hardware counter snapshot */
	if (xge_hal_stats_hw(hldev, &hw_info) != XGE_HAL_OK) {
		mutex_exit(&lldev->genlock);
		return (EAGAIN);
	}

	switch (stat) {
	case MAC_STAT_IFSPEED:
		*val = 10000000000ull; /* 10G */
		break;

	case MAC_STAT_MULTIRCV:
		*val = ((u64) hw_info->rmac_vld_mcst_frms_oflow << 32) |
		    hw_info->rmac_vld_mcst_frms;
		break;

	case MAC_STAT_BRDCSTRCV:
		*val = ((u64) hw_info->rmac_vld_bcst_frms_oflow << 32) |
		    hw_info->rmac_vld_bcst_frms;
		break;

	case MAC_STAT_MULTIXMT:
		*val = ((u64) hw_info->tmac_mcst_frms_oflow << 32) |
		    hw_info->tmac_mcst_frms;
		break;

	case MAC_STAT_BRDCSTXMT:
		*val = ((u64) hw_info->tmac_bcst_frms_oflow << 32) |
		    hw_info->tmac_bcst_frms;
		break;

	case MAC_STAT_RBYTES:
		*val = ((u64) hw_info->rmac_ttl_octets_oflow << 32) |
		    hw_info->rmac_ttl_octets;
		break;

	case MAC_STAT_NORCVBUF:
		*val = hw_info->rmac_drop_frms;
		break;

	case MAC_STAT_IERRORS:
		*val = ((u64) hw_info->rmac_discarded_frms_oflow << 32) |
		    hw_info->rmac_discarded_frms;
		break;

	case MAC_STAT_OBYTES:
		*val = ((u64) hw_info->tmac_ttl_octets_oflow << 32) |
		    hw_info->tmac_ttl_octets;
		break;

	case MAC_STAT_NOXMTBUF:
		*val = hw_info->tmac_drop_frms;
		break;

	case MAC_STAT_OERRORS:
		*val = ((u64) hw_info->tmac_any_err_frms_oflow << 32) |
		    hw_info->tmac_any_err_frms;
		break;

	case MAC_STAT_IPACKETS:
		*val = ((u64) hw_info->rmac_vld_frms_oflow << 32) |
		    hw_info->rmac_vld_frms;
		break;

	case MAC_STAT_OPACKETS:
		*val = ((u64) hw_info->tmac_frms_oflow << 32) |
		    hw_info->tmac_frms;
		break;

	case ETHER_STAT_FCS_ERRORS:
		*val = hw_info->rmac_fcs_err_frms;
		break;

	case ETHER_STAT_TOOLONG_ERRORS:
		*val = hw_info->rmac_long_frms;
		break;

	case ETHER_STAT_LINK_DUPLEX:
		/* 10GbE link is always full duplex */
		*val = LINK_DUPLEX_FULL;
		break;

	default:
		mutex_exit(&lldev->genlock);
		return (ENOTSUP);
	}

	mutex_exit(&lldev->genlock);

	return (0);
}
2268 2268
2269 2269 /*
2270 2270 * Retrieve a value for one of the statistics for a particular rx ring
2271 2271 */
2272 2272 int
2273 2273 xgell_rx_ring_stat(mac_ring_driver_t rh, uint_t stat, uint64_t *val)
2274 2274 {
2275 2275 xgell_rx_ring_t *rx_ring = (xgell_rx_ring_t *)rh;
2276 2276
2277 2277 switch (stat) {
2278 2278 case MAC_STAT_RBYTES:
2279 2279 *val = rx_ring->rx_bytes;
2280 2280 break;
2281 2281
2282 2282 case MAC_STAT_IPACKETS:
2283 2283 *val = rx_ring->rx_pkts;
2284 2284 break;
2285 2285
2286 2286 default:
2287 2287 *val = 0;
2288 2288 return (ENOTSUP);
2289 2289 }
2290 2290
2291 2291 return (0);
2292 2292 }
2293 2293
2294 2294 /*
2295 2295 * Retrieve a value for one of the statistics for a particular tx ring
2296 2296 */
2297 2297 int
2298 2298 xgell_tx_ring_stat(mac_ring_driver_t rh, uint_t stat, uint64_t *val)
2299 2299 {
2300 2300 xgell_tx_ring_t *tx_ring = (xgell_tx_ring_t *)rh;
2301 2301
2302 2302 switch (stat) {
2303 2303 case MAC_STAT_OBYTES:
2304 2304 *val = tx_ring->tx_bytes;
2305 2305 break;
2306 2306
2307 2307 case MAC_STAT_OPACKETS:
2308 2308 *val = tx_ring->tx_pkts;
2309 2309 break;
2310 2310
2311 2311 default:
2312 2312 *val = 0;
2313 2313 return (ENOTSUP);
2314 2314 }
2315 2315
2316 2316 return (0);
2317 2317 }
2318 2318
2319 2319 /*
2320 2320 * xgell_device_alloc - Allocate new LL device
2321 2321 */
2322 2322 int
2323 2323 xgell_device_alloc(xge_hal_device_h devh,
2324 2324 dev_info_t *dev_info, xgelldev_t **lldev_out)
2325 2325 {
2326 2326 xgelldev_t *lldev;
2327 2327 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
2328 2328 int instance = ddi_get_instance(dev_info);
2329 2329
2330 2330 *lldev_out = NULL;
2331 2331
2332 2332 xge_debug_ll(XGE_TRACE, "trying to register etherenet device %s%d...",
2333 2333 XGELL_IFNAME, instance);
2334 2334
2335 2335 lldev = kmem_zalloc(sizeof (xgelldev_t), KM_SLEEP);
2336 2336
2337 2337 lldev->devh = hldev;
2338 2338 lldev->instance = instance;
2339 2339 lldev->dev_info = dev_info;
2340 2340
2341 2341 *lldev_out = lldev;
2342 2342
2343 2343 ddi_set_driver_private(dev_info, (caddr_t)hldev);
2344 2344
2345 2345 return (DDI_SUCCESS);
2346 2346 }
2347 2347
/*
 * xgell_device_free
 * @lldev: LL device soft state allocated by xgell_device_alloc()
 *
 * Release the per-instance soft state.  Logs before freeing since the
 * trace message reads lldev->instance.
 */
void
xgell_device_free(xgelldev_t *lldev)
{
	xge_debug_ll(XGE_TRACE, "freeing device %s%d",
	    XGELL_IFNAME, lldev->instance);

	kmem_free(lldev, sizeof (xgelldev_t));
}
2359 2359
/*
 * xgell_m_ioctl
 * @arg: pointer to device private structure (xgelldev_t)
 * @wq: STREAMS write queue the reply is sent on
 * @mp: M_IOCTL message block
 *
 * GLDv3 mc_ioctl(9E) entry point.  Only the ndd(8) ND_GET/ND_SET
 * commands are supported; everything else is NAKed with EINVAL.
 * ND_SET requires network configuration privilege; ND_GET does not.
 */
static void
xgell_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
	xgelldev_t *lldev = arg;
	struct iocblk *iocp;
	int err = 0;
	int cmd;
	int need_privilege = 1;
	int ret = 0;


	iocp = (struct iocblk *)mp->b_rptr;
	iocp->ioc_error = 0;
	cmd = iocp->ioc_cmd;
	xge_debug_ll(XGE_TRACE, "MAC_IOCTL cmd 0x%x", cmd);
	switch (cmd) {
	case ND_GET:
		/* reads are unprivileged */
		need_privilege = 0;
		/* FALLTHRU */
	case ND_SET:
		break;
	default:
		xge_debug_ll(XGE_TRACE, "unknown cmd 0x%x", cmd);
		miocnak(wq, mp, 0, EINVAL);
		return;
	}

	if (need_privilege) {
		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
		if (err != 0) {
			xge_debug_ll(XGE_ERR,
			    "drv_priv(): rejected cmd 0x%x, err %d",
			    cmd, err);
			miocnak(wq, mp, 0, err);
			return;
		}
	}

	switch (cmd) {
	case ND_GET:
		/*
		 * If nd_getset() returns B_FALSE, the command was
		 * not valid (e.g. unknown name), so we just tell the
		 * top-level ioctl code to send a NAK (with code EINVAL).
		 *
		 * Otherwise, nd_getset() will have built the reply to
		 * be sent (but not actually sent it), so we tell the
		 * caller to send the prepared reply.
		 */
		ret = nd_getset(wq, lldev->ndp, mp);
		xge_debug_ll(XGE_TRACE, "%s", "got ndd get ioctl");
		break;

	case ND_SET:
		ret = nd_getset(wq, lldev->ndp, mp);
		xge_debug_ll(XGE_TRACE, "%s", "got ndd set ioctl");
		break;

	default:
		break;
	}

	if (ret == B_FALSE) {
		/* nd_getset() did not recognize the variable name */
		xge_debug_ll(XGE_ERR,
		    "nd_getset(): rejected cmd 0x%x, err %d",
		    cmd, err);
		miocnak(wq, mp, 0, EINVAL);
	} else {
		/* nd_getset() prepared the reply in mp; send it */
		mp->b_datap->db_type = iocp->ioc_error == 0 ?
		    M_IOCACK : M_IOCNAK;
		qreply(wq, mp);
	}
}
2436 2436
2437 2437
2438 2438 static boolean_t
2439 2439 xgell_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
2440 2440 {
2441 2441 xgelldev_t *lldev = arg;
2442 2442
2443 2443 xge_debug_ll(XGE_TRACE, "xgell_m_getcapab: %x", cap);
2444 2444
2445 2445 switch (cap) {
2446 2446 case MAC_CAPAB_HCKSUM: {
2447 2447 uint32_t *hcksum_txflags = cap_data;
2448 2448 *hcksum_txflags = HCKSUM_INET_FULL_V4 | HCKSUM_INET_FULL_V6 |
2449 2449 HCKSUM_IPHDRCKSUM;
2450 2450 break;
2451 2451 }
2452 2452 case MAC_CAPAB_LSO: {
2453 2453 mac_capab_lso_t *cap_lso = cap_data;
2454 2454
2455 2455 if (lldev->config.lso_enable) {
2456 2456 cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
2457 2457 cap_lso->lso_basic_tcp_ipv4.lso_max = XGELL_LSO_MAXLEN;
2458 2458 break;
2459 2459 } else {
2460 2460 return (B_FALSE);
2461 2461 }
2462 2462 }
2463 2463 case MAC_CAPAB_RINGS: {
2464 2464 mac_capab_rings_t *cap_rings = cap_data;
2465 2465
2466 2466 switch (cap_rings->mr_type) {
2467 2467 case MAC_RING_TYPE_RX:
2468 2468 cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
2469 2469 cap_rings->mr_rnum = lldev->init_rx_rings;
2470 2470 cap_rings->mr_gnum = lldev->init_rx_groups;
2471 2471 cap_rings->mr_rget = xgell_fill_ring;
2472 2472 cap_rings->mr_gget = xgell_fill_group;
2473 2473 break;
2474 2474 case MAC_RING_TYPE_TX:
2475 2475 cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
2476 2476 cap_rings->mr_rnum = lldev->init_tx_rings;
2477 2477 cap_rings->mr_gnum = 0;
2478 2478 cap_rings->mr_rget = xgell_fill_ring;
2479 2479 cap_rings->mr_gget = NULL;
2480 2480 break;
2481 2481 default:
2482 2482 break;
2483 2483 }
2484 2484 break;
2485 2485 }
2486 2486 default:
2487 2487 return (B_FALSE);
2488 2488 }
2489 2489 return (B_TRUE);
2490 2490 }
2491 2491
2492 2492 static int
2493 2493 xgell_stats_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2494 2494 {
2495 2495 xgelldev_t *lldev = (xgelldev_t *)cp;
2496 2496 xge_hal_status_e status;
2497 2497 int count = 0, retsize;
2498 2498 char *buf;
2499 2499
2500 2500 buf = kmem_alloc(XGELL_STATS_BUFSIZE, KM_SLEEP);
2501 2501 if (buf == NULL) {
2502 2502 return (ENOSPC);
2503 2503 }
2504 2504
2505 2505 status = xge_hal_aux_stats_tmac_read(lldev->devh, XGELL_STATS_BUFSIZE,
2506 2506 buf, &retsize);
2507 2507 if (status != XGE_HAL_OK) {
2508 2508 kmem_free(buf, XGELL_STATS_BUFSIZE);
2509 2509 xge_debug_ll(XGE_ERR, "tmac_read(): status %d", status);
2510 2510 return (EINVAL);
2511 2511 }
2512 2512 count += retsize;
2513 2513
2514 2514 status = xge_hal_aux_stats_rmac_read(lldev->devh,
2515 2515 XGELL_STATS_BUFSIZE - count,
2516 2516 buf+count, &retsize);
2517 2517 if (status != XGE_HAL_OK) {
2518 2518 kmem_free(buf, XGELL_STATS_BUFSIZE);
2519 2519 xge_debug_ll(XGE_ERR, "rmac_read(): status %d", status);
2520 2520 return (EINVAL);
2521 2521 }
2522 2522 count += retsize;
2523 2523
2524 2524 status = xge_hal_aux_stats_pci_read(lldev->devh,
2525 2525 XGELL_STATS_BUFSIZE - count, buf + count, &retsize);
2526 2526 if (status != XGE_HAL_OK) {
2527 2527 kmem_free(buf, XGELL_STATS_BUFSIZE);
2528 2528 xge_debug_ll(XGE_ERR, "pci_read(): status %d", status);
2529 2529 return (EINVAL);
2530 2530 }
2531 2531 count += retsize;
2532 2532
2533 2533 status = xge_hal_aux_stats_sw_dev_read(lldev->devh,
2534 2534 XGELL_STATS_BUFSIZE - count, buf + count, &retsize);
2535 2535 if (status != XGE_HAL_OK) {
2536 2536 kmem_free(buf, XGELL_STATS_BUFSIZE);
2537 2537 xge_debug_ll(XGE_ERR, "sw_dev_read(): status %d", status);
2538 2538 return (EINVAL);
2539 2539 }
2540 2540 count += retsize;
2541 2541
2542 2542 status = xge_hal_aux_stats_hal_read(lldev->devh,
2543 2543 XGELL_STATS_BUFSIZE - count, buf + count, &retsize);
2544 2544 if (status != XGE_HAL_OK) {
2545 2545 kmem_free(buf, XGELL_STATS_BUFSIZE);
2546 2546 xge_debug_ll(XGE_ERR, "pci_read(): status %d", status);
2547 2547 return (EINVAL);
2548 2548 }
2549 2549 count += retsize;
2550 2550
2551 2551 *(buf + count - 1) = '\0'; /* remove last '\n' */
2552 2552 (void) mi_mpprintf(mp, "%s", buf);
2553 2553 kmem_free(buf, XGELL_STATS_BUFSIZE);
2554 2554
2555 2555 return (0);
2556 2556 }
2557 2557
2558 2558 static int
2559 2559 xgell_pciconf_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2560 2560 {
2561 2561 xgelldev_t *lldev = (xgelldev_t *)cp;
2562 2562 xge_hal_status_e status;
2563 2563 int retsize;
2564 2564 char *buf;
2565 2565
2566 2566 buf = kmem_alloc(XGELL_PCICONF_BUFSIZE, KM_SLEEP);
2567 2567 if (buf == NULL) {
2568 2568 return (ENOSPC);
2569 2569 }
2570 2570 status = xge_hal_aux_pci_config_read(lldev->devh, XGELL_PCICONF_BUFSIZE,
2571 2571 buf, &retsize);
2572 2572 if (status != XGE_HAL_OK) {
2573 2573 kmem_free(buf, XGELL_PCICONF_BUFSIZE);
2574 2574 xge_debug_ll(XGE_ERR, "pci_config_read(): status %d", status);
2575 2575 return (EINVAL);
2576 2576 }
2577 2577 *(buf + retsize - 1) = '\0'; /* remove last '\n' */
2578 2578 (void) mi_mpprintf(mp, "%s", buf);
2579 2579 kmem_free(buf, XGELL_PCICONF_BUFSIZE);
2580 2580
2581 2581 return (0);
2582 2582 }
2583 2583
2584 2584 static int
2585 2585 xgell_about_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2586 2586 {
2587 2587 xgelldev_t *lldev = (xgelldev_t *)cp;
2588 2588 xge_hal_status_e status;
2589 2589 int retsize;
2590 2590 char *buf;
2591 2591
2592 2592 buf = kmem_alloc(XGELL_ABOUT_BUFSIZE, KM_SLEEP);
2593 2593 if (buf == NULL) {
2594 2594 return (ENOSPC);
2595 2595 }
2596 2596 status = xge_hal_aux_about_read(lldev->devh, XGELL_ABOUT_BUFSIZE,
2597 2597 buf, &retsize);
2598 2598 if (status != XGE_HAL_OK) {
2599 2599 kmem_free(buf, XGELL_ABOUT_BUFSIZE);
2600 2600 xge_debug_ll(XGE_ERR, "about_read(): status %d", status);
2601 2601 return (EINVAL);
2602 2602 }
2603 2603 *(buf + retsize - 1) = '\0'; /* remove last '\n' */
2604 2604 (void) mi_mpprintf(mp, "%s", buf);
2605 2605 kmem_free(buf, XGELL_ABOUT_BUFSIZE);
2606 2606
2607 2607 return (0);
2608 2608 }
2609 2609
2610 2610 static unsigned long bar0_offset = 0x110; /* adapter_control */
2611 2611
2612 2612 static int
2613 2613 xgell_bar0_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2614 2614 {
2615 2615 xgelldev_t *lldev = (xgelldev_t *)cp;
2616 2616 xge_hal_status_e status;
2617 2617 int retsize;
2618 2618 char *buf;
2619 2619
2620 2620 buf = kmem_alloc(XGELL_IOCTL_BUFSIZE, KM_SLEEP);
2621 2621 if (buf == NULL) {
2622 2622 return (ENOSPC);
2623 2623 }
2624 2624 status = xge_hal_aux_bar0_read(lldev->devh, bar0_offset,
2625 2625 XGELL_IOCTL_BUFSIZE, buf, &retsize);
2626 2626 if (status != XGE_HAL_OK) {
2627 2627 kmem_free(buf, XGELL_IOCTL_BUFSIZE);
2628 2628 xge_debug_ll(XGE_ERR, "bar0_read(): status %d", status);
2629 2629 return (EINVAL);
2630 2630 }
2631 2631 *(buf + retsize - 1) = '\0'; /* remove last '\n' */
2632 2632 (void) mi_mpprintf(mp, "%s", buf);
2633 2633 kmem_free(buf, XGELL_IOCTL_BUFSIZE);
2634 2634
2635 2635 return (0);
2636 2636 }
2637 2637
2638 2638 static int
2639 2639 xgell_bar0_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp, cred_t *credp)
2640 2640 {
2641 2641 unsigned long old_offset = bar0_offset;
2642 2642 char *end;
2643 2643
2644 2644 if (value && *value == '0' &&
2645 2645 (*(value + 1) == 'x' || *(value + 1) == 'X')) {
2646 2646 value += 2;
2647 2647 }
2648 2648
2649 2649 bar0_offset = mi_strtol(value, &end, 16);
2650 2650 if (end == value) {
2651 2651 bar0_offset = old_offset;
2652 2652 return (EINVAL);
2653 2653 }
2654 2654
2655 2655 xge_debug_ll(XGE_TRACE, "bar0: new value %s:%lX", value, bar0_offset);
2656 2656
2657 2657 return (0);
2658 2658 }
2659 2659
2660 2660 static int
2661 2661 xgell_debug_level_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2662 2662 {
2663 2663 char *buf;
2664 2664
2665 2665 buf = kmem_alloc(XGELL_IOCTL_BUFSIZE, KM_SLEEP);
2666 2666 if (buf == NULL) {
2667 2667 return (ENOSPC);
2668 2668 }
2669 2669 (void) mi_mpprintf(mp, "debug_level %d", xge_hal_driver_debug_level());
2670 2670 kmem_free(buf, XGELL_IOCTL_BUFSIZE);
2671 2671
2672 2672 return (0);
2673 2673 }
2674 2674
2675 2675 static int
2676 2676 xgell_debug_level_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp,
2677 2677 cred_t *credp)
2678 2678 {
2679 2679 int level;
2680 2680 char *end;
2681 2681
2682 2682 level = mi_strtol(value, &end, 10);
2683 2683 if (level < XGE_NONE || level > XGE_ERR || end == value) {
2684 2684 return (EINVAL);
2685 2685 }
2686 2686
2687 2687 xge_hal_driver_debug_level_set(level);
2688 2688
2689 2689 return (0);
2690 2690 }
2691 2691
2692 2692 static int
2693 2693 xgell_debug_module_mask_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2694 2694 {
2695 2695 char *buf;
2696 2696
2697 2697 buf = kmem_alloc(XGELL_IOCTL_BUFSIZE, KM_SLEEP);
2698 2698 if (buf == NULL) {
2699 2699 return (ENOSPC);
2700 2700 }
2701 2701 (void) mi_mpprintf(mp, "debug_module_mask 0x%08x",
2702 2702 xge_hal_driver_debug_module_mask());
2703 2703 kmem_free(buf, XGELL_IOCTL_BUFSIZE);
2704 2704
2705 2705 return (0);
2706 2706 }
2707 2707
2708 2708 static int
2709 2709 xgell_debug_module_mask_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp,
2710 2710 cred_t *credp)
2711 2711 {
2712 2712 u32 mask;
2713 2713 char *end;
2714 2714
2715 2715 if (value && *value == '0' &&
2716 2716 (*(value + 1) == 'x' || *(value + 1) == 'X')) {
2717 2717 value += 2;
2718 2718 }
2719 2719
2720 2720 mask = mi_strtol(value, &end, 16);
2721 2721 if (end == value) {
2722 2722 return (EINVAL);
2723 2723 }
2724 2724
2725 2725 xge_hal_driver_debug_module_mask_set(mask);
2726 2726
2727 2727 return (0);
2728 2728 }
2729 2729
2730 2730 static int
2731 2731 xgell_devconfig_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2732 2732 {
2733 2733 xgelldev_t *lldev = (xgelldev_t *)(void *)cp;
2734 2734 xge_hal_status_e status;
2735 2735 int retsize;
2736 2736 char *buf;
2737 2737
2738 2738 buf = kmem_alloc(XGELL_DEVCONF_BUFSIZE, KM_SLEEP);
2739 2739 if (buf == NULL) {
2740 2740 return (ENOSPC);
2741 2741 }
2742 2742 status = xge_hal_aux_device_config_read(lldev->devh,
2743 2743 XGELL_DEVCONF_BUFSIZE, buf, &retsize);
2744 2744 if (status != XGE_HAL_OK) {
2745 2745 kmem_free(buf, XGELL_DEVCONF_BUFSIZE);
2746 2746 xge_debug_ll(XGE_ERR, "device_config_read(): status %d",
2747 2747 status);
2748 2748 return (EINVAL);
2749 2749 }
2750 2750 *(buf + retsize - 1) = '\0'; /* remove last '\n' */
2751 2751 (void) mi_mpprintf(mp, "%s", buf);
2752 2752 kmem_free(buf, XGELL_DEVCONF_BUFSIZE);
2753 2753
2754 2754 return (0);
2755 2755 }
2756 2756
/*
 * xgell_device_register
 * @lldev: LL device allocated by xgell_device_alloc()
 * @config: network device configuration, copied into lldev
 *
 * Register this instance with the GLDv3 MAC layer and load the ndd
 * debug variables.  On any failure every resource acquired so far is
 * unwound (see the two goto targets at the bottom).
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
xgell_device_register(xgelldev_t *lldev, xgell_config_t *config)
{
	mac_register_t *macp = NULL;
	xge_hal_device_t *hldev = (xge_hal_device_t *)lldev->devh;

	/*
	 * Initialize some NDD interface for internal debug.
	 */
	if (nd_load(&lldev->ndp, "pciconf", xgell_pciconf_get, NULL,
	    (caddr_t)lldev) == B_FALSE)
		goto xgell_ndd_fail;

	if (nd_load(&lldev->ndp, "about", xgell_about_get, NULL,
	    (caddr_t)lldev) == B_FALSE)
		goto xgell_ndd_fail;

	if (nd_load(&lldev->ndp, "stats", xgell_stats_get, NULL,
	    (caddr_t)lldev) == B_FALSE)
		goto xgell_ndd_fail;

	if (nd_load(&lldev->ndp, "bar0", xgell_bar0_get, xgell_bar0_set,
	    (caddr_t)lldev) == B_FALSE)
		goto xgell_ndd_fail;

	if (nd_load(&lldev->ndp, "debug_level", xgell_debug_level_get,
	    xgell_debug_level_set, (caddr_t)lldev) == B_FALSE)
		goto xgell_ndd_fail;

	if (nd_load(&lldev->ndp, "debug_module_mask",
	    xgell_debug_module_mask_get, xgell_debug_module_mask_set,
	    (caddr_t)lldev) == B_FALSE)
		goto xgell_ndd_fail;

	if (nd_load(&lldev->ndp, "devconfig", xgell_devconfig_get, NULL,
	    (caddr_t)lldev) == B_FALSE)
		goto xgell_ndd_fail;

	bcopy(config, &lldev->config, sizeof (xgell_config_t));

	/* genlock must interoperate with the device interrupt priority */
	mutex_init(&lldev->genlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(hldev->irqh));

	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		goto xgell_register_fail;
	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = lldev;
	macp->m_dip = lldev->dev_info;
	macp->m_src_addr = hldev->macaddr[0];
	macp->m_callbacks = &xgell_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = hldev->config.mtu;
	macp->m_margin = VLAN_TAGSZ;
	macp->m_v12n = MAC_VIRT_LEVEL1;

	/*
	 * MAC Registration.
	 */
	if (mac_register(macp, &lldev->mh) != 0)
		goto xgell_register_fail;

	/* Always free the macp after register */
	if (macp != NULL)
		mac_free(macp);

	/* Calculate tx_copied_max here ??? */
	lldev->tx_copied_max = hldev->config.fifo.max_frags *
	    hldev->config.fifo.alignment_size *
	    hldev->config.fifo.max_aligned_frags;

	xge_debug_ll(XGE_TRACE, "etherenet device %s%d registered",
	    XGELL_IFNAME, lldev->instance);

	return (DDI_SUCCESS);

xgell_ndd_fail:
	/* ndd load failed before the mutex/mac were created */
	nd_free(&lldev->ndp);
	xge_debug_ll(XGE_ERR, "%s", "unable to load ndd parameter");
	return (DDI_FAILURE);

xgell_register_fail:
	/* unwind mac_alloc, ndd variables and the mutex */
	if (macp != NULL)
		mac_free(macp);
	nd_free(&lldev->ndp);
	mutex_destroy(&lldev->genlock);
	xge_debug_ll(XGE_ERR, "%s", "unable to register networking device");
	return (DDI_FAILURE);
}
2853 2853
2854 2854 /*
2855 2855 * xgell_device_unregister
2856 2856 * @devh: pointer on HAL device
2857 2857 * @lldev: pointer to valid LL device.
2858 2858 *
2859 2859 * This function will unregister and free network device
2860 2860 */
2861 2861 int
2862 2862 xgell_device_unregister(xgelldev_t *lldev)
2863 2863 {
2864 2864 if (mac_unregister(lldev->mh) != 0) {
2865 2865 xge_debug_ll(XGE_ERR, "unable to unregister device %s%d",
2866 2866 XGELL_IFNAME, lldev->instance);
2867 2867 return (DDI_FAILURE);
2868 2868 }
2869 2869
2870 2870 mutex_destroy(&lldev->genlock);
2871 2871
2872 2872 nd_free(&lldev->ndp);
2873 2873
2874 2874 xge_debug_ll(XGE_TRACE, "etherenet device %s%d unregistered",
2875 2875 XGELL_IFNAME, lldev->instance);
2876 2876
2877 2877 return (DDI_SUCCESS);
2878 2878 }
↓ open down ↓ |
1685 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX