Print this page
5976 e1000g use after free on start failure
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/e1000g/e1000g_alloc.c
+++ new/usr/src/uts/common/io/e1000g/e1000g_alloc.c
1 1 /*
2 2 * This file is provided under a CDDLv1 license. When using or
3 3 * redistributing this file, you may do so under this license.
4 4 * In redistributing this file this license must be included
5 5 * and no other modification of this header file is permitted.
6 6 *
7 7 * CDDL LICENSE SUMMARY
8 8 *
9 9 * Copyright(c) 1999 - 2009 Intel Corporation. All rights reserved.
10 10 *
11 11 * The contents of this file are subject to the terms of Version
12 12 * 1.0 of the Common Development and Distribution License (the "License").
13 13 *
14 14 * You should have received a copy of the License with this software.
15 15 * You can obtain a copy of the License at
16 16 * http://www.opensolaris.org/os/licensing.
17 17 * See the License for the specific language governing permissions
18 18 * and limitations under the License.
19 19 */
20 20
21 21 /*
22 22 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24
25 25 /*
26 26 * **********************************************************************
27 27 * Module Name: *
28 28 * e1000g_alloc.c *
29 29 * *
30 30 * Abstract: *
31 31 * This file contains some routines that take care of *
32 32 * memory allocation for descriptors and buffers. *
33 33 * *
34 34 * **********************************************************************
35 35 */
36 36
37 37 #include "e1000g_sw.h"
38 38 #include "e1000g_debug.h"
39 39
/*
 * Bytes needed for a ring's array of tx_sw_packet_t structures.
 * NOTE: the macro expands the identifier `Adapter`, so it may only be
 * used where a local named Adapter is in scope.
 */
#define	TX_SW_PKT_AREA_SZ \
	(sizeof (tx_sw_packet_t) * Adapter->tx_freelist_num)

/* Descriptor-ring DMA area allocation/free, per tx ring and rx data area */
static int e1000g_alloc_tx_descriptors(e1000g_tx_ring_t *);
static int e1000g_alloc_rx_descriptors(e1000g_rx_data_t *);
static void e1000g_free_tx_descriptors(e1000g_tx_ring_t *);
static void e1000g_free_rx_descriptors(e1000g_rx_data_t *);
/* Packet buffer (sw packet + DMA buffer) allocation/free */
static int e1000g_alloc_tx_packets(e1000g_tx_ring_t *);
static int e1000g_alloc_rx_packets(e1000g_rx_data_t *);
static void e1000g_free_tx_packets(e1000g_tx_ring_t *);
static void e1000g_free_rx_packets(e1000g_rx_data_t *, boolean_t);
static int e1000g_alloc_dma_buffer(struct e1000g *,
    dma_buffer_t *, size_t, ddi_dma_attr_t *p_dma_attr);

/*
 * In order to avoid address error crossing 64KB boundary
 * during PCI-X packets receiving, e1000g_alloc_dma_buffer_82546
 * is used by some necessary adapter types.
 */
static int e1000g_alloc_dma_buffer_82546(struct e1000g *,
    dma_buffer_t *, size_t, ddi_dma_attr_t *p_dma_attr);
static int e1000g_dma_mem_alloc_82546(dma_buffer_t *buf,
    size_t size, size_t *len);
static boolean_t e1000g_cross_64k_bound(void *, uintptr_t);

static void e1000g_free_dma_buffer(dma_buffer_t *);
#ifdef __sparc
/* On sparc, packet buffers may be backed by DVMA rather than DDI DMA */
static int e1000g_alloc_dvma_buffer(struct e1000g *, dma_buffer_t *, size_t);
static void e1000g_free_dvma_buffer(dma_buffer_t *);
#endif
static int e1000g_alloc_descriptors(struct e1000g *Adapter);
static void e1000g_free_descriptors(struct e1000g *Adapter);
static int e1000g_alloc_packets(struct e1000g *Adapter);
static void e1000g_free_packets(struct e1000g *Adapter);
static p_rx_sw_packet_t e1000g_alloc_rx_sw_packet(e1000g_rx_data_t *,
    ddi_dma_attr_t *p_dma_attr);
/* DMA access attributes for descriptors <Little Endian> */
static ddi_device_acc_attr_t e1000g_desc_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/* DMA access attributes for DMA buffers */
#ifdef __sparc
static ddi_device_acc_attr_t e1000g_buf_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_BE_ACC,
	DDI_STRICTORDER_ACC,
};
#else
static ddi_device_acc_attr_t e1000g_buf_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
};
#endif

/*
 * DMA attributes for tx mblk buffers.
 * Unlike the pre-allocated buffers below, bind-time tx mblks may be
 * scattered, so up to MAX_COOKIES segments are allowed.
 */
static ddi_dma_attr_t e1000g_tx_dma_attr = {
	DMA_ATTR_V0,		/* version of this structure */
	0,			/* lowest usable address */
	0xffffffffffffffffULL,	/* highest usable address */
	0x7fffffff,		/* maximum DMAable byte count */
	1,			/* alignment in bytes */
	0x7ff,			/* burst sizes (any?) */
	1,			/* minimum transfer */
	0xffffffffU,		/* maximum transfer */
	0xffffffffffffffffULL,	/* maximum segment length */
	MAX_COOKIES,		/* maximum number of segments */
	1,			/* granularity */
	DDI_DMA_FLAGERR,	/* dma_attr_flags */
};

/*
 * DMA attributes for pre-allocated rx/tx buffers.
 * A single segment (cookie) is required so hardware sees one
 * contiguous buffer.
 */
static ddi_dma_attr_t e1000g_buf_dma_attr = {
	DMA_ATTR_V0,		/* version of this structure */
	0,			/* lowest usable address */
	0xffffffffffffffffULL,	/* highest usable address */
	0x7fffffff,		/* maximum DMAable byte count */
	1,			/* alignment in bytes */
	0x7ff,			/* burst sizes (any?) */
	1,			/* minimum transfer */
	0xffffffffU,		/* maximum transfer */
	0xffffffffffffffffULL,	/* maximum segment length */
	1,			/* maximum number of segments */
	1,			/* granularity */
	DDI_DMA_FLAGERR,	/* dma_attr_flags */
};

/*
 * DMA attributes for rx/tx descriptor rings.  The alignment field is a
 * default; callers copy this template and overwrite dma_attr_align with
 * Adapter->desc_align (4k, or 64k for the 82546 workaround).
 */
static ddi_dma_attr_t e1000g_desc_dma_attr = {
	DMA_ATTR_V0,		/* version of this structure */
	0,			/* lowest usable address */
	0xffffffffffffffffULL,	/* highest usable address */
	0x7fffffff,		/* maximum DMAable byte count */
	E1000_MDALIGN,		/* default alignment is 4k but can be changed */
	0x7ff,			/* burst sizes (any?) */
	1,			/* minimum transfer */
	0xffffffffU,		/* maximum transfer */
	0xffffffffffffffffULL,	/* maximum segment length */
	1,			/* maximum number of segments */
	1,			/* granularity */
	DDI_DMA_FLAGERR,	/* dma_attr_flags */
};

#ifdef __sparc
/* DMA limits used when reserving DVMA ranges on sparc */
static ddi_dma_lim_t e1000g_dma_limits = {
	(uint_t)0,		/* dlim_addr_lo */
	(uint_t)0xffffffff,	/* dlim_addr_hi */
	(uint_t)0xffffffff,	/* dlim_cntr_max */
	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
	0x1,			/* dlim_minxfer */
	1024			/* dlim_speed */
};
#endif

/*
 * Buffer allocation strategy: sparc tries fast DVMA first and falls
 * back to DDI DMA (see e1000g_alloc_packets); other platforms always
 * use DDI DMA.
 */
#ifdef __sparc
static dma_type_t e1000g_dma_type = USE_DVMA;
#else
static dma_type_t e1000g_dma_type = USE_DMA;
#endif

/* Protects e1000g_dma_type, which is shared by all instances */
extern krwlock_t e1000g_dma_type_lock;
166 166
167 167 int
168 168 e1000g_alloc_dma_resources(struct e1000g *Adapter)
169 169 {
170 170 int result;
171 171
172 172 result = DDI_FAILURE;
173 173
174 174 while ((result != DDI_SUCCESS) &&
175 175 (Adapter->tx_desc_num >= MIN_NUM_TX_DESCRIPTOR) &&
176 176 (Adapter->rx_desc_num >= MIN_NUM_RX_DESCRIPTOR) &&
177 177 (Adapter->tx_freelist_num >= MIN_NUM_TX_FREELIST)) {
178 178
179 179 result = e1000g_alloc_descriptors(Adapter);
180 180
181 181 if (result == DDI_SUCCESS) {
182 182 result = e1000g_alloc_packets(Adapter);
183 183
184 184 if (result != DDI_SUCCESS)
185 185 e1000g_free_descriptors(Adapter);
186 186 }
187 187
188 188 /*
189 189 * If the allocation fails due to resource shortage,
190 190 * we'll reduce the numbers of descriptors/buffers by
191 191 * half, and try the allocation again.
192 192 */
193 193 if (result != DDI_SUCCESS) {
194 194 /*
195 195 * We must ensure the number of descriptors
196 196 * is always a multiple of 8.
197 197 */
198 198 Adapter->tx_desc_num =
199 199 (Adapter->tx_desc_num >> 4) << 3;
200 200 Adapter->rx_desc_num =
201 201 (Adapter->rx_desc_num >> 4) << 3;
202 202
203 203 Adapter->tx_freelist_num >>= 1;
204 204 }
205 205 }
206 206
207 207 return (result);
208 208 }
209 209
210 210 /*
211 211 * e1000g_alloc_descriptors - allocate DMA buffers for descriptors
212 212 *
213 213 * This routine allocates neccesary DMA buffers for
214 214 * Transmit Descriptor Area
215 215 * Receive Descrpitor Area
216 216 */
217 217 static int
218 218 e1000g_alloc_descriptors(struct e1000g *Adapter)
219 219 {
220 220 int result;
221 221 e1000g_tx_ring_t *tx_ring;
222 222 e1000g_rx_data_t *rx_data;
223 223
224 224 if (Adapter->mem_workaround_82546 &&
225 225 ((Adapter->shared.mac.type == e1000_82545) ||
226 226 (Adapter->shared.mac.type == e1000_82546) ||
227 227 (Adapter->shared.mac.type == e1000_82546_rev_3))) {
228 228 /* Align on a 64k boundary for these adapter types */
229 229 Adapter->desc_align = E1000_MDALIGN_82546;
230 230 } else {
231 231 /* Align on a 4k boundary for all other adapter types */
232 232 Adapter->desc_align = E1000_MDALIGN;
233 233 }
234 234
235 235 tx_ring = Adapter->tx_ring;
236 236
237 237 result = e1000g_alloc_tx_descriptors(tx_ring);
238 238 if (result != DDI_SUCCESS)
239 239 return (DDI_FAILURE);
240 240
241 241 rx_data = Adapter->rx_ring->rx_data;
242 242
243 243 result = e1000g_alloc_rx_descriptors(rx_data);
244 244 if (result != DDI_SUCCESS) {
245 245 e1000g_free_tx_descriptors(tx_ring);
246 246 return (DDI_FAILURE);
247 247 }
248 248
249 249 return (DDI_SUCCESS);
250 250 }
251 251
252 252 static void
253 253 e1000g_free_descriptors(struct e1000g *Adapter)
254 254 {
255 255 e1000g_tx_ring_t *tx_ring;
256 256 e1000g_rx_data_t *rx_data;
257 257
258 258 tx_ring = Adapter->tx_ring;
259 259 rx_data = Adapter->rx_ring->rx_data;
260 260
261 261 e1000g_free_tx_descriptors(tx_ring);
262 262 e1000g_free_rx_descriptors(rx_data);
263 263 }
264 264
/*
 * e1000g_alloc_tx_descriptors - allocate the transmit descriptor ring
 *
 * Allocates a physically contiguous, suitably aligned DMA area for the
 * transmit descriptor ring and binds it to a single DMA cookie.  On
 * success tbd_area/tbd_first/tbd_last point at the ring and tbd_dma_addr
 * holds the device-visible address.  On failure all partial allocations
 * are released and DDI_FAILURE is returned.
 */
static int
e1000g_alloc_tx_descriptors(e1000g_tx_ring_t *tx_ring)
{
	int mystat;
	boolean_t alloc_flag;	/* B_TRUE once an aligned area is in hand */
	size_t size;
	size_t len;
	uintptr_t templong;
	uint_t cookie_count;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	struct e1000g *Adapter;
	ddi_dma_attr_t dma_attr;

	Adapter = tx_ring->adapter;
	devinfo = Adapter->dip;

	alloc_flag = B_FALSE;
	dma_attr = e1000g_desc_dma_attr;	/* local copy we may tweak */

	/*
	 * Solaris 7 has a problem with allocating physically contiguous memory
	 * that is aligned on a 4K boundary. The transmit and rx descriptors
	 * need to aligned on a 4kbyte boundary. We first try to allocate the
	 * memory with DMA attributes set to 4K alignment and also no scatter/
	 * gather mechanism specified. In most cases, this does not allocate
	 * memory aligned at a 4Kbyte boundary. We then try asking for memory
	 * aligned on 4K boundary with scatter/gather set to 2. This works when
	 * the amount of memory is less than 4k i.e a page size. If neither of
	 * these options work or if the number of descriptors is greater than
	 * 4K, ie more than 256 descriptors, we allocate 4k extra memory and
	 * and then align the memory at a 4k boundary.
	 */
	size = sizeof (struct e1000_tx_desc) * Adapter->tx_desc_num;

	/*
	 * Memory allocation for the transmit buffer descriptors.
	 */
	dma_attr.dma_attr_sgllen = 1;
	dma_attr.dma_attr_align = Adapter->desc_align;

	/*
	 * Allocate a new DMA handle for the transmit descriptor
	 * memory area.
	 */
	mystat = ddi_dma_alloc_handle(devinfo, &dma_attr,
	    DDI_DMA_DONTWAIT, 0,
	    &tx_ring->tbd_dma_handle);

	if (mystat != DDI_SUCCESS) {
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate tbd dma handle: %d", mystat);
		tx_ring->tbd_dma_handle = NULL;
		return (DDI_FAILURE);
	}

	/*
	 * Allocate memory to DMA data to and from the transmit
	 * descriptors.
	 */
	mystat = ddi_dma_mem_alloc(tx_ring->tbd_dma_handle,
	    size,
	    &e1000g_desc_acc_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, 0,
	    (caddr_t *)&tx_ring->tbd_area,
	    &len, &tx_ring->tbd_acc_handle);

	/*
	 * First attempt fails either when the allocation itself failed or
	 * when the returned memory is not aligned on desc_align (the mask
	 * test below relies on desc_align being a power of two).
	 */
	if ((mystat != DDI_SUCCESS) ||
	    ((uintptr_t)tx_ring->tbd_area & (Adapter->desc_align - 1))) {
		if (mystat == DDI_SUCCESS) {
			/* Allocation worked but was misaligned; undo it */
			ddi_dma_mem_free(&tx_ring->tbd_acc_handle);
			tx_ring->tbd_acc_handle = NULL;
			tx_ring->tbd_area = NULL;
		}
		if (tx_ring->tbd_dma_handle != NULL) {
			ddi_dma_free_handle(&tx_ring->tbd_dma_handle);
			tx_ring->tbd_dma_handle = NULL;
		}
		alloc_flag = B_FALSE;
	} else
		alloc_flag = B_TRUE;

	/*
	 * Initialize the entire transmit buffer descriptor area to zero
	 */
	if (alloc_flag)
		bzero(tx_ring->tbd_area, len);

	/*
	 * If the previous DMA attributes setting could not give us contiguous
	 * memory or the number of descriptors is greater than the page size,
	 * we allocate extra memory and then align it at appropriate boundary.
	 */
	if (!alloc_flag) {
		/* Over-allocate so a properly aligned sub-range must exist */
		size = size + Adapter->desc_align;

		/*
		 * DMA attributes set to no scatter/gather and byte (1)
		 * alignment; we align the result by hand below.
		 */
		dma_attr.dma_attr_align = 1;
		dma_attr.dma_attr_sgllen = 1;

		/*
		 * Allocate a new DMA handle for the transmit descriptor memory
		 * area.
		 */
		mystat = ddi_dma_alloc_handle(devinfo, &dma_attr,
		    DDI_DMA_DONTWAIT, 0,
		    &tx_ring->tbd_dma_handle);

		if (mystat != DDI_SUCCESS) {
			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
			    "Could not re-allocate tbd dma handle: %d", mystat);
			tx_ring->tbd_dma_handle = NULL;
			return (DDI_FAILURE);
		}

		/*
		 * Allocate memory to DMA data to and from the transmit
		 * descriptors.
		 */
		mystat = ddi_dma_mem_alloc(tx_ring->tbd_dma_handle,
		    size,
		    &e1000g_desc_acc_attr, DDI_DMA_CONSISTENT,
		    DDI_DMA_DONTWAIT, 0,
		    (caddr_t *)&tx_ring->tbd_area,
		    &len, &tx_ring->tbd_acc_handle);

		if (mystat != DDI_SUCCESS) {
			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
			    "Could not allocate tbd dma memory: %d", mystat);
			tx_ring->tbd_acc_handle = NULL;
			tx_ring->tbd_area = NULL;
			if (tx_ring->tbd_dma_handle != NULL) {
				ddi_dma_free_handle(&tx_ring->tbd_dma_handle);
				tx_ring->tbd_dma_handle = NULL;
			}
			return (DDI_FAILURE);
		} else
			alloc_flag = B_TRUE;

		/*
		 * Initialize the entire transmit buffer descriptor area to zero
		 */
		bzero(tx_ring->tbd_area, len);
		/*
		 * Memory has been allocated with the ddi_dma_mem_alloc call,
		 * but has not been aligned.
		 * We now align it on the appropriate boundary: advance
		 * tbd_area to the next desc_align boundary and shrink len by
		 * the amount skipped (tbd_acc_handle still tracks the
		 * original base for the eventual free).
		 */
		templong = P2NPHASE((uintptr_t)tx_ring->tbd_area,
		    Adapter->desc_align);
		len = size - templong;
		templong += (uintptr_t)tx_ring->tbd_area;
		tx_ring->tbd_area = (struct e1000_tx_desc *)templong;
	} /* alignment workaround */

	/*
	 * Transmit buffer descriptor memory allocation succeeded
	 */
	ASSERT(alloc_flag);

	/*
	 * Allocates DMA resources for the memory that was allocated by
	 * the ddi_dma_mem_alloc call. The DMA resources then get bound to the
	 * the memory address
	 */
	mystat = ddi_dma_addr_bind_handle(tx_ring->tbd_dma_handle,
	    (struct as *)NULL, (caddr_t)tx_ring->tbd_area,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, 0, &cookie, &cookie_count);

	if (mystat != DDI_SUCCESS) {
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind tbd dma resource: %d", mystat);
		if (tx_ring->tbd_acc_handle != NULL) {
			ddi_dma_mem_free(&tx_ring->tbd_acc_handle);
			tx_ring->tbd_acc_handle = NULL;
			tx_ring->tbd_area = NULL;
		}
		if (tx_ring->tbd_dma_handle != NULL) {
			ddi_dma_free_handle(&tx_ring->tbd_dma_handle);
			tx_ring->tbd_dma_handle = NULL;
		}
		return (DDI_FAILURE);
	}

	ASSERT(cookie_count == 1);	/* 1 cookie */

	/* The ring must be one contiguous chunk as far as hardware sees */
	if (cookie_count != 1) {
		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind tbd dma resource in a single frag. "
		    "Count - %d Len - %d", cookie_count, len);
		e1000g_free_tx_descriptors(tx_ring);
		return (DDI_FAILURE);
	}

	tx_ring->tbd_dma_addr = cookie.dmac_laddress;
	tx_ring->tbd_first = tx_ring->tbd_area;
	tx_ring->tbd_last = tx_ring->tbd_first +
	    (Adapter->tx_desc_num - 1);

	return (DDI_SUCCESS);
}
469 469
/*
 * e1000g_alloc_rx_descriptors - allocate the receive descriptor ring
 *
 * Mirror image of e1000g_alloc_tx_descriptors: allocate a contiguous,
 * desc_align-aligned DMA area for the receive descriptor ring, falling
 * back to over-allocation plus manual alignment when the aligned
 * allocation fails, then bind it to a single DMA cookie.  On failure
 * all partial allocations are released and DDI_FAILURE is returned.
 */
static int
e1000g_alloc_rx_descriptors(e1000g_rx_data_t *rx_data)
{
	int mystat;
	boolean_t alloc_flag;	/* B_TRUE once an aligned area is in hand */
	size_t size;
	size_t len;
	uintptr_t templong;
	uint_t cookie_count;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	struct e1000g *Adapter;
	ddi_dma_attr_t dma_attr;

	Adapter = rx_data->rx_ring->adapter;
	devinfo = Adapter->dip;

	alloc_flag = B_FALSE;
	dma_attr = e1000g_desc_dma_attr;	/* local copy we may tweak */

	/*
	 * Memory allocation for the receive buffer descriptors.
	 */
	size = (sizeof (struct e1000_rx_desc)) * Adapter->rx_desc_num;

	/*
	 * Asking for aligned memory with DMA attributes set for suitable value
	 */
	dma_attr.dma_attr_sgllen = 1;
	dma_attr.dma_attr_align = Adapter->desc_align;

	/*
	 * Allocate a new DMA handle for the receive descriptors
	 */
	mystat = ddi_dma_alloc_handle(devinfo, &dma_attr,
	    DDI_DMA_DONTWAIT, 0,
	    &rx_data->rbd_dma_handle);

	if (mystat != DDI_SUCCESS) {
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate rbd dma handle: %d", mystat);
		rx_data->rbd_dma_handle = NULL;
		return (DDI_FAILURE);
	}
	/*
	 * Allocate memory to DMA data to and from the receive
	 * descriptors.
	 */
	mystat = ddi_dma_mem_alloc(rx_data->rbd_dma_handle,
	    size,
	    &e1000g_desc_acc_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, 0,
	    (caddr_t *)&rx_data->rbd_area,
	    &len, &rx_data->rbd_acc_handle);

	/*
	 * Check if memory allocation succeeded and also if the
	 * allocated memory is aligned correctly (the mask test relies on
	 * desc_align being a power of two).
	 */
	if ((mystat != DDI_SUCCESS) ||
	    ((uintptr_t)rx_data->rbd_area & (Adapter->desc_align - 1))) {
		if (mystat == DDI_SUCCESS) {
			/* Allocation worked but was misaligned; undo it */
			ddi_dma_mem_free(&rx_data->rbd_acc_handle);
			rx_data->rbd_acc_handle = NULL;
			rx_data->rbd_area = NULL;
		}
		if (rx_data->rbd_dma_handle != NULL) {
			ddi_dma_free_handle(&rx_data->rbd_dma_handle);
			rx_data->rbd_dma_handle = NULL;
		}
		alloc_flag = B_FALSE;
	} else
		alloc_flag = B_TRUE;

	/*
	 * Initialize the allocated receive descriptor memory to zero.
	 */
	if (alloc_flag)
		bzero((caddr_t)rx_data->rbd_area, len);

	/*
	 * If memory allocation did not succeed, do the alignment ourselves
	 */
	if (!alloc_flag) {
		/* Byte alignment; over-allocate so we can align by hand */
		dma_attr.dma_attr_align = 1;
		dma_attr.dma_attr_sgllen = 1;
		size = size + Adapter->desc_align;
		/*
		 * Allocate a new DMA handle for the receive descriptor.
		 */
		mystat = ddi_dma_alloc_handle(devinfo, &dma_attr,
		    DDI_DMA_DONTWAIT, 0,
		    &rx_data->rbd_dma_handle);

		if (mystat != DDI_SUCCESS) {
			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
			    "Could not re-allocate rbd dma handle: %d", mystat);
			rx_data->rbd_dma_handle = NULL;
			return (DDI_FAILURE);
		}
		/*
		 * Allocate memory to DMA data to and from the receive
		 * descriptors.
		 */
		mystat = ddi_dma_mem_alloc(rx_data->rbd_dma_handle,
		    size,
		    &e1000g_desc_acc_attr, DDI_DMA_CONSISTENT,
		    DDI_DMA_DONTWAIT, 0,
		    (caddr_t *)&rx_data->rbd_area,
		    &len, &rx_data->rbd_acc_handle);

		if (mystat != DDI_SUCCESS) {
			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
			    "Could not allocate rbd dma memory: %d", mystat);
			rx_data->rbd_acc_handle = NULL;
			rx_data->rbd_area = NULL;
			if (rx_data->rbd_dma_handle != NULL) {
				ddi_dma_free_handle(&rx_data->rbd_dma_handle);
				rx_data->rbd_dma_handle = NULL;
			}
			return (DDI_FAILURE);
		} else
			alloc_flag = B_TRUE;

		/*
		 * Initialize the allocated receive descriptor memory to zero,
		 * then advance rbd_area to the next desc_align boundary and
		 * shrink len accordingly (rbd_acc_handle keeps tracking the
		 * original base for the eventual free).
		 */
		bzero((caddr_t)rx_data->rbd_area, len);
		templong = P2NPHASE((uintptr_t)rx_data->rbd_area,
		    Adapter->desc_align);
		len = size - templong;
		templong += (uintptr_t)rx_data->rbd_area;
		rx_data->rbd_area = (struct e1000_rx_desc *)templong;
	} /* alignment workaround */

	/*
	 * The memory allocation of the receive descriptors succeeded
	 */
	ASSERT(alloc_flag);

	/*
	 * Allocates DMA resources for the memory that was allocated by
	 * the ddi_dma_mem_alloc call.
	 */
	mystat = ddi_dma_addr_bind_handle(rx_data->rbd_dma_handle,
	    (struct as *)NULL, (caddr_t)rx_data->rbd_area,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, 0, &cookie, &cookie_count);

	if (mystat != DDI_SUCCESS) {
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind rbd dma resource: %d", mystat);
		if (rx_data->rbd_acc_handle != NULL) {
			ddi_dma_mem_free(&rx_data->rbd_acc_handle);
			rx_data->rbd_acc_handle = NULL;
			rx_data->rbd_area = NULL;
		}
		if (rx_data->rbd_dma_handle != NULL) {
			ddi_dma_free_handle(&rx_data->rbd_dma_handle);
			rx_data->rbd_dma_handle = NULL;
		}
		return (DDI_FAILURE);
	}

	ASSERT(cookie_count == 1);
	/* The ring must be one contiguous chunk as far as hardware sees */
	if (cookie_count != 1) {
		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind rbd dma resource in a single frag. "
		    "Count - %d Len - %d", cookie_count, len);
		e1000g_free_rx_descriptors(rx_data);
		return (DDI_FAILURE);
	}

	rx_data->rbd_dma_addr = cookie.dmac_laddress;
	rx_data->rbd_first = rx_data->rbd_area;
	rx_data->rbd_last = rx_data->rbd_first +
	    (Adapter->rx_desc_num - 1);

	return (DDI_SUCCESS);
}
650 650
651 651 static void
652 652 e1000g_free_rx_descriptors(e1000g_rx_data_t *rx_data)
653 653 {
654 654 if (rx_data->rbd_dma_handle != NULL) {
655 655 (void) ddi_dma_unbind_handle(rx_data->rbd_dma_handle);
656 656 }
657 657 if (rx_data->rbd_acc_handle != NULL) {
658 658 ddi_dma_mem_free(&rx_data->rbd_acc_handle);
659 659 rx_data->rbd_acc_handle = NULL;
660 660 rx_data->rbd_area = NULL;
661 661 }
662 662 if (rx_data->rbd_dma_handle != NULL) {
663 663 ddi_dma_free_handle(&rx_data->rbd_dma_handle);
664 664 rx_data->rbd_dma_handle = NULL;
665 665 }
666 666 rx_data->rbd_dma_addr = NULL;
667 667 rx_data->rbd_first = NULL;
668 668 rx_data->rbd_last = NULL;
669 669 }
670 670
671 671 static void
672 672 e1000g_free_tx_descriptors(e1000g_tx_ring_t *tx_ring)
673 673 {
674 674 if (tx_ring->tbd_dma_handle != NULL) {
675 675 (void) ddi_dma_unbind_handle(tx_ring->tbd_dma_handle);
676 676 }
677 677 if (tx_ring->tbd_acc_handle != NULL) {
678 678 ddi_dma_mem_free(&tx_ring->tbd_acc_handle);
679 679 tx_ring->tbd_acc_handle = NULL;
680 680 tx_ring->tbd_area = NULL;
681 681 }
682 682 if (tx_ring->tbd_dma_handle != NULL) {
683 683 ddi_dma_free_handle(&tx_ring->tbd_dma_handle);
684 684 tx_ring->tbd_dma_handle = NULL;
685 685 }
686 686 tx_ring->tbd_dma_addr = NULL;
687 687 tx_ring->tbd_first = NULL;
688 688 tx_ring->tbd_last = NULL;
689 689 }
690 690
691 691
692 692 /*
693 693 * e1000g_alloc_packets - allocate DMA buffers for rx/tx
694 694 *
695 695 * This routine allocates neccesary buffers for
696 696 * Transmit sw packet structure
697 697 * DMA handle for Transmit
698 698 * DMA buffer for Transmit
699 699 * Receive sw packet structure
700 700 * DMA buffer for Receive
701 701 */
702 702 static int
703 703 e1000g_alloc_packets(struct e1000g *Adapter)
704 704 {
705 705 int result;
706 706 e1000g_tx_ring_t *tx_ring;
707 707 e1000g_rx_data_t *rx_data;
708 708
709 709 tx_ring = Adapter->tx_ring;
710 710 rx_data = Adapter->rx_ring->rx_data;
711 711
712 712 again:
713 713 rw_enter(&e1000g_dma_type_lock, RW_READER);
714 714
715 715 result = e1000g_alloc_tx_packets(tx_ring);
716 716 if (result != DDI_SUCCESS) {
717 717 if (e1000g_dma_type == USE_DVMA) {
718 718 rw_exit(&e1000g_dma_type_lock);
719 719
720 720 rw_enter(&e1000g_dma_type_lock, RW_WRITER);
721 721 e1000g_dma_type = USE_DMA;
722 722 rw_exit(&e1000g_dma_type_lock);
723 723
724 724 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
725 725 "No enough dvma resource for Tx packets, "
726 726 "trying to allocate dma buffers...\n");
727 727 goto again;
728 728 }
729 729 rw_exit(&e1000g_dma_type_lock);
730 730
731 731 E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
732 732 "Failed to allocate dma buffers for Tx packets\n");
733 733 return (DDI_FAILURE);
734 734 }
735 735
736 736 result = e1000g_alloc_rx_packets(rx_data);
737 737 if (result != DDI_SUCCESS) {
738 738 e1000g_free_tx_packets(tx_ring);
739 739 if (e1000g_dma_type == USE_DVMA) {
740 740 rw_exit(&e1000g_dma_type_lock);
741 741
742 742 rw_enter(&e1000g_dma_type_lock, RW_WRITER);
743 743 e1000g_dma_type = USE_DMA;
744 744 rw_exit(&e1000g_dma_type_lock);
745 745
746 746 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
747 747 "No enough dvma resource for Rx packets, "
748 748 "trying to allocate dma buffers...\n");
749 749 goto again;
750 750 }
751 751 rw_exit(&e1000g_dma_type_lock);
752 752
753 753 E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
754 754 "Failed to allocate dma buffers for Rx packets\n");
755 755 return (DDI_FAILURE);
756 756 }
757 757
758 758 rw_exit(&e1000g_dma_type_lock);
759 759
760 760 return (DDI_SUCCESS);
761 761 }
762 762
763 763 static void
764 764 e1000g_free_packets(struct e1000g *Adapter)
765 765 {
766 766 e1000g_tx_ring_t *tx_ring;
767 767 e1000g_rx_data_t *rx_data;
768 768
769 769 tx_ring = Adapter->tx_ring;
770 770 rx_data = Adapter->rx_ring->rx_data;
771 771
772 772 e1000g_free_tx_packets(tx_ring);
773 773 e1000g_free_rx_packets(rx_data, B_FALSE);
774 774 }
775 775
776 776 #ifdef __sparc
777 777 static int
778 778 e1000g_alloc_dvma_buffer(struct e1000g *Adapter,
779 779 dma_buffer_t *buf, size_t size)
780 780 {
781 781 int mystat;
782 782 dev_info_t *devinfo;
783 783 ddi_dma_cookie_t cookie;
784 784
785 785 if (e1000g_force_detach)
786 786 devinfo = Adapter->priv_dip;
787 787 else
788 788 devinfo = Adapter->dip;
789 789
790 790 mystat = dvma_reserve(devinfo,
791 791 &e1000g_dma_limits,
792 792 Adapter->dvma_page_num,
793 793 &buf->dma_handle);
794 794
795 795 if (mystat != DDI_SUCCESS) {
796 796 buf->dma_handle = NULL;
797 797 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
798 798 "Could not allocate dvma buffer handle: %d\n", mystat);
799 799 return (DDI_FAILURE);
800 800 }
801 801
802 802 buf->address = kmem_alloc(size, KM_NOSLEEP);
803 803
804 804 if (buf->address == NULL) {
805 805 if (buf->dma_handle != NULL) {
806 806 dvma_release(buf->dma_handle);
807 807 buf->dma_handle = NULL;
808 808 }
809 809 E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
810 810 "Could not allocate dvma buffer memory\n");
811 811 return (DDI_FAILURE);
812 812 }
813 813
814 814 dvma_kaddr_load(buf->dma_handle,
815 815 buf->address, size, 0, &cookie);
816 816
817 817 buf->dma_address = cookie.dmac_laddress;
818 818 buf->size = size;
819 819 buf->len = 0;
820 820
821 821 return (DDI_SUCCESS);
822 822 }
823 823
824 824 static void
825 825 e1000g_free_dvma_buffer(dma_buffer_t *buf)
826 826 {
827 827 if (buf->dma_handle != NULL) {
828 828 dvma_unload(buf->dma_handle, 0, -1);
829 829 } else {
830 830 return;
831 831 }
832 832
833 833 buf->dma_address = NULL;
834 834
835 835 if (buf->address != NULL) {
836 836 kmem_free(buf->address, buf->size);
837 837 buf->address = NULL;
838 838 }
839 839
840 840 if (buf->dma_handle != NULL) {
841 841 dvma_release(buf->dma_handle);
842 842 buf->dma_handle = NULL;
843 843 }
844 844
845 845 buf->size = 0;
846 846 buf->len = 0;
847 847 }
848 848 #endif
849 849
/*
 * e1000g_alloc_dma_buffer - allocate a DMA buffer with all its handles
 *
 * Allocates a DMA handle, DMA-able memory (streaming mode), and binds
 * the memory to a single DMA cookie using the caller-supplied DMA
 * attributes.  On success dma_address/size/len describe the buffer; on
 * failure everything acquired so far is released in reverse order and
 * DDI_FAILURE is returned.
 */
static int
e1000g_alloc_dma_buffer(struct e1000g *Adapter,
    dma_buffer_t *buf, size_t size, ddi_dma_attr_t *p_dma_attr)
{
	int mystat;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	size_t len;
	uint_t count;

	/*
	 * When forced-detach support is enabled, allocations are made
	 * against the persistent private dip so they can outlive a detach.
	 */
	if (e1000g_force_detach)
		devinfo = Adapter->priv_dip;
	else
		devinfo = Adapter->dip;

	mystat = ddi_dma_alloc_handle(devinfo,
	    p_dma_attr,
	    DDI_DMA_DONTWAIT, 0,
	    &buf->dma_handle);

	if (mystat != DDI_SUCCESS) {
		buf->dma_handle = NULL;
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate dma buffer handle: %d\n", mystat);
		return (DDI_FAILURE);
	}

	mystat = ddi_dma_mem_alloc(buf->dma_handle,
	    size, &e1000g_buf_acc_attr, DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, 0,
	    &buf->address,
	    &len, &buf->acc_handle);

	if (mystat != DDI_SUCCESS) {
		buf->acc_handle = NULL;
		buf->address = NULL;
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate dma buffer memory: %d\n", mystat);
		return (DDI_FAILURE);
	}

	mystat = ddi_dma_addr_bind_handle(buf->dma_handle,
	    (struct as *)NULL,
	    buf->address,
	    len, DDI_DMA_RDWR | DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, 0, &cookie, &count);

	if (mystat != DDI_SUCCESS) {
		if (buf->acc_handle != NULL) {
			ddi_dma_mem_free(&buf->acc_handle);
			buf->acc_handle = NULL;
			buf->address = NULL;
		}
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind buffer dma handle: %d\n", mystat);
		return (DDI_FAILURE);
	}

	ASSERT(count == 1);
	/* The attributes demand one cookie; bail out if we got fragments */
	if (count != 1) {
		if (buf->dma_handle != NULL) {
			(void) ddi_dma_unbind_handle(buf->dma_handle);
		}
		if (buf->acc_handle != NULL) {
			ddi_dma_mem_free(&buf->acc_handle);
			buf->acc_handle = NULL;
			buf->address = NULL;
		}
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind buffer as a single frag. "
		    "Count = %d\n", count);
		return (DDI_FAILURE);
	}

	buf->dma_address = cookie.dmac_laddress;
	buf->size = len;
	buf->len = 0;

	return (DDI_SUCCESS);
}
942 942
943 943 /*
944 944 * e1000g_alloc_dma_buffer_82546 - allocate a dma buffer along with all
945 945 * necessary handles. Same as e1000g_alloc_dma_buffer() except ensure
946 946 * that buffer that doesn't cross a 64k boundary.
947 947 */
948 948 static int
949 949 e1000g_alloc_dma_buffer_82546(struct e1000g *Adapter,
950 950 dma_buffer_t *buf, size_t size, ddi_dma_attr_t *p_dma_attr)
951 951 {
952 952 int mystat;
953 953 dev_info_t *devinfo;
954 954 ddi_dma_cookie_t cookie;
955 955 size_t len;
956 956 uint_t count;
957 957
958 958 if (e1000g_force_detach)
959 959 devinfo = Adapter->priv_dip;
960 960 else
961 961 devinfo = Adapter->dip;
962 962
963 963 mystat = ddi_dma_alloc_handle(devinfo,
964 964 p_dma_attr,
965 965 DDI_DMA_DONTWAIT, 0,
966 966 &buf->dma_handle);
967 967
968 968 if (mystat != DDI_SUCCESS) {
969 969 buf->dma_handle = NULL;
970 970 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
971 971 "Could not allocate dma buffer handle: %d\n", mystat);
972 972 return (DDI_FAILURE);
973 973 }
974 974
975 975 mystat = e1000g_dma_mem_alloc_82546(buf, size, &len);
976 976 if (mystat != DDI_SUCCESS) {
977 977 buf->acc_handle = NULL;
978 978 buf->address = NULL;
979 979 if (buf->dma_handle != NULL) {
980 980 ddi_dma_free_handle(&buf->dma_handle);
981 981 buf->dma_handle = NULL;
982 982 }
983 983 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
984 984 "Could not allocate dma buffer memory: %d\n", mystat);
985 985 return (DDI_FAILURE);
986 986 }
987 987
988 988 mystat = ddi_dma_addr_bind_handle(buf->dma_handle,
989 989 (struct as *)NULL,
990 990 buf->address,
991 991 len, DDI_DMA_READ | DDI_DMA_STREAMING,
992 992 DDI_DMA_DONTWAIT, 0, &cookie, &count);
993 993
994 994 if (mystat != DDI_SUCCESS) {
995 995 if (buf->acc_handle != NULL) {
996 996 ddi_dma_mem_free(&buf->acc_handle);
997 997 buf->acc_handle = NULL;
998 998 buf->address = NULL;
999 999 }
1000 1000 if (buf->dma_handle != NULL) {
1001 1001 ddi_dma_free_handle(&buf->dma_handle);
1002 1002 buf->dma_handle = NULL;
1003 1003 }
1004 1004 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
1005 1005 "Could not bind buffer dma handle: %d\n", mystat);
1006 1006 return (DDI_FAILURE);
1007 1007 }
1008 1008
1009 1009 ASSERT(count == 1);
1010 1010 if (count != 1) {
1011 1011 if (buf->dma_handle != NULL) {
1012 1012 (void) ddi_dma_unbind_handle(buf->dma_handle);
1013 1013 }
1014 1014 if (buf->acc_handle != NULL) {
1015 1015 ddi_dma_mem_free(&buf->acc_handle);
1016 1016 buf->acc_handle = NULL;
1017 1017 buf->address = NULL;
1018 1018 }
1019 1019 if (buf->dma_handle != NULL) {
1020 1020 ddi_dma_free_handle(&buf->dma_handle);
1021 1021 buf->dma_handle = NULL;
1022 1022 }
1023 1023 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
1024 1024 "Could not bind buffer as a single frag. "
1025 1025 "Count = %d\n", count);
1026 1026 return (DDI_FAILURE);
1027 1027 }
1028 1028
1029 1029 buf->dma_address = cookie.dmac_laddress;
1030 1030 buf->size = len;
1031 1031 buf->len = 0;
1032 1032
1033 1033 return (DDI_SUCCESS);
1034 1034 }
1035 1035
1036 1036 /*
1037 1037 * e1000g_dma_mem_alloc_82546 - allocate a dma buffer, making up to
1038 1038 * ALLOC_RETRY attempts to get a buffer that doesn't cross a 64k boundary.
1039 1039 */
static int
e1000g_dma_mem_alloc_82546(dma_buffer_t *buf, size_t size, size_t *len)
{
#define ALLOC_RETRY	10
	int stat;
	int cnt = 0;
	ddi_acc_handle_t hold[ALLOC_RETRY];

	/*
	 * Keep allocating until we get a buffer that does not cross a 64k
	 * boundary, up to ALLOC_RETRY attempts. Bad buffers are parked in
	 * hold[] (so the allocator cannot hand the same range back) and
	 * released in one sweep at the end.
	 */
	while (cnt < ALLOC_RETRY) {
		hold[cnt] = NULL;

		/* allocate memory */
		stat = ddi_dma_mem_alloc(buf->dma_handle, size,
		    &e1000g_buf_acc_attr, DDI_DMA_STREAMING, DDI_DMA_DONTWAIT,
		    0, &buf->address, len, &buf->acc_handle);

		if (stat != DDI_SUCCESS) {
			break;
		}

		/*
		 * Check 64k boundary:
		 * if it is bad, hold it and retry
		 * if it is good, exit loop
		 */
		if (e1000g_cross_64k_bound(buf->address, *len)) {
			hold[cnt] = buf->acc_handle;
			stat = DDI_FAILURE;
		} else {
			break;
		}

		cnt++;
	}

	/*
	 * Release any held buffers crossing 64k boundary.
	 *
	 * NOTE(review): if all ALLOC_RETRY attempts crossed the boundary,
	 * buf->acc_handle/buf->address still reference the last (now freed)
	 * buffer on return; the caller clears them on DDI_FAILURE.
	 */
	for (--cnt; cnt >= 0; cnt--) {
		if (hold[cnt])
			ddi_dma_mem_free(&hold[cnt]);
	}

	/* DDI_SUCCESS only if a boundary-clean buffer was obtained. */
	return (stat);
}
1083 1083
1084 1084 /*
1085 1085 * e1000g_cross_64k_bound - If starting and ending address cross a 64k boundary
1086 1086 * return true; otherwise return false
1087 1087 */
1088 1088 static boolean_t
1089 1089 e1000g_cross_64k_bound(void *addr, uintptr_t len)
1090 1090 {
1091 1091 uintptr_t start = (uintptr_t)addr;
1092 1092 uintptr_t end = start + len - 1;
1093 1093
1094 1094 return (((start ^ end) >> 16) == 0 ? B_FALSE : B_TRUE);
1095 1095 }
1096 1096
1097 1097 static void
1098 1098 e1000g_free_dma_buffer(dma_buffer_t *buf)
1099 1099 {
1100 1100 if (buf->dma_handle != NULL) {
1101 1101 (void) ddi_dma_unbind_handle(buf->dma_handle);
1102 1102 } else {
1103 1103 return;
1104 1104 }
1105 1105
1106 1106 buf->dma_address = NULL;
1107 1107
1108 1108 if (buf->acc_handle != NULL) {
1109 1109 ddi_dma_mem_free(&buf->acc_handle);
1110 1110 buf->acc_handle = NULL;
1111 1111 buf->address = NULL;
1112 1112 }
1113 1113
1114 1114 if (buf->dma_handle != NULL) {
1115 1115 ddi_dma_free_handle(&buf->dma_handle);
1116 1116 buf->dma_handle = NULL;
1117 1117 }
1118 1118
1119 1119 buf->size = 0;
1120 1120 buf->len = 0;
1121 1121 }
1122 1122
1123 1123 static int
1124 1124 e1000g_alloc_tx_packets(e1000g_tx_ring_t *tx_ring)
1125 1125 {
1126 1126 int j;
1127 1127 p_tx_sw_packet_t packet;
1128 1128 int mystat;
1129 1129 dma_buffer_t *tx_buf;
1130 1130 struct e1000g *Adapter;
1131 1131 dev_info_t *devinfo;
1132 1132 ddi_dma_attr_t dma_attr;
1133 1133
1134 1134 Adapter = tx_ring->adapter;
1135 1135 devinfo = Adapter->dip;
1136 1136 dma_attr = e1000g_buf_dma_attr;
1137 1137
1138 1138 /*
1139 1139 * Memory allocation for the Transmit software structure, the transmit
1140 1140 * software packet. This structure stores all the relevant information
1141 1141 * for transmitting a single packet.
1142 1142 */
1143 1143 tx_ring->packet_area =
1144 1144 kmem_zalloc(TX_SW_PKT_AREA_SZ, KM_NOSLEEP);
1145 1145
1146 1146 if (tx_ring->packet_area == NULL)
1147 1147 return (DDI_FAILURE);
1148 1148
1149 1149 for (j = 0, packet = tx_ring->packet_area;
1150 1150 j < Adapter->tx_freelist_num; j++, packet++) {
1151 1151
1152 1152 ASSERT(packet != NULL);
1153 1153
1154 1154 /*
1155 1155 * Pre-allocate dma handles for transmit. These dma handles
1156 1156 * will be dynamically bound to the data buffers passed down
1157 1157 * from the upper layers at the time of transmitting. The
1158 1158 * dynamic binding only applies for the packets that are larger
1159 1159 * than the tx_bcopy_thresh.
1160 1160 */
1161 1161 switch (e1000g_dma_type) {
1162 1162 #ifdef __sparc
1163 1163 case USE_DVMA:
1164 1164 mystat = dvma_reserve(devinfo,
1165 1165 &e1000g_dma_limits,
1166 1166 Adapter->dvma_page_num,
1167 1167 &packet->tx_dma_handle);
1168 1168 break;
1169 1169 #endif
1170 1170 case USE_DMA:
1171 1171 mystat = ddi_dma_alloc_handle(devinfo,
1172 1172 &e1000g_tx_dma_attr,
1173 1173 DDI_DMA_DONTWAIT, 0,
1174 1174 &packet->tx_dma_handle);
1175 1175 break;
1176 1176 default:
1177 1177 ASSERT(B_FALSE);
1178 1178 break;
1179 1179 }
1180 1180 if (mystat != DDI_SUCCESS) {
1181 1181 packet->tx_dma_handle = NULL;
1182 1182 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
1183 1183 "Could not allocate tx dma handle: %d\n", mystat);
1184 1184 goto tx_pkt_fail;
1185 1185 }
1186 1186
1187 1187 /*
1188 1188 * Pre-allocate transmit buffers for small packets that the
1189 1189 * size is less than tx_bcopy_thresh. The data of those small
1190 1190 * packets will be bcopy() to the transmit buffers instead of
1191 1191 * using dynamical DMA binding. For small packets, bcopy will
1192 1192 * bring better performance than DMA binding.
1193 1193 */
1194 1194 tx_buf = packet->tx_buf;
1195 1195
1196 1196 switch (e1000g_dma_type) {
1197 1197 #ifdef __sparc
1198 1198 case USE_DVMA:
1199 1199 mystat = e1000g_alloc_dvma_buffer(Adapter,
1200 1200 tx_buf, Adapter->tx_buffer_size);
1201 1201 break;
1202 1202 #endif
1203 1203 case USE_DMA:
1204 1204 mystat = e1000g_alloc_dma_buffer(Adapter,
1205 1205 tx_buf, Adapter->tx_buffer_size, &dma_attr);
1206 1206 break;
1207 1207 default:
1208 1208 ASSERT(B_FALSE);
1209 1209 break;
1210 1210 }
1211 1211 if (mystat != DDI_SUCCESS) {
1212 1212 ASSERT(packet->tx_dma_handle != NULL);
1213 1213 switch (e1000g_dma_type) {
1214 1214 #ifdef __sparc
1215 1215 case USE_DVMA:
1216 1216 dvma_release(packet->tx_dma_handle);
1217 1217 break;
1218 1218 #endif
1219 1219 case USE_DMA:
1220 1220 ddi_dma_free_handle(&packet->tx_dma_handle);
1221 1221 break;
1222 1222 default:
1223 1223 ASSERT(B_FALSE);
1224 1224 break;
1225 1225 }
1226 1226 packet->tx_dma_handle = NULL;
1227 1227 E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
1228 1228 "Allocate Tx buffer fail\n");
1229 1229 goto tx_pkt_fail;
1230 1230 }
1231 1231
1232 1232 packet->dma_type = e1000g_dma_type;
1233 1233 } /* for */
1234 1234
1235 1235 return (DDI_SUCCESS);
1236 1236
1237 1237 tx_pkt_fail:
1238 1238 e1000g_free_tx_packets(tx_ring);
1239 1239
1240 1240 return (DDI_FAILURE);
1241 1241 }
1242 1242
1243 1243
1244 1244 int
1245 1245 e1000g_increase_rx_packets(e1000g_rx_data_t *rx_data)
1246 1246 {
1247 1247 int i;
1248 1248 p_rx_sw_packet_t packet;
1249 1249 p_rx_sw_packet_t cur, next;
1250 1250 struct e1000g *Adapter;
1251 1251 ddi_dma_attr_t dma_attr;
1252 1252
1253 1253 Adapter = rx_data->rx_ring->adapter;
1254 1254 dma_attr = e1000g_buf_dma_attr;
1255 1255 dma_attr.dma_attr_align = Adapter->rx_buf_align;
1256 1256 cur = NULL;
1257 1257
1258 1258 for (i = 0; i < RX_FREELIST_INCREASE_SIZE; i++) {
1259 1259 packet = e1000g_alloc_rx_sw_packet(rx_data, &dma_attr);
1260 1260 if (packet == NULL)
1261 1261 break;
1262 1262 packet->next = cur;
1263 1263 cur = packet;
1264 1264 }
1265 1265 Adapter->rx_freelist_num += i;
1266 1266 rx_data->avail_freepkt += i;
1267 1267
1268 1268 while (cur != NULL) {
1269 1269 QUEUE_PUSH_TAIL(&rx_data->free_list, &cur->Link);
1270 1270 next = cur->next;
1271 1271 cur->next = rx_data->packet_area;
1272 1272 rx_data->packet_area = cur;
1273 1273
1274 1274 cur = next;
1275 1275 }
1276 1276
1277 1277 return (DDI_SUCCESS);
1278 1278 }
1279 1279
1280 1280
1281 1281 static int
1282 1282 e1000g_alloc_rx_packets(e1000g_rx_data_t *rx_data)
1283 1283 {
1284 1284 int i;
1285 1285 p_rx_sw_packet_t packet;
1286 1286 struct e1000g *Adapter;
1287 1287 uint32_t packet_num;
1288 1288 ddi_dma_attr_t dma_attr;
1289 1289
1290 1290 Adapter = rx_data->rx_ring->adapter;
1291 1291 dma_attr = e1000g_buf_dma_attr;
1292 1292 dma_attr.dma_attr_align = Adapter->rx_buf_align;
1293 1293
1294 1294 /*
1295 1295 * Allocate memory for the rx_sw_packet structures. Each one of these
1296 1296 * structures will contain a virtual and physical address to an actual
1297 1297 * receive buffer in host memory. Since we use one rx_sw_packet per
1298 1298 * received packet, the maximum number of rx_sw_packet that we'll
1299 1299 * need is equal to the number of receive descriptors plus the freelist
1300 1300 * size.
1301 1301 */
1302 1302 packet_num = Adapter->rx_desc_num + RX_FREELIST_INCREASE_SIZE;
1303 1303 rx_data->packet_area = NULL;
1304 1304
1305 1305 for (i = 0; i < packet_num; i++) {
1306 1306 packet = e1000g_alloc_rx_sw_packet(rx_data, &dma_attr);
1307 1307 if (packet == NULL)
1308 1308 goto rx_pkt_fail;
1309 1309
1310 1310 packet->next = rx_data->packet_area;
1311 1311 rx_data->packet_area = packet;
1312 1312 }
1313 1313
1314 1314 Adapter->rx_freelist_num = RX_FREELIST_INCREASE_SIZE;
1315 1315 return (DDI_SUCCESS);
1316 1316
1317 1317 rx_pkt_fail:
1318 1318 e1000g_free_rx_packets(rx_data, B_TRUE);
1319 1319 return (DDI_FAILURE);
1320 1320 }
1321 1321
1322 1322
1323 1323 static p_rx_sw_packet_t
1324 1324 e1000g_alloc_rx_sw_packet(e1000g_rx_data_t *rx_data, ddi_dma_attr_t *p_dma_attr)
1325 1325 {
1326 1326 int mystat;
1327 1327 p_rx_sw_packet_t packet;
1328 1328 dma_buffer_t *rx_buf;
1329 1329 struct e1000g *Adapter;
1330 1330
1331 1331 Adapter = rx_data->rx_ring->adapter;
1332 1332
1333 1333 packet = kmem_zalloc(sizeof (rx_sw_packet_t), KM_NOSLEEP);
1334 1334 if (packet == NULL) {
1335 1335 E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
1336 1336 "Cound not allocate memory for Rx SwPacket\n");
1337 1337 return (NULL);
1338 1338 }
1339 1339
1340 1340 rx_buf = packet->rx_buf;
1341 1341
1342 1342 switch (e1000g_dma_type) {
1343 1343 #ifdef __sparc
1344 1344 case USE_DVMA:
1345 1345 mystat = e1000g_alloc_dvma_buffer(Adapter,
1346 1346 rx_buf, Adapter->rx_buffer_size);
1347 1347 break;
1348 1348 #endif
1349 1349 case USE_DMA:
1350 1350 if (Adapter->mem_workaround_82546 &&
1351 1351 ((Adapter->shared.mac.type == e1000_82545) ||
1352 1352 (Adapter->shared.mac.type == e1000_82546) ||
1353 1353 (Adapter->shared.mac.type == e1000_82546_rev_3))) {
1354 1354 mystat = e1000g_alloc_dma_buffer_82546(Adapter,
1355 1355 rx_buf, Adapter->rx_buffer_size, p_dma_attr);
1356 1356 } else {
1357 1357 mystat = e1000g_alloc_dma_buffer(Adapter,
1358 1358 rx_buf, Adapter->rx_buffer_size, p_dma_attr);
1359 1359 }
1360 1360 break;
1361 1361 default:
1362 1362 ASSERT(B_FALSE);
1363 1363 break;
1364 1364 }
1365 1365
1366 1366 if (mystat != DDI_SUCCESS) {
1367 1367 if (packet != NULL)
1368 1368 kmem_free(packet, sizeof (rx_sw_packet_t));
1369 1369
1370 1370 E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
1371 1371 "Failed to allocate Rx buffer\n");
1372 1372 return (NULL);
1373 1373 }
1374 1374
1375 1375 rx_buf->size -= E1000G_IPALIGNROOM;
1376 1376 rx_buf->address += E1000G_IPALIGNROOM;
1377 1377 rx_buf->dma_address += E1000G_IPALIGNROOM;
1378 1378
1379 1379 packet->rx_data = (caddr_t)rx_data;
1380 1380 packet->free_rtn.free_func = e1000g_rxfree_func;
1381 1381 packet->free_rtn.free_arg = (char *)packet;
1382 1382 /*
1383 1383 * esballoc is changed to desballoc which
1384 1384 * is undocumented call but as per sun,
1385 1385 * we can use it. It gives better efficiency.
1386 1386 */
1387 1387 packet->mp = desballoc((unsigned char *)
1388 1388 rx_buf->address,
1389 1389 rx_buf->size,
1390 1390 BPRI_MED, &packet->free_rtn);
1391 1391
1392 1392 packet->dma_type = e1000g_dma_type;
1393 1393 packet->ref_cnt = 1;
1394 1394
1395 1395 return (packet);
1396 1396 }
1397 1397
1398 1398 void
1399 1399 e1000g_free_rx_sw_packet(p_rx_sw_packet_t packet, boolean_t full_release)
1400 1400 {
1401 1401 dma_buffer_t *rx_buf;
1402 1402
1403 1403 if (packet->mp != NULL) {
1404 1404 freemsg(packet->mp);
1405 1405 packet->mp = NULL;
1406 1406 }
1407 1407
1408 1408 rx_buf = packet->rx_buf;
1409 1409
1410 1410 switch (packet->dma_type) {
1411 1411 #ifdef __sparc
1412 1412 case USE_DVMA:
1413 1413 if (rx_buf->address != NULL) {
1414 1414 rx_buf->size += E1000G_IPALIGNROOM;
1415 1415 rx_buf->address -= E1000G_IPALIGNROOM;
1416 1416 }
1417 1417 e1000g_free_dvma_buffer(rx_buf);
1418 1418 break;
1419 1419 #endif
1420 1420 case USE_DMA:
1421 1421 e1000g_free_dma_buffer(rx_buf);
1422 1422 break;
1423 1423 default:
1424 1424 break;
1425 1425 }
1426 1426
1427 1427 packet->dma_type = USE_NONE;
1428 1428
1429 1429 if (!full_release)
1430 1430 return;
1431 1431
1432 1432 kmem_free(packet, sizeof (rx_sw_packet_t));
1433 1433 }
1434 1434
/*
 * e1000g_free_rx_packets - walk the packet_area chain and free each
 * rx_sw_packet whose reference count drops to zero; packets still held by
 * the upper layers (mblks outstanding) are only counted as pending and will
 * be reclaimed later via the free routine.
 */
static void
e1000g_free_rx_packets(e1000g_rx_data_t *rx_data, boolean_t full_release)
{
	p_rx_sw_packet_t packet, next_packet;
	uint32_t ref_cnt;

	mutex_enter(&e1000g_rx_detach_lock);

	packet = rx_data->packet_area;
	while (packet != NULL) {
		next_packet = packet->next;

		ref_cnt = atomic_dec_32_nv(&packet->ref_cnt);
		if (ref_cnt > 0) {
			/* Still referenced elsewhere; defer to rxfree path. */
			atomic_inc_32(&rx_data->pending_count);
			atomic_inc_32(&e1000g_mblks_pending);
		} else {
			e1000g_free_rx_sw_packet(packet, full_release);
		}

		packet = next_packet;
	}

	/*
	 * Clear the list head on a full release so a later pass (e.g. after
	 * a failed start) cannot walk freed packets — use after free.
	 */
	if (full_release)
		rx_data->packet_area = NULL;

	mutex_exit(&e1000g_rx_detach_lock);
}
1460 1463
1461 1464
/*
 * e1000g_free_tx_packets - release every tx_sw_packet's DMA handle and
 * bounce buffer, then free the packet area itself. Safe to call on a
 * partially-initialized ring (as from the alloc failure path): the first
 * packet with a NULL handle marks the start of the uninitialized tail.
 */
static void
e1000g_free_tx_packets(e1000g_tx_ring_t *tx_ring)
{
	int j;
	struct e1000g *Adapter;
	p_tx_sw_packet_t packet;
	dma_buffer_t *tx_buf;

	Adapter = tx_ring->adapter;

	for (j = 0, packet = tx_ring->packet_area;
	    j < Adapter->tx_freelist_num; j++, packet++) {

		if (packet == NULL)
			break;

		/* Free the Tx DMA handle for dynamical binding */
		if (packet->tx_dma_handle != NULL) {
			switch (packet->dma_type) {
#ifdef __sparc
			case USE_DVMA:
				dvma_release(packet->tx_dma_handle);
				break;
#endif
			case USE_DMA:
				ddi_dma_free_handle(&packet->tx_dma_handle);
				break;
			default:
				ASSERT(B_FALSE);
				break;
			}
			packet->tx_dma_handle = NULL;
		} else {
			/*
			 * If the dma handle is NULL, then we don't
			 * need to check the packets left. For they
			 * have not been initialized or have been freed.
			 */
			break;
		}

		tx_buf = packet->tx_buf;

		switch (packet->dma_type) {
#ifdef __sparc
		case USE_DVMA:
			e1000g_free_dvma_buffer(tx_buf);
			break;
#endif
		case USE_DMA:
			e1000g_free_dma_buffer(tx_buf);
			break;
		default:
			ASSERT(B_FALSE);
			break;
		}

		packet->dma_type = USE_NONE;
	}
	/* Release the zalloc'd packet array (alloc side sizes it the same). */
	if (tx_ring->packet_area != NULL) {
		kmem_free(tx_ring->packet_area, TX_SW_PKT_AREA_SZ);
		tx_ring->packet_area = NULL;
	}
}
1526 1529
1527 1530 /*
1528 1531 * e1000g_release_dma_resources - release allocated DMA resources
1529 1532 *
1530 1533 * This function releases any pending buffers that has been
1531 1534 * previously allocated
1532 1535 */
void
e1000g_release_dma_resources(struct e1000g *Adapter)
{
	/* Free descriptor rings first, then the rx/tx packet buffers. */
	e1000g_free_descriptors(Adapter);
	e1000g_free_packets(Adapter);
}
1539 1542
1540 1543 /* ARGSUSED */
1541 1544 void
1542 1545 e1000g_set_fma_flags(int dma_flag)
1543 1546 {
1544 1547 if (dma_flag) {
1545 1548 e1000g_tx_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
1546 1549 e1000g_buf_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
1547 1550 e1000g_desc_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
1548 1551 } else {
1549 1552 e1000g_tx_dma_attr.dma_attr_flags = 0;
1550 1553 e1000g_buf_dma_attr.dma_attr_flags = 0;
1551 1554 e1000g_desc_dma_attr.dma_attr_flags = 0;
1552 1555 }
1553 1556 }
↓ open down ↓ |
86 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX