Print this page
5253 kmem_alloc/kmem_zalloc won't fail with KM_SLEEP
5254 getrbuf won't fail with KM_SLEEP
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/fibre-channel/fca/qlc/ql_ioctl.c
+++ new/usr/src/uts/common/io/fibre-channel/fca/qlc/ql_ioctl.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /* Copyright 2010 QLogic Corporation */
23 23
24 24 /*
25 25 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
26 26 */
27 27
28 28 #pragma ident "Copyright 2010 QLogic Corporation; ql_ioctl.c"
29 29
30 30 /*
31 31 * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
32 32 * Fibre Channel Adapter (FCA) driver IOCTL source file.
33 33 *
34 34 * ***********************************************************************
35 35 * * **
36 36 * * NOTICE **
37 37 * * COPYRIGHT (C) 1996-2010 QLOGIC CORPORATION **
38 38 * * ALL RIGHTS RESERVED **
39 39 * * **
40 40 * ***********************************************************************
41 41 *
42 42 */
43 43
44 44 #include <ql_apps.h>
45 45 #include <ql_api.h>
46 46 #include <ql_debug.h>
47 47 #include <ql_init.h>
48 48 #include <ql_ioctl.h>
49 49 #include <ql_mbx.h>
50 50 #include <ql_xioctl.h>
51 51
52 52 /*
53 53 * Local Function Prototypes.
54 54 */
55 55 static int ql_busy_notification(ql_adapter_state_t *);
56 56 static int ql_idle_notification(ql_adapter_state_t *);
57 57 static int ql_get_feature_bits(ql_adapter_state_t *ha, uint16_t *features);
58 58 static int ql_set_feature_bits(ql_adapter_state_t *ha, uint16_t features);
59 59 static int ql_set_nvram_adapter_defaults(ql_adapter_state_t *ha);
60 60 static void ql_load_nvram(ql_adapter_state_t *ha, uint8_t addr,
61 61 uint16_t value);
62 62 static int ql_24xx_load_nvram(ql_adapter_state_t *, uint32_t, uint32_t);
63 63 static int ql_adm_op(ql_adapter_state_t *, void *, int);
64 64 static int ql_adm_adapter_info(ql_adapter_state_t *, ql_adm_op_t *, int);
65 65 static int ql_adm_extended_logging(ql_adapter_state_t *, ql_adm_op_t *);
66 66 static int ql_adm_device_list(ql_adapter_state_t *, ql_adm_op_t *, int);
67 67 static int ql_adm_update_properties(ql_adapter_state_t *);
68 68 static int ql_adm_prop_update_int(ql_adapter_state_t *, ql_adm_op_t *, int);
69 69 static int ql_adm_loop_reset(ql_adapter_state_t *);
70 70 static int ql_adm_fw_dump(ql_adapter_state_t *, ql_adm_op_t *, void *, int);
71 71 static int ql_adm_nvram_dump(ql_adapter_state_t *, ql_adm_op_t *, int);
72 72 static int ql_adm_nvram_load(ql_adapter_state_t *, ql_adm_op_t *, int);
73 73 static int ql_adm_flash_load(ql_adapter_state_t *, ql_adm_op_t *, int);
74 74 static int ql_adm_vpd_dump(ql_adapter_state_t *, ql_adm_op_t *, int);
75 75 static int ql_adm_vpd_load(ql_adapter_state_t *, ql_adm_op_t *, int);
76 76 static int ql_adm_vpd_gettag(ql_adapter_state_t *, ql_adm_op_t *, int);
77 77 static int ql_adm_updfwmodule(ql_adapter_state_t *, ql_adm_op_t *, int);
78 78 static uint8_t *ql_vpd_findtag(ql_adapter_state_t *, uint8_t *, int8_t *);
79 79
80 80 /* ************************************************************************ */
81 81 /* cb_ops functions */
82 82 /* ************************************************************************ */
83 83
84 84 /*
85 85 * ql_open
86 86 * opens device
87 87 *
88 88 * Input:
89 89 * dev_p = device pointer
90 90 * flags = open flags
91 91 * otype = open type
92 92 * cred_p = credentials pointer
93 93 *
94 94 * Returns:
95 95 * 0 = success
96 96 *
97 97 * Context:
98 98 * Kernel context.
99 99 */
100 100 /* ARGSUSED */
101 101 int
102 102 ql_open(dev_t *dev_p, int flags, int otyp, cred_t *cred_p)
103 103 {
104 104 ql_adapter_state_t *ha;
105 105 int rval = 0;
106 106
107 107 ha = ddi_get_soft_state(ql_state, (int32_t)getminor(*dev_p));
108 108 if (ha == NULL) {
109 109 QL_PRINT_2(CE_CONT, "failed, no adapter\n");
110 110 return (ENXIO);
111 111 }
112 112
113 113 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
114 114
115 115 /* Allow only character opens */
116 116 if (otyp != OTYP_CHR) {
117 117 QL_PRINT_2(CE_CONT, "(%d): failed, open type\n",
118 118 ha->instance);
119 119 return (EINVAL);
120 120 }
121 121
122 122 ADAPTER_STATE_LOCK(ha);
123 123 if (flags & FEXCL && ha->flags & QL_OPENED) {
124 124 ADAPTER_STATE_UNLOCK(ha);
125 125 rval = EBUSY;
126 126 } else {
127 127 ha->flags |= QL_OPENED;
128 128 ADAPTER_STATE_UNLOCK(ha);
129 129 }
130 130
131 131 if (rval != 0) {
132 132 EL(ha, "failed, rval = %xh\n", rval);
133 133 } else {
134 134 /*EMPTY*/
135 135 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
136 136 }
137 137 return (rval);
138 138 }
139 139
/*
 * ql_close
 *	closes device
 *
 * Input:
 *	dev = device number
 *	flags = open flags
 *	otyp = open type
 *	cred_p = credentials pointer
 *
 * Returns:
 *	0 = success
 *
 * Context:
 *	Kernel context.
 */
/* ARGSUSED */
int
ql_close(dev_t dev, int flags, int otyp, cred_t *cred_p)
{
	ql_adapter_state_t	*ha;
	int			rval = 0;

	ha = ddi_get_soft_state(ql_state, (int32_t)getminor(dev));
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter\n");
		return (ENXIO);
	}

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Only character opens are allowed, so only those may close. */
	if (otyp != OTYP_CHR) {
		QL_PRINT_2(CE_CONT, "(%d): failed, open type\n",
		    ha->instance);
		return (EINVAL);
	}

	ADAPTER_STATE_LOCK(ha);
	ha->flags &= ~QL_OPENED;
	ADAPTER_STATE_UNLOCK(ha);

	/*
	 * NOTE(review): rval is never set non-zero on this path, so the
	 * EL() branch below is currently unreachable; kept for symmetry
	 * with ql_open().
	 */
	if (rval != 0) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
189 189
/*
 * ql_ioctl
 *	control a character device
 *
 * Input:
 *	dev = device number
 *	cmd = function to perform
 *	arg = data type varies with request
 *	mode = flags
 *	cred_p = credentials pointer
 *	rval_p = pointer to result value
 *
 * Returns:
 *	0 = success
 *
 * Context:
 *	Kernel context.
 */
/* ARGSUSED */
int
ql_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cred_p,
    int *rval_p)
{
	ql_adapter_state_t	*ha;
	int			rval = 0;

	/* No ioctl service while the system is panicking. */
	if (ddi_in_panic()) {
		QL_PRINT_2(CE_CONT, "ql_ioctl: ddi_in_panic exit\n");
		return (ENOPROTOOPT);
	}

	ha = ddi_get_soft_state(ql_state, (int32_t)getminor(dev));
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter\n");
		return (ENXIO);
	}

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * Quick clean exit for qla2x00 foapi calls which are
	 * not supported in qlc.
	 */
	if (cmd >= QL_FOAPI_START && cmd <= QL_FOAPI_END) {
		QL_PRINT_9(CE_CONT, "failed, fo api not supported\n");
		return (ENOTTY);
	}

	/* PWR management busy: hold the adapter at full power. */
	rval = ql_busy_notification(ha);
	if (rval != FC_SUCCESS) {
		EL(ha, "failed, ql_busy_notification\n");
		return (ENXIO);
	}

	/*
	 * Give the extended ioctl handler first crack; fall through to
	 * the local commands only when it does not recognize the request.
	 */
	rval = ql_xioctl(ha, cmd, arg, mode, cred_p, rval_p);
	if (rval == ENOPROTOOPT || rval == EINVAL) {
		switch (cmd) {
		case QL_GET_ADAPTER_FEATURE_BITS: {
			uint16_t bits;

			rval = ql_get_feature_bits(ha, &bits);

			if (!rval && ddi_copyout((void *)&bits, (void *)arg,
			    sizeof (bits), mode)) {
				rval = EFAULT;
			}
			break;
		}

		case QL_SET_ADAPTER_FEATURE_BITS: {
			uint16_t bits;

			if (ddi_copyin((void *)arg, (void *)&bits,
			    sizeof (bits), mode)) {
				rval = EFAULT;
				break;
			}

			rval = ql_set_feature_bits(ha, bits);
			break;
		}

		case QL_SET_ADAPTER_NVRAM_DEFAULTS:
			rval = ql_set_nvram_adapter_defaults(ha);
			break;

		case QL_UTIL_LOAD:
			rval = ql_nv_util_load(ha, (void *)arg, mode);
			break;

		case QL_UTIL_DUMP:
			rval = ql_nv_util_dump(ha, (void *)arg, mode);
			break;

		case QL_ADM_OP:
			rval = ql_adm_op(ha, (void *)arg, mode);
			break;

		default:
			EL(ha, "unknown command = %d\n", cmd);
			rval = ENOTTY;
			break;
		}
	}

	/* PWR management idle: release the busy hold taken above. */
	(void) ql_idle_notification(ha);

	if (rval != 0) {
		/*
		 * Don't show failures caused by pps polling for
		 * non-existent virtual ports.
		 */
		if (cmd != EXT_CC_VPORT_CMD) {
			EL(ha, "failed, cmd=%d rval=%d\n", cmd, rval);
		}
	} else {
		/*EMPTY*/
		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
313 313
/*
 * ql_busy_notification
 *	Adapter busy notification.
 *
 * Input:
 *	ha = adapter state pointer.
 *
 * Returns:
 *	FC_SUCCESS
 *	FC_FAILURE
 *
 * Context:
 *	Kernel context.
 */
static int
ql_busy_notification(ql_adapter_state_t *ha)
{
	/* Nothing to do when power management is not in play. */
	if (!ha->pm_capable) {
		return (FC_SUCCESS);
	}

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * Bump the busy count before telling the PM framework so a
	 * concurrent idle cannot power the component down underneath us;
	 * the count is rolled back on any failure below.
	 */
	QL_PM_LOCK(ha);
	ha->busy++;
	QL_PM_UNLOCK(ha);

	if (pm_busy_component(ha->dip, 0) != DDI_SUCCESS) {
		QL_PM_LOCK(ha);
		ha->busy--;
		QL_PM_UNLOCK(ha);

		EL(ha, "pm_busy_component failed = %xh\n", FC_FAILURE);
		return (FC_FAILURE);
	}

	/* Make sure the adapter is at full power (D0) before returning. */
	QL_PM_LOCK(ha);
	if (ha->power_level != PM_LEVEL_D0) {
		QL_PM_UNLOCK(ha);
		/*
		 * NOTE(review): on pm_raise_power() failure the earlier
		 * pm_busy_component() hold is not undone with
		 * pm_idle_component() -- confirm this is intentional.
		 */
		if (pm_raise_power(ha->dip, 0, 1) != DDI_SUCCESS) {
			QL_PM_LOCK(ha);
			ha->busy--;
			QL_PM_UNLOCK(ha);
			return (FC_FAILURE);
		}
	} else {
		QL_PM_UNLOCK(ha);
	}

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (FC_SUCCESS);
}
367 367
/*
 * ql_idle_notification
 *	Adapter idle notification.
 *
 *	Counterpart of ql_busy_notification(): releases the PM busy hold
 *	and decrements the driver's busy count.
 *
 * Input:
 *	ha = adapter state pointer.
 *
 * Returns:
 *	FC_SUCCESS
 *	FC_FAILURE
 *
 * Context:
 *	Kernel context.
 */
static int
ql_idle_notification(ql_adapter_state_t *ha)
{
	/* Nothing to do when power management is not in play. */
	if (!ha->pm_capable) {
		return (FC_SUCCESS);
	}

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * NOTE(review): if pm_idle_component() fails, ha->busy stays
	 * elevated (the increment from ql_busy_notification() is not
	 * rolled back) -- confirm this is the intended behavior.
	 */
	if (pm_idle_component(ha->dip, 0) != DDI_SUCCESS) {
		EL(ha, "pm_idle_component failed = %xh\n", FC_FAILURE);
		return (FC_FAILURE);
	}

	QL_PM_LOCK(ha);
	ha->busy--;
	QL_PM_UNLOCK(ha);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (FC_SUCCESS);
}
404 404
/*
 * Get adapter feature bits from NVRAM
 *
 * Reads the 16-bit adapter_features word directly out of the legacy
 * serial NVRAM by bit-banging the nvram register.  Not supported on
 * ISP24xx-class adapters (their NVRAM lives in flash).
 *
 * Input:
 *	ha = adapter state pointer.
 *	features = receives the feature word on success.
 *
 * Returns:
 *	0 = success, EINVAL = unsupported adapter, EIO = NVRAM lock failed.
 */
static int
ql_get_feature_bits(ql_adapter_state_t *ha, uint16_t *features)
{
	int count;
	volatile uint16_t data;
	uint32_t nv_cmd;
	uint32_t start_addr;
	int rval;
	uint32_t offset = offsetof(nvram_t, adapter_features);

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	if (CFG_IST(ha, CFG_CTRL_24258081)) {
		EL(ha, "Not supported for 24xx\n");
		return (EINVAL);
	}

	/*
	 * The offset can't be greater than max of 8 bits and
	 * the following code breaks if the offset isn't at
	 * 2 byte boundary.
	 */
	rval = ql_lock_nvram(ha, &start_addr, LNF_NVRAM_DATA);
	if (rval != QL_SUCCESS) {
		EL(ha, "failed, ql_lock_nvram=%xh\n", rval);
		return (EIO);
	}

	/*
	 * Have the most significant 3 bits represent the read operation
	 * followed by the 8 bits representing the offset at which we
	 * are going to perform the read operation
	 */
	offset >>= 1;		/* byte offset -> 16-bit word address */
	offset += start_addr;
	nv_cmd = (offset << 16) | NV_READ_OP;
	nv_cmd <<= 5;		/* left-justify so BIT_31 is the first bit */

	/*
	 * Select the chip and feed the command and address
	 * (11 bits clocked out MSB first)
	 */
	for (count = 0; count < 11; count++) {
		if (nv_cmd & BIT_31) {
			ql_nv_write(ha, NV_DATA_OUT);
		} else {
			ql_nv_write(ha, 0);
		}
		nv_cmd <<= 1;
	}

	/* Clock in the 16 data bits, MSB first. */
	*features = 0;
	for (count = 0; count < 16; count++) {
		WRT16_IO_REG(ha, nvram, NV_SELECT | NV_CLOCK);
		ql_nv_delay();

		data = RD16_IO_REG(ha, nvram);
		*features <<= 1;
		if (data & NV_DATA_IN) {
			*features = (uint16_t)(*features | 0x1);
		}

		WRT16_IO_REG(ha, nvram, NV_SELECT);
		ql_nv_delay();
	}

	/*
	 * Deselect the chip
	 */
	WRT16_IO_REG(ha, nvram, NV_DESELECT);

	ql_release_nvram(ha);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (0);
}
484 484
/*
 * Set adapter feature bits in NVRAM
 *
 * Reads the entire legacy NVRAM image, verifies its 8-bit checksum,
 * patches the adapter_features word, recomputes the checksum, writes
 * the image back and verifies it by re-reading.  Not supported on
 * ISP24xx-class adapters.
 *
 * Returns:
 *	0 = success, EINVAL, EIO, EBADF (bad existing checksum).
 */
static int
ql_set_feature_bits(ql_adapter_state_t *ha, uint16_t features)
{
	int		rval;
	uint32_t	count;
	nvram_t		*nv;
	uint16_t	*wptr;
	uint8_t		*bptr;
	uint8_t		csum;
	uint32_t	start_addr;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	if (CFG_IST(ha, CFG_CTRL_24258081)) {
		EL(ha, "Not supported for 24xx\n");
		return (EINVAL);
	}

	/* KM_SLEEP allocation cannot fail, so no NULL check is needed. */
	nv = kmem_zalloc(sizeof (*nv), KM_SLEEP);

	rval = ql_lock_nvram(ha, &start_addr, LNF_NVRAM_DATA);
	if (rval != QL_SUCCESS) {
		EL(ha, "failed, ql_lock_nvram=%xh\n", rval);
		kmem_free(nv, sizeof (*nv));
		return (EIO);
	}
	rval = 0;

	/*
	 * Read off the whole NVRAM
	 * (a valid image sums to zero byte-wise, including the
	 * trailing checksum byte)
	 */
	wptr = (uint16_t *)nv;
	csum = 0;
	for (count = 0; count < sizeof (nvram_t) / 2; count++) {
		*wptr = (uint16_t)ql_get_nvram_word(ha, count + start_addr);
		csum = (uint8_t)(csum + (uint8_t)*wptr);
		csum = (uint8_t)(csum + (uint8_t)(*wptr >> 8));
		wptr++;
	}

	/*
	 * If the checksum is BAD then fail it right here.
	 */
	if (csum) {
		kmem_free(nv, sizeof (*nv));
		ql_release_nvram(ha);
		return (EBADF);
	}

	nv->adapter_features[0] = (uint8_t)((features & 0xFF00) >> 8);
	nv->adapter_features[1] = (uint8_t)(features & 0xFF);

	/*
	 * Recompute the checksum now.
	 * csum is known to be zero here (checked above), so it is not
	 * explicitly reset before this loop.
	 */
	bptr = (uint8_t *)nv;
	for (count = 0; count < sizeof (nvram_t) - 1; count++) {
		csum = (uint8_t)(csum + *bptr++);
	}
	csum = (uint8_t)(~csum + 1);	/* two's complement */
	nv->checksum = csum;

	/*
	 * Now load the NVRAM
	 */
	wptr = (uint16_t *)nv;
	for (count = 0; count < sizeof (nvram_t) / 2; count++) {
		ql_load_nvram(ha, (uint8_t)(count + start_addr), *wptr++);
	}

	/*
	 * Read NVRAM and verify the contents
	 */
	wptr = (uint16_t *)nv;
	csum = 0;
	for (count = 0; count < sizeof (nvram_t) / 2; count++) {
		if (ql_get_nvram_word(ha, count + start_addr) != *wptr) {
			rval = EIO;
			break;
		}
		csum = (uint8_t)(csum + (uint8_t)*wptr);
		csum = (uint8_t)(csum + (uint8_t)(*wptr >> 8));
		wptr++;
	}

	/*
	 * NOTE(review): when the compare loop above breaks early, the
	 * partial csum is usually non-zero, so the EIO just set gets
	 * overwritten with EINVAL here -- confirm that is intended.
	 */
	if (csum) {
		rval = EINVAL;
	}

	kmem_free(nv, sizeof (*nv));
	ql_release_nvram(ha);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
588 584
/*
 * Fix this function to update just feature bits and checksum in NVRAM
 *
 * Resets the adapter NVRAM image to driver defaults: builds a default
 * initialization control block, computes the image checksum, writes the
 * image out and verifies it by re-reading.  Handles both the ISP24xx
 * flash-resident layout (32-bit words) and the legacy serial-EEPROM
 * layout (16-bit words).
 *
 * Returns:
 *	0 = success, EIO = lock/verify failure, EINVAL = bad checksum.
 */
static int
ql_set_nvram_adapter_defaults(ql_adapter_state_t *ha)
{
	int		rval;
	uint32_t	count;
	uint32_t	start_addr;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	rval = ql_lock_nvram(ha, &start_addr, LNF_NVRAM_DATA);
	if (rval != QL_SUCCESS) {
		EL(ha, "failed, ql_lock_nvram=%xh\n", rval);
		return (EIO);
	}
	rval = 0;

	if (CFG_IST(ha, CFG_CTRL_24258081)) {
		nvram_24xx_t *nv;
		uint32_t *longptr;
		uint32_t csum = 0;

		/* KM_SLEEP allocation cannot fail. */
		nv = kmem_zalloc(sizeof (*nv), KM_SLEEP);

		nv->nvram_version[0] = LSB(ICB_24XX_VERSION);
		nv->nvram_version[1] = MSB(ICB_24XX_VERSION);

		nv->version[0] = 1;
		nv->max_frame_length[1] = 8;
		nv->execution_throttle[0] = 16;
		nv->login_retry_count[0] = 8;

		nv->firmware_options_1[0] = BIT_2 | BIT_1;
		nv->firmware_options_1[1] = BIT_5;
		nv->firmware_options_2[0] = BIT_5;
		nv->firmware_options_2[1] = BIT_4;
		nv->firmware_options_3[1] = BIT_6;

		/*
		 * Set default host adapter parameters
		 */
		nv->host_p[0] = BIT_4 | BIT_1;
		nv->host_p[1] = BIT_3 | BIT_2;
		nv->reset_delay = 5;
		nv->max_luns_per_target[0] = 128;
		nv->port_down_retry_count[0] = 30;
		nv->link_down_timeout[0] = 30;

		/*
		 * compute the checksum now
		 * (32-bit two's complement over all words except the
		 * final checksum slot, which longptr points at after
		 * the loop)
		 */
		longptr = (uint32_t *)nv;
		csum = 0;
		for (count = 0; count < (sizeof (nvram_24xx_t)/4)-1; count++) {
			csum += *longptr;
			longptr++;
		}
		csum = (uint32_t)(~csum + 1);
		/*
		 * NOTE(review): other call sites pass a pointer to
		 * LITTLE_ENDIAN_32() (e.g. &data32, longptr); passing the
		 * value cast to long here looks suspect -- confirm the
		 * macro's expected argument.
		 */
		LITTLE_ENDIAN_32((long)csum);
		*longptr = csum;

		/*
		 * Now load the NVRAM
		 */
		longptr = (uint32_t *)nv;
		for (count = 0; count < sizeof (nvram_24xx_t) / 4; count++) {
			(void) ql_24xx_load_nvram(ha,
			    (uint32_t)(count + start_addr), *longptr++);
		}

		/*
		 * Read NVRAM and verify the contents
		 * NOTE(review): longptr is not advanced in this loop, so
		 * every word is read into nv[0] and only the running sum
		 * is checked (no word-by-word compare as in the legacy
		 * branch) -- confirm that is intended.
		 */
		csum = 0;
		longptr = (uint32_t *)nv;
		for (count = 0; count < sizeof (nvram_24xx_t) / 4; count++) {
			rval = ql_24xx_read_flash(ha, count + start_addr,
			    longptr);
			if (rval != QL_SUCCESS) {
				EL(ha, "24xx_read_flash failed=%xh\n", rval);
				break;
			}
			csum += *longptr;
		}

		/* A valid image checksums to zero. */
		if (csum) {
			rval = EINVAL;
		}
		kmem_free(nv, sizeof (nvram_24xx_t));
	} else {
		nvram_t *nv;
		uint16_t *wptr;
		uint8_t *bptr;
		uint8_t csum;

		/* KM_SLEEP allocation cannot fail. */
		nv = kmem_zalloc(sizeof (*nv), KM_SLEEP);
		/*
		 * Set default initialization control block.
		 */
		nv->parameter_block_version = ICB_VERSION;
		nv->firmware_options[0] = BIT_4 | BIT_3 | BIT_2 | BIT_1;
		nv->firmware_options[1] = BIT_7 | BIT_5 | BIT_2;

		nv->max_frame_length[1] = 4;
		nv->max_iocb_allocation[1] = 1;
		nv->execution_throttle[0] = 16;
		nv->login_retry_count = 8;
		nv->port_name[0] = 33;
		nv->port_name[3] = 224;
		nv->port_name[4] = 139;
		nv->login_timeout = 4;

		/*
		 * Set default host adapter parameters
		 */
		nv->host_p[0] = BIT_1;
		nv->host_p[1] = BIT_2;
		nv->reset_delay = 5;
		nv->port_down_retry_count = 8;
		nv->maximum_luns_per_target[0] = 8;

		/*
		 * compute the checksum now
		 * (8-bit two's complement over all bytes but the last)
		 */
		bptr = (uint8_t *)nv;
		csum = 0;
		for (count = 0; count < sizeof (nvram_t) - 1; count++) {
			csum = (uint8_t)(csum + *bptr++);
		}
		csum = (uint8_t)(~csum + 1);
		nv->checksum = csum;

		/*
		 * Now load the NVRAM
		 */
		wptr = (uint16_t *)nv;
		for (count = 0; count < sizeof (nvram_t) / 2; count++) {
			ql_load_nvram(ha, (uint8_t)(count + start_addr),
			    *wptr++);
		}

		/*
		 * Read NVRAM and verify the contents
		 */
		wptr = (uint16_t *)nv;
		csum = 0;
		for (count = 0; count < sizeof (nvram_t) / 2; count++) {
			if (ql_get_nvram_word(ha, count + start_addr) !=
			    *wptr) {
				rval = EIO;
				break;
			}
			csum = (uint8_t)(csum + (uint8_t)*wptr);
			csum = (uint8_t)(csum + (uint8_t)(*wptr >> 8));
			wptr++;
		}
		if (csum) {
			rval = EINVAL;
		}
		kmem_free(nv, sizeof (*nv));
	}
	ql_release_nvram(ha);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
766 754
/*
 * ql_load_nvram
 *	Writes one 16-bit word to legacy serial NVRAM by bit-banging the
 *	nvram register: write-enable, erase the target location, program
 *	the new value, then write-disable and deselect the chip.
 *
 * Input:
 *	ha = adapter state pointer.
 *	addr = NVRAM word address.
 *	value = data word to store.
 *
 * Context:
 *	Kernel context.
 */
static void
ql_load_nvram(ql_adapter_state_t *ha, uint8_t addr, uint16_t value)
{
	int			count;
	volatile uint16_t	word;
	volatile uint32_t	nv_cmd;

	/*
	 * Clock out a start bit, two zero opcode bits and eight one bits
	 * -- presumably the EEPROM write-enable (EWEN) command; confirm
	 * against the serial EEPROM part's data sheet.
	 */
	ql_nv_write(ha, NV_DATA_OUT);
	ql_nv_write(ha, 0);
	ql_nv_write(ha, 0);

	for (word = 0; word < 8; word++) {
		ql_nv_write(ha, NV_DATA_OUT);
	}

	/*
	 * Deselect the chip
	 */
	WRT16_IO_REG(ha, nvram, NV_DESELECT);
	ql_nv_delay();

	/*
	 * Erase Location
	 */
	nv_cmd = (addr << 16) | NV_ERASE_OP;
	nv_cmd <<= 5;
	/* Shift out the 11 command/address bits, MSB first. */
	for (count = 0; count < 11; count++) {
		if (nv_cmd & BIT_31) {
			ql_nv_write(ha, NV_DATA_OUT);
		} else {
			ql_nv_write(ha, 0);
		}
		nv_cmd <<= 1;
	}

	/*
	 * Wait for Erase to Finish
	 * NOTE(review): this ready poll has no timeout; a hung part
	 * would spin here forever.
	 */
	WRT16_IO_REG(ha, nvram, NV_DESELECT);
	ql_nv_delay();
	WRT16_IO_REG(ha, nvram, NV_SELECT);
	word = 0;
	while ((word & NV_DATA_IN) == 0) {
		ql_nv_delay();
		word = RD16_IO_REG(ha, nvram);
	}
	WRT16_IO_REG(ha, nvram, NV_DESELECT);
	ql_nv_delay();

	/*
	 * Write data now
	 */
	nv_cmd = (addr << 16) | NV_WRITE_OP;
	nv_cmd |= value;
	nv_cmd <<= 5;
	/* 11 command/address bits plus 16 data bits = 27 clocks. */
	for (count = 0; count < 27; count++) {
		if (nv_cmd & BIT_31) {
			ql_nv_write(ha, NV_DATA_OUT);
		} else {
			ql_nv_write(ha, 0);
		}
		nv_cmd <<= 1;
	}

	/*
	 * Wait for NVRAM to become ready (same untimed poll as above)
	 */
	WRT16_IO_REG(ha, nvram, NV_DESELECT);
	ql_nv_delay();
	WRT16_IO_REG(ha, nvram, NV_SELECT);
	word = 0;
	while ((word & NV_DATA_IN) == 0) {
		ql_nv_delay();
		word = RD16_IO_REG(ha, nvram);
	}
	WRT16_IO_REG(ha, nvram, NV_DESELECT);
	ql_nv_delay();

	/*
	 * Disable writes
	 */
	ql_nv_write(ha, NV_DATA_OUT);
	for (count = 0; count < 10; count++) {
		ql_nv_write(ha, 0);
	}

	/*
	 * Deselect the chip now
	 */
	WRT16_IO_REG(ha, nvram, NV_DESELECT);
}
858 846
/*
 * ql_24xx_load_nvram
 *	Enable NVRAM and writes a 32bit word to ISP24xx NVRAM.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	addr:	NVRAM address.
 *	value:	data.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_24xx_load_nvram(ql_adapter_state_t *ha, uint32_t addr, uint32_t value)
{
	int rval;

	/* Enable flash write. */
	if (!(CFG_IST(ha, CFG_CTRL_8081))) {
		WRT32_IO_REG(ha, ctrl_status,
		    RD32_IO_REG(ha, ctrl_status) | ISP_FLASH_ENABLE);
		RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */
	}

	/* Disable NVRAM write-protection. */
	if (CFG_IST(ha, CFG_CTRL_2422)) {
		(void) ql_24xx_write_flash(ha, NVRAM_CONF_ADDR | 0x101, 0);
	} else {
		/*
		 * NOTE(review): this early-return path leaves the flash
		 * write enable set above still asserted -- confirm.
		 */
		if ((rval = ql_24xx_unprotect_flash(ha)) != QL_SUCCESS) {
			EL(ha, "unprotect_flash failed, rval=%xh\n", rval);
			return (rval);
		}
	}

	/* Write to flash. */
	rval = ql_24xx_write_flash(ha, addr, value);

	/* Enable NVRAM write-protection. */
	if (CFG_IST(ha, CFG_CTRL_2422)) {
		/* TODO: Check if 0x8c is correct -- sb: 0x9c ? */
		(void) ql_24xx_write_flash(ha, NVRAM_CONF_ADDR | 0x101, 0x8c);
	} else {
		ql_24xx_protect_flash(ha);
	}

	/*
	 * Disable flash write.
	 * NOTE(review): the enable above is gated on !CFG_CTRL_8081 but
	 * the disable here on !CFG_CTRL_81XX -- confirm the asymmetry
	 * is intentional.
	 */
	if (!(CFG_IST(ha, CFG_CTRL_81XX))) {
		WRT32_IO_REG(ha, ctrl_status,
		    RD32_IO_REG(ha, ctrl_status) & ~ISP_FLASH_ENABLE);
		RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */
	}

	return (rval);
}
916 904
/*
 * ql_nv_util_load
 *	Loads NVRAM from application.
 *
 * Input:
 *	ha = adapter state pointer.
 *	bp = user buffer address.
 *	mode = ddi_copyin flags.
 *
 * Returns:
 *	0 = success, else EFAULT, EINVAL, EBUSY or EIO.
 *
 * Context:
 *	Kernel context.
 */
int
ql_nv_util_load(ql_adapter_state_t *ha, void *bp, int mode)
{
	uint8_t cnt;
	void *nv;
	uint16_t *wptr;
	uint16_t data;
	uint32_t start_addr, *lptr, data32;
	nvram_t *nptr;
	int rval;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* KM_SLEEP allocation cannot fail, so no NULL check is needed. */
	nv = kmem_zalloc(ha->nvram_cache->size, KM_SLEEP);

	if (ddi_copyin(bp, nv, ha->nvram_cache->size, mode) != 0) {
		EL(ha, "Buffer copy failed\n");
		kmem_free(nv, ha->nvram_cache->size);
		return (EFAULT);
	}

	/* See if the buffer passed to us looks sane ("ISP " signature) */
	nptr = (nvram_t *)nv;
	if (nptr->id[0] != 'I' || nptr->id[1] != 'S' || nptr->id[2] != 'P' ||
	    nptr->id[3] != ' ') {
		EL(ha, "failed, buffer sanity check\n");
		kmem_free(nv, ha->nvram_cache->size);
		return (EINVAL);
	}

	/* Quiesce I/O */
	if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
		EL(ha, "ql_stall_driver failed\n");
		kmem_free(nv, ha->nvram_cache->size);
		return (EBUSY);
	}

	rval = ql_lock_nvram(ha, &start_addr, LNF_NVRAM_DATA);
	if (rval != QL_SUCCESS) {
		EL(ha, "failed, ql_lock_nvram=%xh\n", rval);
		kmem_free(nv, ha->nvram_cache->size);
		ql_restart_driver(ha);
		return (EIO);
	}

	/* Load NVRAM. */
	if (CFG_IST(ha, CFG_CTRL_258081)) {
		/* Flash-resident NVRAM: read-modify-write from the user
		 * buffer directly. */
		GLOBAL_HW_UNLOCK();
		start_addr &= ~ha->flash_data_addr;
		start_addr <<= 2;	/* word address -> byte address */
		if ((rval = ql_r_m_w_flash(ha, bp, ha->nvram_cache->size,
		    start_addr, mode)) != QL_SUCCESS) {
			EL(ha, "nvram load failed, rval = %0xh\n", rval);
		}
		GLOBAL_HW_LOCK();
	} else if (CFG_IST(ha, CFG_CTRL_2422)) {
		/*
		 * NOTE(review): cnt is uint8_t, so these loops assume
		 * nvram_cache->size / 4 (and / 2 below) fits in 8 bits;
		 * confirm against the configured cache sizes.
		 */
		lptr = (uint32_t *)nv;
		for (cnt = 0; cnt < ha->nvram_cache->size / 4; cnt++) {
			data32 = *lptr++;
			LITTLE_ENDIAN_32(&data32);
			rval = ql_24xx_load_nvram(ha, cnt + start_addr,
			    data32);
			if (rval != QL_SUCCESS) {
				EL(ha, "failed, 24xx_load_nvram=%xh\n", rval);
				break;
			}
		}
	} else {
		wptr = (uint16_t *)nv;
		for (cnt = 0; cnt < ha->nvram_cache->size / 2; cnt++) {
			data = *wptr++;
			LITTLE_ENDIAN_16(&data);
			ql_load_nvram(ha, (uint8_t)(cnt + start_addr), data);
		}
	}
	/*
	 * switch to the new one -- ownership of nptr transfers to the
	 * cache and the old cache buffer is freed here.
	 * NOTE(review): the swap happens even when the hardware load
	 * above failed, so cache and NVRAM may disagree -- confirm.
	 */
	NVRAM_CACHE_LOCK(ha);

	kmem_free(ha->nvram_cache->cache, ha->nvram_cache->size);
	ha->nvram_cache->cache = (void *)nptr;

	NVRAM_CACHE_UNLOCK(ha);

	ql_release_nvram(ha);
	ql_restart_driver(ha);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	if (rval == QL_SUCCESS) {
		return (0);
	}

	return (EFAULT);
}
1027 1012
/*
 * ql_nv_util_dump
 *	Dumps NVRAM to application.
 *
 *	Serves the dump from the in-memory NVRAM cache, refreshing the
 *	cache from hardware first if it is not marked valid.
 *
 * Input:
 *	ha = adapter state pointer.
 *	bp = user buffer address.
 *	mode = ddi_copyout flags.
 *
 * Returns:
 *	0 = success, else ENOMEM, EBUSY, EIO or EFAULT.
 *
 * Context:
 *	Kernel context.
 */
int
ql_nv_util_dump(ql_adapter_state_t *ha, void *bp, int mode)
{
	uint32_t	start_addr;
	int		rval2, rval = 0;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * NOTE(review): size is numeric but compared against NULL (0),
	 * and the EL text mentions kmem_zalloc although nothing is
	 * allocated here -- both look like leftovers; confirm.
	 */
	if (ha->nvram_cache == NULL ||
	    ha->nvram_cache->size == NULL ||
	    ha->nvram_cache->cache == NULL) {
		EL(ha, "failed, kmem_zalloc\n");
		return (ENOMEM);
	} else if (ha->nvram_cache->valid != 1) {
		/* Cache is stale: re-read it from the hardware. */

		/* Quiesce I/O */
		if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
			EL(ha, "ql_stall_driver failed\n");
			return (EBUSY);
		}

		rval2 = ql_lock_nvram(ha, &start_addr, LNF_NVRAM_DATA);
		if (rval2 != QL_SUCCESS) {
			EL(ha, "failed, ql_lock_nvram=%xh\n", rval2);
			ql_restart_driver(ha);
			return (EIO);
		}
		NVRAM_CACHE_LOCK(ha);

		rval2 = ql_get_nvram(ha, ha->nvram_cache->cache,
		    start_addr, ha->nvram_cache->size);
		if (rval2 != QL_SUCCESS) {
			rval = rval2;
		} else {
			ha->nvram_cache->valid = 1;
			EL(ha, "nvram cache now valid.");
		}

		NVRAM_CACHE_UNLOCK(ha);

		ql_release_nvram(ha);
		ql_restart_driver(ha);

		if (rval != 0) {
			EL(ha, "failed to dump nvram, rval=%x\n", rval);
			return (rval);
		}
	}

	/* Copy the (now valid) cached image out to the caller. */
	if (ddi_copyout(ha->nvram_cache->cache, bp,
	    ha->nvram_cache->size, mode) != 0) {
		EL(ha, "Buffer copy failed\n");
		return (EFAULT);
	}

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (0);
}
1100 1085
1101 1086 int
1102 1087 ql_get_nvram(ql_adapter_state_t *ha, void *dest_addr, uint32_t src_addr,
1103 1088 uint32_t size)
1104 1089 {
1105 1090 int rval = QL_SUCCESS;
1106 1091 int cnt;
1107 1092 /* Dump NVRAM. */
1108 1093 if (CFG_IST(ha, CFG_CTRL_24258081)) {
1109 1094 uint32_t *lptr = (uint32_t *)dest_addr;
1110 1095
1111 1096 for (cnt = 0; cnt < size / 4; cnt++) {
1112 1097 rval = ql_24xx_read_flash(ha, src_addr++, lptr);
1113 1098 if (rval != QL_SUCCESS) {
1114 1099 EL(ha, "read_flash failed=%xh\n", rval);
1115 1100 rval = EAGAIN;
1116 1101 break;
1117 1102 }
1118 1103 LITTLE_ENDIAN_32(lptr);
1119 1104 lptr++;
1120 1105 }
1121 1106 } else {
1122 1107 uint16_t data;
1123 1108 uint16_t *wptr = (uint16_t *)dest_addr;
1124 1109
1125 1110 for (cnt = 0; cnt < size / 2; cnt++) {
1126 1111 data = (uint16_t)ql_get_nvram_word(ha, cnt +
1127 1112 src_addr);
1128 1113 LITTLE_ENDIAN_16(&data);
1129 1114 *wptr++ = data;
1130 1115 }
1131 1116 }
1132 1117 return (rval);
1133 1118 }
1134 1119
1135 1120 /*
1136 1121 * ql_vpd_load
1137 1122 * Loads VPD from application.
1138 1123 *
1139 1124 * Input:
1140 1125 * ha = adapter state pointer.
1141 1126 * bp = user buffer address.
1142 1127 *
1143 1128 * Returns:
1144 1129 *
1145 1130 * Context:
1146 1131 * Kernel context.
1147 1132 */
1148 1133 int
1149 1134 ql_vpd_load(ql_adapter_state_t *ha, void *bp, int mode)
1150 1135 {
1151 1136 uint8_t cnt;
1152 1137 uint8_t *vpd, *vpdptr, *vbuf;
1153 1138 uint32_t start_addr, vpd_size, *lptr, data32;
1154 1139 int rval;
↓ open down ↓ |
198 lines elided |
↑ open up ↑ |
1155 1140
1156 1141 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1157 1142
1158 1143 if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
1159 1144 EL(ha, "unsupported adapter feature\n");
1160 1145 return (ENOTSUP);
1161 1146 }
1162 1147
1163 1148 vpd_size = QL_24XX_VPD_SIZE;
1164 1149
1165 - if ((vpd = kmem_zalloc(vpd_size, KM_SLEEP)) == NULL) {
1166 - EL(ha, "failed, kmem_zalloc\n");
1167 - return (ENOMEM);
1168 - }
1150 + vpd = kmem_zalloc(vpd_size, KM_SLEEP);
1169 1151
1170 1152 if (ddi_copyin(bp, vpd, vpd_size, mode) != 0) {
1171 1153 EL(ha, "Buffer copy failed\n");
1172 1154 kmem_free(vpd, vpd_size);
1173 1155 return (EFAULT);
1174 1156 }
1175 1157
1176 1158 /* Sanity check the user supplied data via checksum */
1177 1159 if ((vpdptr = ql_vpd_findtag(ha, vpd, "RV")) == NULL) {
1178 1160 EL(ha, "vpd RV tag missing\n");
1179 1161 kmem_free(vpd, vpd_size);
1180 1162 return (EINVAL);
1181 1163 }
1182 1164
1183 1165 vpdptr += 3;
1184 1166 cnt = 0;
1185 1167 vbuf = vpd;
1186 1168 while (vbuf <= vpdptr) {
1187 1169 cnt += *vbuf++;
1188 1170 }
1189 1171 if (cnt != 0) {
1190 1172 EL(ha, "mismatched checksum, cal=%xh, passed=%xh\n",
1191 1173 (uint8_t)cnt, (uintptr_t)vpdptr);
1192 1174 kmem_free(vpd, vpd_size);
1193 1175 return (EINVAL);
1194 1176 }
1195 1177
1196 1178 /* Quiesce I/O */
1197 1179 if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
1198 1180 EL(ha, "ql_stall_driver failed\n");
1199 1181 kmem_free(vpd, vpd_size);
1200 1182 return (EBUSY);
1201 1183 }
1202 1184
1203 1185 rval = ql_lock_nvram(ha, &start_addr, LNF_VPD_DATA);
1204 1186 if (rval != QL_SUCCESS) {
1205 1187 EL(ha, "failed, ql_lock_nvram=%xh\n", rval);
1206 1188 kmem_free(vpd, vpd_size);
1207 1189 ql_restart_driver(ha);
1208 1190 return (EIO);
1209 1191 }
1210 1192
1211 1193 /* Load VPD. */
1212 1194 if (CFG_IST(ha, CFG_CTRL_258081)) {
1213 1195 GLOBAL_HW_UNLOCK();
1214 1196 start_addr &= ~ha->flash_data_addr;
1215 1197 start_addr <<= 2;
1216 1198 if ((rval = ql_r_m_w_flash(ha, bp, vpd_size, start_addr,
1217 1199 mode)) != QL_SUCCESS) {
1218 1200 EL(ha, "vpd load error: %xh\n", rval);
1219 1201 }
1220 1202 GLOBAL_HW_LOCK();
1221 1203 } else {
1222 1204 lptr = (uint32_t *)vpd;
1223 1205 for (cnt = 0; cnt < vpd_size / 4; cnt++) {
1224 1206 data32 = *lptr++;
1225 1207 LITTLE_ENDIAN_32(&data32);
1226 1208 rval = ql_24xx_load_nvram(ha, cnt + start_addr,
1227 1209 data32);
1228 1210 if (rval != QL_SUCCESS) {
1229 1211 EL(ha, "failed, 24xx_load_nvram=%xh\n", rval);
1230 1212 break;
1231 1213 }
1232 1214 }
1233 1215 }
1234 1216
1235 1217 kmem_free(vpd, vpd_size);
1236 1218
1237 1219 /* Update the vcache */
1238 1220 CACHE_LOCK(ha);
1239 1221
1240 1222 if (rval != QL_SUCCESS) {
1241 1223 EL(ha, "failed, load\n");
1242 1224 } else if ((ha->vcache == NULL) && ((ha->vcache =
1243 1225 kmem_zalloc(vpd_size, KM_SLEEP)) == NULL)) {
1244 1226 EL(ha, "failed, kmem_zalloc2\n");
1245 1227 } else if (ddi_copyin(bp, ha->vcache, vpd_size, mode) != 0) {
1246 1228 EL(ha, "Buffer copy2 failed\n");
1247 1229 kmem_free(ha->vcache, vpd_size);
1248 1230 ha->vcache = NULL;
1249 1231 }
1250 1232
1251 1233 CACHE_UNLOCK(ha);
1252 1234
1253 1235 ql_release_nvram(ha);
1254 1236 ql_restart_driver(ha);
1255 1237
1256 1238 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1257 1239
1258 1240 if (rval == QL_SUCCESS) {
1259 1241 return (0);
1260 1242 }
1261 1243
1262 1244 return (EFAULT);
1263 1245 }
1264 1246
1265 1247 /*
1266 1248 * ql_vpd_dump
1267 1249 * Dumps VPD to application buffer.
1268 1250 *
1269 1251 * Input:
1270 1252 * ha = adapter state pointer.
1271 1253 * bp = user buffer address.
1272 1254 *
1273 1255 * Returns:
1274 1256 *
1275 1257 * Context:
1276 1258 * Kernel context.
1277 1259 */
1278 1260 int
1279 1261 ql_vpd_dump(ql_adapter_state_t *ha, void *bp, int mode)
1280 1262 {
1281 1263 uint8_t cnt;
1282 1264 void *vpd;
1283 1265 uint32_t start_addr, vpd_size, *lptr;
1284 1266 int rval = 0;
1285 1267
1286 1268 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1287 1269
1288 1270 if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
1289 1271 EL(ha, "unsupported adapter feature\n");
1290 1272 return (EACCES);
1291 1273 }
1292 1274
1293 1275 vpd_size = QL_24XX_VPD_SIZE;
1294 1276
1295 1277 CACHE_LOCK(ha);
1296 1278
↓ open down ↓ |
118 lines elided |
↑ open up ↑ |
1297 1279 if (ha->vcache != NULL) {
1298 1280 /* copy back the vpd cache data */
1299 1281 if (ddi_copyout(ha->vcache, bp, vpd_size, mode) != 0) {
1300 1282 EL(ha, "Buffer copy failed\n");
1301 1283 rval = EFAULT;
1302 1284 }
1303 1285 CACHE_UNLOCK(ha);
1304 1286 return (rval);
1305 1287 }
1306 1288
1307 - if ((vpd = kmem_zalloc(vpd_size, KM_SLEEP)) == NULL) {
1308 - CACHE_UNLOCK(ha);
1309 - EL(ha, "failed, kmem_zalloc\n");
1310 - return (ENOMEM);
1311 - }
1289 + vpd = kmem_zalloc(vpd_size, KM_SLEEP);
1312 1290
1313 1291 /* Quiesce I/O */
1314 1292 if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
1315 1293 CACHE_UNLOCK(ha);
1316 1294 EL(ha, "ql_stall_driver failed\n");
1317 1295 kmem_free(vpd, vpd_size);
1318 1296 return (EBUSY);
1319 1297 }
1320 1298
1321 1299 rval = ql_lock_nvram(ha, &start_addr, LNF_VPD_DATA);
1322 1300 if (rval != QL_SUCCESS) {
1323 1301 CACHE_UNLOCK(ha);
1324 1302 EL(ha, "failed, ql_lock_nvram=%xh\n", rval);
1325 1303 kmem_free(vpd, vpd_size);
1326 1304 ql_restart_driver(ha);
1327 1305 return (EIO);
1328 1306 }
1329 1307
1330 1308 /* Dump VPD. */
1331 1309 lptr = (uint32_t *)vpd;
1332 1310
1333 1311 for (cnt = 0; cnt < vpd_size / 4; cnt++) {
1334 1312 rval = ql_24xx_read_flash(ha, start_addr++, lptr);
1335 1313 if (rval != QL_SUCCESS) {
1336 1314 EL(ha, "read_flash failed=%xh\n", rval);
1337 1315 rval = EAGAIN;
1338 1316 break;
1339 1317 }
1340 1318 LITTLE_ENDIAN_32(lptr);
1341 1319 lptr++;
1342 1320 }
1343 1321
1344 1322 ql_release_nvram(ha);
1345 1323 ql_restart_driver(ha);
1346 1324
1347 1325 if (ddi_copyout(vpd, bp, vpd_size, mode) != 0) {
1348 1326 CACHE_UNLOCK(ha);
1349 1327 EL(ha, "Buffer copy failed\n");
1350 1328 kmem_free(vpd, vpd_size);
1351 1329 return (EFAULT);
1352 1330 }
1353 1331
1354 1332 ha->vcache = vpd;
1355 1333
1356 1334 CACHE_UNLOCK(ha);
1357 1335
1358 1336 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1359 1337
1360 1338 if (rval != QL_SUCCESS) {
1361 1339 return (EFAULT);
1362 1340 } else {
1363 1341 return (0);
1364 1342 }
1365 1343 }
1366 1344
1367 1345 /*
1368 1346 * ql_vpd_findtag
1369 1347 * Search the passed vpd buffer for the requested VPD tag type.
1370 1348 *
1371 1349 * Input:
1372 1350 * ha = adapter state pointer.
1373 1351 * vpdbuf = Pointer to start of the buffer to search
1374 1352 * op = VPD opcode to find (must be NULL terminated).
1375 1353 *
1376 1354 * Returns:
1377 1355 * Pointer to the opcode in the buffer if opcode found.
1378 1356 * NULL if opcode is not found.
1379 1357 *
1380 1358 * Context:
1381 1359 * Kernel context.
1382 1360 */
static uint8_t *
ql_vpd_findtag(ql_adapter_state_t *ha, uint8_t *vpdbuf, int8_t *opcode)
{
	uint8_t		*vpd = vpdbuf;
	uint8_t		*end = vpdbuf + QL_24XX_VPD_SIZE;
	uint32_t	found = 0;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	if (vpdbuf == NULL || opcode == NULL) {
		EL(ha, "null parameter passed!\n");
		return (NULL);
	}

	/* Walk the tag list; stop at the requested tag or the END tag. */
	while (vpd < end) {

		/* check for end of vpd */
		if (vpd[0] == VPD_TAG_END) {
			/* END matches only if END was what was asked for. */
			if (opcode[0] == VPD_TAG_END) {
				found = 1;
			} else {
				found = 0;
			}
			break;
		}

		/* check opcode */
		if (bcmp(opcode, vpd, strlen(opcode)) == 0) {
			/* found opcode requested */
			found = 1;
			break;
		}

		/*
		 * Didn't find the opcode, so calculate start of
		 * next tag. Depending on the current tag type,
		 * the length field can be 1 or 2 bytes
		 */
		if (!(strncmp((char *)vpd, (char *)VPD_TAG_PRODID, 1))) {
			/* Product-ID tag: 16-bit little-endian length. */
			vpd += (vpd[2] << 8) + vpd[1] + 3;
		} else if (*vpd == VPD_TAG_LRT || *vpd == VPD_TAG_LRTC) {
			/* Resource tags carry no data field. */
			vpd += 3;
		} else {
			/* All other tags: 8-bit length at vpd[2]. */
			vpd += vpd[2] +3;
		}
	}

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	/* Return a pointer to the tag itself, or NULL if not found. */
	return (found == 1 ? vpd : NULL);
}
1434 1412
1435 1413 /*
1436 1414 * ql_vpd_lookup
1437 1415 * Return the VPD data for the request VPD tag
1438 1416 *
1439 1417 * Input:
1440 1418 * ha = adapter state pointer.
1441 1419 * opcode = VPD opcode to find (must be NULL terminated).
1442 1420 * bp = Pointer to returned data buffer.
1443 1421 * bplen = Length of returned data buffer.
1444 1422 *
1445 1423 * Returns:
1446 1424 * Length of data copied into returned data buffer.
1447 1425 * >0 = VPD data field (NULL terminated)
1448 1426 * 0 = no data.
1449 1427 * -1 = Could not find opcode in vpd buffer / error.
1450 1428 *
1451 1429 * Context:
1452 1430 * Kernel context.
1453 1431 *
1454 1432 * NB: The opcode buffer and the bp buffer *could* be the same buffer!
1455 1433 *
1456 1434 */
1457 1435 int32_t
1458 1436 ql_vpd_lookup(ql_adapter_state_t *ha, uint8_t *opcode, uint8_t *bp,
1459 1437 int32_t bplen)
1460 1438 {
1461 1439 uint8_t *vpd;
1462 1440 uint8_t *vpdbuf;
1463 1441 int32_t len = -1;
1464 1442
1465 1443 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1466 1444
1467 1445 if (opcode == NULL || bp == NULL || bplen < 1) {
1468 1446 EL(ha, "invalid parameter passed: opcode=%ph, "
1469 1447 "bp=%ph, bplen=%xh\n", opcode, bp, bplen);
1470 1448 return (len);
1471 1449 }
1472 1450
1473 1451 if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
1474 1452 return (len);
1475 1453 }
1476 1454
1477 1455 if ((vpdbuf = (uint8_t *)kmem_zalloc(QL_24XX_VPD_SIZE,
1478 1456 KM_SLEEP)) == NULL) {
1479 1457 EL(ha, "unable to allocate vpd memory\n");
1480 1458 return (len);
1481 1459 }
1482 1460
1483 1461 if ((ql_vpd_dump(ha, vpdbuf, (int)FKIOCTL)) != 0) {
1484 1462 kmem_free(vpdbuf, QL_24XX_VPD_SIZE);
1485 1463 EL(ha, "unable to retrieve VPD data\n");
1486 1464 return (len);
1487 1465 }
1488 1466
1489 1467 if ((vpd = ql_vpd_findtag(ha, vpdbuf, (int8_t *)opcode)) != NULL) {
1490 1468 /*
1491 1469 * Found the tag
1492 1470 */
1493 1471 if (*opcode == VPD_TAG_END || *opcode == VPD_TAG_LRT ||
1494 1472 *opcode == VPD_TAG_LRTC) {
1495 1473 /*
1496 1474 * we found it, but the tag doesn't have a data
1497 1475 * field.
1498 1476 */
1499 1477 len = 0;
1500 1478 } else if (!(strncmp((char *)vpd, (char *)
1501 1479 VPD_TAG_PRODID, 1))) {
1502 1480 len = vpd[2] << 8;
1503 1481 len += vpd[1];
1504 1482 } else {
1505 1483 len = vpd[2];
1506 1484 }
1507 1485
1508 1486 /*
1509 1487 * make sure that the vpd len doesn't exceed the
1510 1488 * vpd end
1511 1489 */
1512 1490 if (vpd+len > vpdbuf + QL_24XX_VPD_SIZE) {
1513 1491 EL(ha, "vpd tag len (%xh) exceeds vpd buffer "
1514 1492 "length\n", len);
1515 1493 len = -1;
1516 1494 }
1517 1495 }
1518 1496
1519 1497 if (len >= 0) {
1520 1498 /*
1521 1499 * make sure we don't exceed callers buffer space len
1522 1500 */
1523 1501 if (len > bplen) {
1524 1502 len = bplen-1;
1525 1503 }
1526 1504
1527 1505 /* copy the data back */
1528 1506 (void) strncpy((int8_t *)bp, (int8_t *)(vpd+3), (int64_t)len);
1529 1507 bp[len] = NULL;
1530 1508 } else {
1531 1509 /* error -- couldn't find tag */
1532 1510 bp[0] = NULL;
1533 1511 if (opcode[1] != NULL) {
1534 1512 EL(ha, "unable to find tag '%s'\n", opcode);
1535 1513 } else {
1536 1514 EL(ha, "unable to find tag '%xh'\n", opcode[0]);
1537 1515 }
1538 1516 }
1539 1517
1540 1518 kmem_free(vpdbuf, QL_24XX_VPD_SIZE);
1541 1519
1542 1520 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1543 1521
1544 1522 return (len);
1545 1523 }
1546 1524
1547 1525 /*
1548 1526 * ql_r_m_w_flash
1549 1527 * Read modify write from user space to flash.
1550 1528 *
1551 1529 * Input:
1552 1530 * ha: adapter state pointer.
1553 1531 * dp: source byte pointer.
1554 1532 * bc: byte count.
1555 1533 * faddr: flash byte address.
1556 1534 * mode: flags.
1557 1535 *
1558 1536 * Returns:
1559 1537 * ql local function return status code.
1560 1538 *
1561 1539 * Context:
1562 1540 * Kernel context.
1563 1541 */
int
ql_r_m_w_flash(ql_adapter_state_t *ha, caddr_t dp, uint32_t bc, uint32_t faddr,
    int mode)
{
	uint8_t		*bp;
	uint32_t	xfer, bsize, saddr, ofst;
	int		rval = 0;

	QL_PRINT_9(CE_CONT, "(%d): started, dp=%ph, faddr=%xh, bc=%xh\n",
	    ha->instance, (void *)dp, faddr, bc);

	/* Work one flash erase block at a time. */
	bsize = ha->xioctl->fdesc.block_size;
	/* Block-aligned start address and offset into the first block. */
	saddr = faddr & ~(bsize - 1);
	ofst = faddr & (bsize - 1);

	/* KM_SLEEP allocations cannot fail. */
	bp = kmem_zalloc(bsize, KM_SLEEP);

	while (bc) {
		/* Bytes of user data that land in this block. */
		xfer = bc > bsize ? bsize : bc;
		if (ofst + xfer > bsize) {
			xfer = bsize - ofst;
		}
		QL_PRINT_9(CE_CONT, "(%d): dp=%ph, saddr=%xh, bc=%xh, "
		    "ofst=%xh, xfer=%xh\n", ha->instance, (void *)dp, saddr,
		    bc, ofst, xfer);

		if (ofst || xfer < bsize) {
			/*
			 * Partial-block update: read the existing sector
			 * first so the untouched bytes are preserved.
			 */
			/* Dump Flash sector. */
			if ((rval = ql_dump_fcode(ha, bp, bsize, saddr)) !=
			    QL_SUCCESS) {
				EL(ha, "dump_flash status=%x\n", rval);
				break;
			}
		}

		/* Set new data. */
		if ((rval = ddi_copyin(dp, (caddr_t)(bp + ofst), xfer,
		    mode)) != 0) {
			EL(ha, "ddi_copyin status=%xh, dp=%ph, ofst=%xh, "
			    "xfer=%xh\n", rval, (void *)dp, ofst, xfer);
			rval = QL_FUNCTION_FAILED;
			break;
		}

		/* Write to flash. */
		if ((rval = ql_load_fcode(ha, bp, bsize, saddr)) !=
		    QL_SUCCESS) {
			EL(ha, "load_flash status=%x\n", rval);
			break;
		}
		/* Advance; only the first block can have a nonzero offset. */
		bc -= xfer;
		dp += xfer;
		saddr += bsize;
		ofst = 0;
	}

	kmem_free(bp, bsize);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
1629 1604
1630 1605 /*
1631 1606 * ql_adm_op
1632 1607 * Performs qladm utility operations
1633 1608 *
1634 1609 * Input:
1635 1610 * ha: adapter state pointer.
1636 1611 * arg: driver_op_t structure pointer.
1637 1612 * mode: flags.
1638 1613 *
1639 1614 * Returns:
1640 1615 *
1641 1616 * Context:
1642 1617 * Kernel context.
1643 1618 */
static int
ql_adm_op(ql_adapter_state_t *ha, void *arg, int mode)
{
	ql_adm_op_t	dop;
	int		rval = 0;

	/* Copy the operation request in from user space. */
	if (ddi_copyin(arg, &dop, sizeof (ql_adm_op_t), mode) != 0) {
		EL(ha, "failed, driver_op_t ddi_copyin\n");
		return (EFAULT);
	}

	QL_PRINT_9(CE_CONT, "(%d): started, cmd=%xh, buffer=%llx,"
	    " length=%xh, option=%xh\n", ha->instance, dop.cmd, dop.buffer,
	    dop.length, dop.option);

	/* Dispatch to the handler for the requested admin command. */
	switch (dop.cmd) {
	case QL_ADAPTER_INFO:
		rval = ql_adm_adapter_info(ha, &dop, mode);
		break;

	case QL_EXTENDED_LOGGING:
		rval = ql_adm_extended_logging(ha, &dop);
		break;

	case QL_LOOP_RESET:
		rval = ql_adm_loop_reset(ha);
		break;

	case QL_DEVICE_LIST:
		rval = ql_adm_device_list(ha, &dop, mode);
		break;

	case QL_PROP_UPDATE_INT:
		rval = ql_adm_prop_update_int(ha, &dop, mode);
		break;

	case QL_UPDATE_PROPERTIES:
		rval = ql_adm_update_properties(ha);
		break;

	case QL_FW_DUMP:
		/* fw_dump also copies the updated dop back to user space. */
		rval = ql_adm_fw_dump(ha, &dop, arg, mode);
		break;

	case QL_NVRAM_LOAD:
		rval = ql_adm_nvram_load(ha, &dop, mode);
		break;

	case QL_NVRAM_DUMP:
		rval = ql_adm_nvram_dump(ha, &dop, mode);
		break;

	case QL_FLASH_LOAD:
		rval = ql_adm_flash_load(ha, &dop, mode);
		break;

	case QL_VPD_LOAD:
		rval = ql_adm_vpd_load(ha, &dop, mode);
		break;

	case QL_VPD_DUMP:
		rval = ql_adm_vpd_dump(ha, &dop, mode);
		break;

	case QL_VPD_GETTAG:
		rval = ql_adm_vpd_gettag(ha, &dop, mode);
		break;

	case QL_UPD_FWMODULE:
		rval = ql_adm_updfwmodule(ha, &dop, mode);
		break;

	default:
		EL(ha, "unsupported driver op cmd: %x\n", dop.cmd);
		return (EINVAL);
	}

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
1725 1700
1726 1701 /*
1727 1702 * ql_adm_adapter_info
1728 1703 * Performs qladm QL_ADAPTER_INFO command
1729 1704 *
1730 1705 * Input:
1731 1706 * ha: adapter state pointer.
1732 1707 * dop: ql_adm_op_t structure pointer.
1733 1708 * mode: flags.
1734 1709 *
1735 1710 * Returns:
1736 1711 *
1737 1712 * Context:
1738 1713 * Kernel context.
1739 1714 */
1740 1715 static int
1741 1716 ql_adm_adapter_info(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
1742 1717 {
1743 1718 ql_adapter_info_t hba;
1744 1719 uint8_t *dp;
1745 1720 uint32_t length;
1746 1721 int rval, i;
1747 1722
1748 1723 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1749 1724
1750 1725 hba.device_id = ha->device_id;
1751 1726
1752 1727 dp = CFG_IST(ha, CFG_CTRL_24258081) ?
1753 1728 &ha->init_ctrl_blk.cb24.port_name[0] :
1754 1729 &ha->init_ctrl_blk.cb.port_name[0];
1755 1730 bcopy(dp, hba.wwpn, 8);
1756 1731
1757 1732 hba.d_id = ha->d_id.b24;
1758 1733
1759 1734 if (ha->xioctl->fdesc.flash_size == 0 &&
1760 1735 !(CFG_IST(ha, CFG_CTRL_2200) && !ha->subven_id)) {
1761 1736 if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
1762 1737 EL(ha, "ql_stall_driver failed\n");
1763 1738 return (EBUSY);
1764 1739 }
1765 1740
1766 1741 if ((rval = ql_setup_fcache(ha)) != QL_SUCCESS) {
1767 1742 EL(ha, "ql_setup_flash failed=%xh\n", rval);
1768 1743 if (rval == QL_FUNCTION_TIMEOUT) {
1769 1744 return (EBUSY);
1770 1745 }
1771 1746 return (EIO);
1772 1747 }
1773 1748
1774 1749 /* Resume I/O */
1775 1750 if (CFG_IST(ha, CFG_CTRL_24258081)) {
1776 1751 ql_restart_driver(ha);
1777 1752 } else {
1778 1753 EL(ha, "isp_abort_needed for restart\n");
1779 1754 ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED,
1780 1755 DRIVER_STALL);
1781 1756 }
1782 1757 }
1783 1758 hba.flash_size = ha->xioctl->fdesc.flash_size;
1784 1759
1785 1760 (void) strcpy(hba.driver_ver, QL_VERSION);
1786 1761
1787 1762 (void) sprintf(hba.fw_ver, "%d.%d.%d", ha->fw_major_version,
1788 1763 ha->fw_minor_version, ha->fw_subminor_version);
1789 1764
1790 1765 bzero(hba.fcode_ver, sizeof (hba.fcode_ver));
1791 1766
1792 1767 /*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
1793 1768 rval = ddi_getlongprop(DDI_DEV_T_ANY, ha->dip,
1794 1769 DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version", (caddr_t)&dp, &i);
1795 1770 length = i;
1796 1771 if (rval != DDI_PROP_SUCCESS) {
1797 1772 EL(ha, "failed, ddi_getlongprop=%xh\n", rval);
1798 1773 } else {
1799 1774 if (length > (uint32_t)sizeof (hba.fcode_ver)) {
1800 1775 length = sizeof (hba.fcode_ver) - 1;
1801 1776 }
1802 1777 bcopy((void *)dp, (void *)hba.fcode_ver, length);
1803 1778 kmem_free(dp, length);
1804 1779 }
1805 1780
1806 1781 if (ddi_copyout((void *)&hba, (void *)(uintptr_t)dop->buffer,
1807 1782 dop->length, mode) != 0) {
1808 1783 EL(ha, "failed, ddi_copyout\n");
1809 1784 return (EFAULT);
1810 1785 }
1811 1786
1812 1787 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1813 1788
1814 1789 return (0);
1815 1790 }
1816 1791
1817 1792 /*
1818 1793 * ql_adm_extended_logging
1819 1794 * Performs qladm QL_EXTENDED_LOGGING command
1820 1795 *
1821 1796 * Input:
1822 1797 * ha: adapter state pointer.
1823 1798 * dop: ql_adm_op_t structure pointer.
1824 1799 *
1825 1800 * Returns:
1826 1801 *
1827 1802 * Context:
1828 1803 * Kernel context.
1829 1804 */
1830 1805 static int
1831 1806 ql_adm_extended_logging(ql_adapter_state_t *ha, ql_adm_op_t *dop)
1832 1807 {
1833 1808 char prop_name[MAX_PROP_LENGTH];
1834 1809 int rval;
1835 1810
1836 1811 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1837 1812
1838 1813 (void) sprintf(prop_name, "hba%d-extended-logging", ha->instance);
1839 1814
1840 1815 /*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
1841 1816 rval = ddi_prop_update_int(DDI_DEV_T_NONE, ha->dip, prop_name,
1842 1817 (int)dop->option);
1843 1818 if (rval != DDI_PROP_SUCCESS) {
1844 1819 EL(ha, "failed, prop_update = %xh\n", rval);
1845 1820 return (EINVAL);
1846 1821 } else {
1847 1822 dop->option ?
1848 1823 (ha->cfg_flags |= CFG_ENABLE_EXTENDED_LOGGING) :
1849 1824 (ha->cfg_flags &= ~CFG_ENABLE_EXTENDED_LOGGING);
1850 1825 }
1851 1826
1852 1827 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1853 1828
1854 1829 return (0);
1855 1830 }
1856 1831
1857 1832 /*
1858 1833 * ql_adm_loop_reset
1859 1834 * Performs qladm QL_LOOP_RESET command
1860 1835 *
1861 1836 * Input:
1862 1837 * ha: adapter state pointer.
1863 1838 *
1864 1839 * Returns:
1865 1840 *
1866 1841 * Context:
1867 1842 * Kernel context.
1868 1843 */
1869 1844 static int
1870 1845 ql_adm_loop_reset(ql_adapter_state_t *ha)
1871 1846 {
1872 1847 int rval;
1873 1848
1874 1849 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1875 1850
1876 1851 if (ha->task_daemon_flags & LOOP_DOWN) {
1877 1852 (void) ql_full_login_lip(ha);
1878 1853 } else if ((rval = ql_full_login_lip(ha)) != QL_SUCCESS) {
1879 1854 EL(ha, "failed, ql_initiate_lip=%xh\n", rval);
1880 1855 return (EIO);
1881 1856 }
1882 1857
1883 1858 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1884 1859
1885 1860 return (0);
1886 1861 }
1887 1862
1888 1863 /*
1889 1864 * ql_adm_device_list
1890 1865 * Performs qladm QL_DEVICE_LIST command
1891 1866 *
1892 1867 * Input:
1893 1868 * ha: adapter state pointer.
1894 1869 * dop: ql_adm_op_t structure pointer.
1895 1870 * mode: flags.
1896 1871 *
1897 1872 * Returns:
1898 1873 *
1899 1874 * Context:
1900 1875 * Kernel context.
1901 1876 */
1902 1877 static int
1903 1878 ql_adm_device_list(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
1904 1879 {
1905 1880 ql_device_info_t dev;
1906 1881 ql_link_t *link;
1907 1882 ql_tgt_t *tq;
1908 1883 uint32_t index, cnt;
1909 1884
1910 1885 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1911 1886
1912 1887 cnt = 0;
1913 1888 dev.address = 0xffffffff;
1914 1889
1915 1890 /* Scan port list for requested target and fill in the values */
1916 1891 for (link = NULL, index = 0;
1917 1892 index < DEVICE_HEAD_LIST_SIZE && link == NULL; index++) {
1918 1893 for (link = ha->dev[index].first; link != NULL;
1919 1894 link = link->next) {
1920 1895 tq = link->base_address;
1921 1896
1922 1897 if (!VALID_TARGET_ID(ha, tq->loop_id)) {
1923 1898 continue;
1924 1899 }
1925 1900 if (cnt != dop->option) {
1926 1901 cnt++;
1927 1902 continue;
1928 1903 }
1929 1904 /* fill in the values */
1930 1905 bcopy(tq->port_name, dev.wwpn, 8);
1931 1906 dev.address = tq->d_id.b24;
1932 1907 dev.loop_id = tq->loop_id;
1933 1908 if (tq->flags & TQF_TAPE_DEVICE) {
1934 1909 dev.type = FCT_TAPE;
1935 1910 } else if (tq->flags & TQF_INITIATOR_DEVICE) {
1936 1911 dev.type = FCT_INITIATOR;
1937 1912 } else {
1938 1913 dev.type = FCT_TARGET;
1939 1914 }
1940 1915 break;
1941 1916 }
1942 1917 }
1943 1918
1944 1919 if (ddi_copyout((void *)&dev, (void *)(uintptr_t)dop->buffer,
1945 1920 dop->length, mode) != 0) {
1946 1921 EL(ha, "failed, ddi_copyout\n");
1947 1922 return (EFAULT);
1948 1923 }
1949 1924
1950 1925 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1951 1926
1952 1927 return (0);
1953 1928 }
1954 1929
1955 1930 /*
1956 1931 * ql_adm_update_properties
1957 1932 * Performs qladm QL_UPDATE_PROPERTIES command
1958 1933 *
1959 1934 * Input:
1960 1935 * ha: adapter state pointer.
1961 1936 *
1962 1937 * Returns:
1963 1938 *
1964 1939 * Context:
1965 1940 * Kernel context.
1966 1941 */
static int
ql_adm_update_properties(ql_adapter_state_t *ha)
{
	ql_comb_init_cb_t	init_ctrl_blk;
	ql_comb_ip_init_cb_t	ip_init_ctrl_blk;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Stall driver instance. */
	(void) ql_stall_driver(ha, 0);

	/* Save init control blocks (to detect changes below). */
	bcopy(&ha->init_ctrl_blk, &init_ctrl_blk, sizeof (ql_comb_init_cb_t));
	bcopy(&ha->ip_init_ctrl_blk, &ip_init_ctrl_blk,
	    sizeof (ql_comb_ip_init_cb_t));

	/* Update PCI configration. */
	(void) ql_pci_sbus_config(ha);

	/* Get configuration properties. */
	(void) ql_nvram_config(ha);

	/*
	 * Check for init firmware required: if re-reading the
	 * configuration changed either init control block, the firmware
	 * must be re-initialized via an ISP abort.
	 */
	if (bcmp(&ha->init_ctrl_blk, &init_ctrl_blk,
	    sizeof (ql_comb_init_cb_t)) != 0 ||
	    bcmp(&ha->ip_init_ctrl_blk, &ip_init_ctrl_blk,
	    sizeof (ql_comb_ip_init_cb_t)) != 0) {

		EL(ha, "isp_abort_needed\n");
		ha->loop_down_timer = LOOP_DOWN_TIMER_START;
		TASK_DAEMON_LOCK(ha);
		ha->task_daemon_flags |= LOOP_DOWN | ISP_ABORT_NEEDED;
		TASK_DAEMON_UNLOCK(ha);
	}

	/* Update AEN queue. */
	if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
		ql_enqueue_aen(ha, MBA_PORT_UPDATE, NULL);
	}

	/* Restart driver instance. */
	ql_restart_driver(ha);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (0);
}
2014 1989
2015 1990 /*
2016 1991 * ql_adm_prop_update_int
2017 1992 * Performs qladm QL_PROP_UPDATE_INT command
2018 1993 *
2019 1994 * Input:
2020 1995 * ha: adapter state pointer.
2021 1996 * dop: ql_adm_op_t structure pointer.
2022 1997 * mode: flags.
2023 1998 *
2024 1999 * Returns:
2025 2000 *
2026 2001 * Context:
2027 2002 * Kernel context.
↓ open down ↓ |
435 lines elided |
↑ open up ↑ |
2028 2003 */
2029 2004 static int
2030 2005 ql_adm_prop_update_int(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
2031 2006 {
2032 2007 char *prop_name;
2033 2008 int rval;
2034 2009
2035 2010 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2036 2011
2037 2012 prop_name = kmem_zalloc(dop->length, KM_SLEEP);
2038 - if (prop_name == NULL) {
2039 - EL(ha, "failed, kmem_zalloc\n");
2040 - return (ENOMEM);
2041 - }
2042 2013
2043 2014 if (ddi_copyin((void *)(uintptr_t)dop->buffer, prop_name, dop->length,
2044 2015 mode) != 0) {
2045 2016 EL(ha, "failed, prop_name ddi_copyin\n");
2046 2017 kmem_free(prop_name, dop->length);
2047 2018 return (EFAULT);
2048 2019 }
2049 2020
2050 2021 /*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
2051 2022 if ((rval = ddi_prop_update_int(DDI_DEV_T_NONE, ha->dip, prop_name,
2052 2023 (int)dop->option)) != DDI_PROP_SUCCESS) {
2053 2024 EL(ha, "failed, prop_update=%xh\n", rval);
2054 2025 kmem_free(prop_name, dop->length);
2055 2026 return (EINVAL);
2056 2027 }
2057 2028
2058 2029 kmem_free(prop_name, dop->length);
2059 2030
2060 2031 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2061 2032
2062 2033 return (0);
2063 2034 }
2064 2035
2065 2036 /*
2066 2037 * ql_adm_fw_dump
2067 2038 * Performs qladm QL_FW_DUMP command
2068 2039 *
2069 2040 * Input:
2070 2041 * ha: adapter state pointer.
2071 2042 * dop: ql_adm_op_t structure pointer.
2072 2043 * udop: user space ql_adm_op_t structure pointer.
2073 2044 * mode: flags.
2074 2045 *
2075 2046 * Returns:
2076 2047 *
2077 2048 * Context:
2078 2049 * Kernel context.
2079 2050 */
static int
ql_adm_fw_dump(ql_adapter_state_t *ha, ql_adm_op_t *dop, void *udop, int mode)
{
	caddr_t	dmp;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* The caller's buffer must hold the entire formatted dump. */
	if (dop->length < ha->risc_dump_size) {
		EL(ha, "failed, incorrect length=%xh, size=%xh\n",
		    dop->length, ha->risc_dump_size);
		return (EINVAL);
	}

	if (ha->ql_dump_state & QL_DUMP_VALID) {
		/* KM_SLEEP allocations cannot fail. */
		dmp = kmem_zalloc(ha->risc_dump_size, KM_SLEEP);

		/* Format the binary dump as ASCII text for the user. */
		dop->length = (uint32_t)ql_ascii_fw_dump(ha, dmp);
		if (ddi_copyout((void *)dmp, (void *)(uintptr_t)dop->buffer,
		    dop->length, mode) != 0) {
			EL(ha, "failed, ddi_copyout\n");
			kmem_free(dmp, ha->risc_dump_size);
			return (EFAULT);
		}

		kmem_free(dmp, ha->risc_dump_size);
		/* Remember that this dump has been handed out. */
		ha->ql_dump_state |= QL_DUMP_UPLOADED;

	} else {
		EL(ha, "failed, no dump file\n");
		dop->length = 0;
	}

	/* Copy the (possibly updated) op structure back to user space. */
	if (ddi_copyout(dop, udop, sizeof (ql_adm_op_t), mode) != 0) {
		EL(ha, "failed, driver_op_t ddi_copyout\n");
		return (EFAULT);
	}

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (0);
}
2125 2092
2126 2093 /*
2127 2094 * ql_adm_nvram_dump
2128 2095 * Performs qladm QL_NVRAM_DUMP command
2129 2096 *
2130 2097 * Input:
2131 2098 * ha: adapter state pointer.
2132 2099 * dop: ql_adm_op_t structure pointer.
2133 2100 * mode: flags.
2134 2101 *
2135 2102 * Returns:
2136 2103 *
2137 2104 * Context:
2138 2105 * Kernel context.
2139 2106 */
2140 2107 static int
2141 2108 ql_adm_nvram_dump(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
2142 2109 {
2143 2110 int rval;
2144 2111
2145 2112 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2146 2113
2147 2114 if (dop->length < ha->nvram_cache->size) {
2148 2115 EL(ha, "failed, length=%xh, size=%xh\n", dop->length,
2149 2116 ha->nvram_cache->size);
2150 2117 return (EINVAL);
2151 2118 }
2152 2119
2153 2120 if ((rval = ql_nv_util_dump(ha, (void *)(uintptr_t)dop->buffer,
2154 2121 mode)) != 0) {
2155 2122 EL(ha, "failed, ql_nv_util_dump\n");
2156 2123 } else {
2157 2124 /*EMPTY*/
2158 2125 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2159 2126 }
2160 2127
2161 2128 return (rval);
2162 2129 }
2163 2130
2164 2131 /*
2165 2132 * ql_adm_nvram_load
2166 2133 * Performs qladm QL_NVRAM_LOAD command
2167 2134 *
2168 2135 * Input:
2169 2136 * ha: adapter state pointer.
2170 2137 * dop: ql_adm_op_t structure pointer.
2171 2138 * mode: flags.
2172 2139 *
2173 2140 * Returns:
2174 2141 *
2175 2142 * Context:
2176 2143 * Kernel context.
2177 2144 */
2178 2145 static int
2179 2146 ql_adm_nvram_load(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
2180 2147 {
2181 2148 int rval;
2182 2149
2183 2150 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2184 2151
2185 2152 if (dop->length < ha->nvram_cache->size) {
2186 2153 EL(ha, "failed, length=%xh, size=%xh\n", dop->length,
2187 2154 ha->nvram_cache->size);
2188 2155 return (EINVAL);
2189 2156 }
2190 2157
2191 2158 if ((rval = ql_nv_util_load(ha, (void *)(uintptr_t)dop->buffer,
2192 2159 mode)) != 0) {
2193 2160 EL(ha, "failed, ql_nv_util_dump\n");
2194 2161 } else {
2195 2162 /*EMPTY*/
2196 2163 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2197 2164 }
2198 2165
2199 2166 return (rval);
2200 2167 }
2201 2168
2202 2169 /*
2203 2170 * ql_adm_flash_load
2204 2171 * Performs qladm QL_FLASH_LOAD command
2205 2172 *
2206 2173 * Input:
2207 2174 * ha: adapter state pointer.
2208 2175 * dop: ql_adm_op_t structure pointer.
2209 2176 * mode: flags.
2210 2177 *
2211 2178 * Returns:
2212 2179 *
2213 2180 * Context:
↓ open down ↓ |
105 lines elided |
↑ open up ↑ |
2214 2181 * Kernel context.
2215 2182 */
2216 2183 static int
2217 2184 ql_adm_flash_load(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
2218 2185 {
2219 2186 uint8_t *dp;
2220 2187 int rval;
2221 2188
2222 2189 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2223 2190
2224 - if ((dp = kmem_zalloc(dop->length, KM_SLEEP)) == NULL) {
2225 - EL(ha, "failed, kmem_zalloc\n");
2226 - return (ENOMEM);
2227 - }
2191 + dp = kmem_zalloc(dop->length, KM_SLEEP);
2228 2192
2229 2193 if (ddi_copyin((void *)(uintptr_t)dop->buffer, dp, dop->length,
2230 2194 mode) != 0) {
2231 2195 EL(ha, "ddi_copyin failed\n");
2232 2196 kmem_free(dp, dop->length);
2233 2197 return (EFAULT);
2234 2198 }
2235 2199
2236 2200 if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
2237 2201 EL(ha, "ql_stall_driver failed\n");
2238 2202 kmem_free(dp, dop->length);
2239 2203 return (EBUSY);
2240 2204 }
2241 2205
2242 2206 rval = (CFG_IST(ha, CFG_CTRL_24258081) ?
2243 2207 ql_24xx_load_flash(ha, dp, dop->length, dop->option) :
2244 2208 ql_load_flash(ha, dp, dop->length));
2245 2209
2246 2210 ql_restart_driver(ha);
2247 2211
2248 2212 kmem_free(dp, dop->length);
2249 2213
2250 2214 if (rval != QL_SUCCESS) {
2251 2215 EL(ha, "failed\n");
2252 2216 return (EIO);
2253 2217 }
2254 2218
2255 2219 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2256 2220
2257 2221 return (0);
2258 2222 }
2259 2223
2260 2224 /*
2261 2225 * ql_adm_vpd_dump
2262 2226 * Performs qladm QL_VPD_DUMP command
2263 2227 *
2264 2228 * Input:
2265 2229 * ha: adapter state pointer.
2266 2230 * dop: ql_adm_op_t structure pointer.
2267 2231 * mode: flags.
2268 2232 *
2269 2233 * Returns:
2270 2234 *
2271 2235 * Context:
2272 2236 * Kernel context.
2273 2237 */
2274 2238 static int
2275 2239 ql_adm_vpd_dump(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
2276 2240 {
2277 2241 int rval;
2278 2242
2279 2243 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2280 2244
2281 2245 if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
2282 2246 EL(ha, "hba does not support VPD\n");
2283 2247 return (EINVAL);
2284 2248 }
2285 2249
2286 2250 if (dop->length < QL_24XX_VPD_SIZE) {
2287 2251 EL(ha, "failed, length=%xh, size=%xh\n", dop->length,
2288 2252 QL_24XX_VPD_SIZE);
2289 2253 return (EINVAL);
2290 2254 }
2291 2255
2292 2256 if ((rval = ql_vpd_dump(ha, (void *)(uintptr_t)dop->buffer, mode))
2293 2257 != 0) {
2294 2258 EL(ha, "failed, ql_vpd_dump\n");
2295 2259 } else {
2296 2260 /*EMPTY*/
2297 2261 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2298 2262 }
2299 2263
2300 2264 return (rval);
2301 2265 }
2302 2266
2303 2267 /*
2304 2268 * ql_adm_vpd_load
2305 2269 * Performs qladm QL_VPD_LOAD command
2306 2270 *
2307 2271 * Input:
2308 2272 * ha: adapter state pointer.
2309 2273 * dop: ql_adm_op_t structure pointer.
2310 2274 * mode: flags.
2311 2275 *
2312 2276 * Returns:
2313 2277 *
2314 2278 * Context:
2315 2279 * Kernel context.
2316 2280 */
2317 2281 static int
2318 2282 ql_adm_vpd_load(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
2319 2283 {
2320 2284 int rval;
2321 2285
2322 2286 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2323 2287
2324 2288 if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
2325 2289 EL(ha, "hba does not support VPD\n");
2326 2290 return (EINVAL);
2327 2291 }
2328 2292
2329 2293 if (dop->length < QL_24XX_VPD_SIZE) {
2330 2294 EL(ha, "failed, length=%xh, size=%xh\n", dop->length,
2331 2295 QL_24XX_VPD_SIZE);
2332 2296 return (EINVAL);
2333 2297 }
2334 2298
2335 2299 if ((rval = ql_vpd_load(ha, (void *)(uintptr_t)dop->buffer, mode))
2336 2300 != 0) {
2337 2301 EL(ha, "failed, ql_vpd_dump\n");
2338 2302 } else {
2339 2303 /*EMPTY*/
2340 2304 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2341 2305 }
2342 2306
2343 2307 return (rval);
2344 2308 }
2345 2309
2346 2310 /*
2347 2311 * ql_adm_vpd_gettag
2348 2312 * Performs qladm QL_VPD_GETTAG command
2349 2313 *
2350 2314 * Input:
2351 2315 * ha: adapter state pointer.
2352 2316 * dop: ql_adm_op_t structure pointer.
2353 2317 * mode: flags.
2354 2318 *
2355 2319 * Returns:
2356 2320 *
2357 2321 * Context:
2358 2322 * Kernel context.
2359 2323 */
2360 2324 static int
2361 2325 ql_adm_vpd_gettag(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
2362 2326 {
2363 2327 int rval = 0;
2364 2328 uint8_t *lbuf;
2365 2329
2366 2330 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2367 2331
2368 2332 if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
2369 2333 EL(ha, "hba does not support VPD\n");
2370 2334 return (EINVAL);
2371 2335 }
2372 2336
2373 2337 if ((lbuf = (uint8_t *)kmem_zalloc(dop->length, KM_SLEEP)) == NULL) {
2374 2338 EL(ha, "mem alloc failure of %xh bytes\n", dop->length);
2375 2339 rval = EFAULT;
2376 2340 } else {
2377 2341 if (ddi_copyin((void *)(uintptr_t)dop->buffer, lbuf,
2378 2342 dop->length, mode) != 0) {
2379 2343 EL(ha, "ddi_copyin failed\n");
2380 2344 kmem_free(lbuf, dop->length);
2381 2345 return (EFAULT);
2382 2346 }
2383 2347
2384 2348 if ((rval = ql_vpd_lookup(ha, lbuf, lbuf, (int32_t)
2385 2349 dop->length)) < 0) {
2386 2350 EL(ha, "failed vpd_lookup\n");
2387 2351 } else {
2388 2352 if (ddi_copyout(lbuf, (void *)(uintptr_t)dop->buffer,
2389 2353 strlen((int8_t *)lbuf)+1, mode) != 0) {
2390 2354 EL(ha, "failed, ddi_copyout\n");
2391 2355 rval = EFAULT;
2392 2356 } else {
2393 2357 rval = 0;
2394 2358 }
2395 2359 }
2396 2360 kmem_free(lbuf, dop->length);
2397 2361 }
2398 2362
2399 2363 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2400 2364
2401 2365 return (rval);
2402 2366 }
2403 2367
2404 2368 /*
2405 2369 * ql_adm_updfwmodule
2406 2370 * Performs qladm QL_UPD_FWMODULE command
2407 2371 *
2408 2372 * Input:
2409 2373 * ha: adapter state pointer.
2410 2374 * dop: ql_adm_op_t structure pointer.
2411 2375 * mode: flags.
2412 2376 *
2413 2377 * Returns:
2414 2378 *
2415 2379 * Context:
2416 2380 * Kernel context.
2417 2381 */
2418 2382 /* ARGSUSED */
2419 2383 static int
2420 2384 ql_adm_updfwmodule(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
2421 2385 {
2422 2386 int rval = DDI_SUCCESS;
2423 2387 ql_link_t *link;
2424 2388 ql_adapter_state_t *ha2 = NULL;
2425 2389 uint16_t fw_class = (uint16_t)dop->option;
2426 2390
2427 2391 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2428 2392
2429 2393 /* zero the firmware module reference count */
2430 2394 for (link = ql_hba.first; link != NULL; link = link->next) {
2431 2395 ha2 = link->base_address;
2432 2396 if (fw_class == ha2->fw_class) {
2433 2397 if ((rval = ddi_modclose(ha2->fw_module)) !=
2434 2398 DDI_SUCCESS) {
2435 2399 EL(ha2, "modclose rval=%xh\n", rval);
2436 2400 break;
2437 2401 }
2438 2402 ha2->fw_module = NULL;
2439 2403 }
2440 2404 }
2441 2405
2442 2406 /* reload the f/w modules */
2443 2407 for (link = ql_hba.first; link != NULL; link = link->next) {
2444 2408 ha2 = link->base_address;
2445 2409
2446 2410 if ((fw_class == ha2->fw_class) && (ha2->fw_class == NULL)) {
2447 2411 if ((rval = (int32_t)ql_fwmodule_resolve(ha2)) !=
2448 2412 QL_SUCCESS) {
2449 2413 EL(ha2, "unable to load f/w module: '%x' "
2450 2414 "(rval=%xh)\n", ha2->fw_class, rval);
2451 2415 rval = EFAULT;
2452 2416 } else {
2453 2417 EL(ha2, "f/w module updated: '%x'\n",
2454 2418 ha2->fw_class);
2455 2419 }
2456 2420
2457 2421 EL(ha2, "isp abort needed (%d)\n", ha->instance);
2458 2422
2459 2423 ql_awaken_task_daemon(ha2, NULL, ISP_ABORT_NEEDED, 0);
2460 2424
2461 2425 rval = 0;
2462 2426 }
2463 2427 }
2464 2428
2465 2429 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2466 2430
2467 2431 return (rval);
2468 2432 }
↓ open down ↓ |
231 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX